From 3be6c4eb6afc04ce48d4dd794226de9d855e3341 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 09:23:44 -0800
Subject: [PATCH 01/10] restore skip

---
 .circleci/config.yml | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index f6346b0c33c461..f3b13d26eb5246 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -88,7 +88,7 @@ jobs:
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-torch_and_tf-{{ checksum "setup.py" }}
@@ -115,7 +115,7 @@
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-torch-{{ checksum "setup.py" }}
@@ -142,7 +142,7 @@
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-tf-{{ checksum "setup.py" }}
@@ -169,7 +169,7 @@
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-flax-{{ checksum "setup.py" }}
@@ -196,7 +196,7 @@
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-torch-{{ checksum "setup.py" }}
@@ -223,7 +223,7 @@
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
      - restore_cache:
           keys:
             - v0.4-tf-{{ checksum "setup.py" }}
@@ -248,7 +248,7 @@
       RUN_CUSTOM_TOKENIZERS: yes
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-custom_tokenizers-{{ checksum "setup.py" }}
@@ -276,7 +276,7 @@
     parallelism: 1
     steps:
       - checkout
-      # - skip-job-on-doc-only-changes
+      - skip-job-on-doc-only-changes
       - restore_cache:
           keys:
             - v0.4-torch_examples-{{ checksum "setup.py" }}
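PATCH 01/10 re-enables the `skip-job-on-doc-only-changes` command for all eight test jobs. A minimal local sketch of the check that command performs is below; BASE is a stand-in for `<< pipeline.git.base_revision >>`, HEAD plays the role of `<< pipeline.git.revision >>`, and the `origin/master` default is an assumption for illustration only:

    #!/usr/bin/env bash
    # Sketch only: approximate the CI doc-only check outside CircleCI.
    BASE="${1:-origin/master}"   # assumed default; CI substitutes pipeline.git.base_revision
    if git diff --name-only "$BASE"...HEAD | egrep -qv '\.(md|rst)$'
    then
        echo "Non-docs were modified in this PR, proceeding normally"
    else
        echo "Only docs were modified in this PR, quitting this job"
        # in the real job this branch runs: circleci step halt
    fi

The `-v` inverts the match, so the job proceeds as long as at least one changed file is not a `.md` or `.rst` file.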
From dfec84db3fdce1079f01f1bc8dfaf21db2ccaba1 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 09:25:19 -0800
Subject: [PATCH 02/10] Revert "Remove deprecated `evalutate_during_training`
 (#8852)"

This reverts commit 553029909620455e040a49032a9c45f6a5f0cd52.

---
 examples/seq2seq/builtin_trainer/finetune.sh               | 3 +--
 examples/seq2seq/builtin_trainer/finetune_tpu.sh           | 3 +--
 .../seq2seq/builtin_trainer/train_distil_marian_enro.sh    | 3 +--
 .../builtin_trainer/train_distil_marian_enro_tpu.sh        | 3 +--
 examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh   | 3 +--
 examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh  | 3 +--
 src/transformers/integrations.py                           | 5 ++---
 src/transformers/trainer_tf.py                             | 4 ++--
 src/transformers/training_args_tf.py                       | 8 ++------
 9 files changed, 12 insertions(+), 23 deletions(-)

diff --git a/examples/seq2seq/builtin_trainer/finetune.sh b/examples/seq2seq/builtin_trainer/finetune.sh
index 8c2d13d5adff58..65f207c21a39ba 100644
--- a/examples/seq2seq/builtin_trainer/finetune.sh
+++ b/examples/seq2seq/builtin_trainer/finetune.sh
@@ -3,8 +3,7 @@
 python finetune_trainer.py \
     --learning_rate=3e-5 \
     --fp16 \
-    --do_train --do_eval --do_predict \
-    --evaluation_strategy steps \
+    --do_train --do_eval --do_predict --evaluate_during_training \
     --predict_with_generate \
     --n_val 1000 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/finetune_tpu.sh b/examples/seq2seq/builtin_trainer/finetune_tpu.sh
index 577f99fc7a2501..8bd367c852deaa 100644
--- a/examples/seq2seq/builtin_trainer/finetune_tpu.sh
+++ b/examples/seq2seq/builtin_trainer/finetune_tpu.sh
@@ -5,8 +5,7 @@ export TPU_NUM_CORES=8
 python xla_spawn.py --num_cores $TPU_NUM_CORES \
     finetune_trainer.py \
     --learning_rate=3e-5 \
-    --do_train --do_eval \
-    --evaluation_strategy steps \
+    --do_train --do_eval --evaluate_during_training \
     --prediction_loss_only \
     --n_val 1000 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh b/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh
index 10c809b0e3a0ca..1503e821a84a4c 100644
--- a/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh
+++ b/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh
@@ -16,8 +16,7 @@ python finetune_trainer.py \
     --num_train_epochs=6 \
     --save_steps 3000 --eval_steps 3000 \
     --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
-    --do_train --do_eval --do_predict \
-    --evaluation_strategy steps \
+    --do_train --do_eval --do_predict --evaluate_during_training\
     --predict_with_generate --logging_first_step \
     --task translation --label_smoothing 0.1 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh b/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh
index 098425d65f1161..ca9a57fa432fb5 100644
--- a/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh
+++ b/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh
@@ -17,8 +17,7 @@ python xla_spawn.py --num_cores $TPU_NUM_CORES \
     --save_steps 500 --eval_steps 500 \
     --logging_first_step --logging_steps 200 \
     --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
-    --do_train --do_eval \
-    --evaluation_strategy steps \
+    --do_train --do_eval --evaluate_during_training \
     --prediction_loss_only \
     --task translation --label_smoothing 0.1 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh b/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh
index d29f6b8037cc1c..dbb85cbe1b8363 100644
--- a/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh
+++ b/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh
@@ -19,7 +19,6 @@ python finetune_trainer.py \
     --save_steps 3000 --eval_steps 3000 \
     --logging_first_step \
     --max_target_length 56 --val_max_target_length $MAX_TGT_LEN --test_max_target_length $MAX_TGT_LEN \
-    --do_train --do_eval --do_predict \
-    --evaluation_strategy steps \
+    --do_train --do_eval --do_predict --evaluate_during_training \
     --predict_with_generate --sortish_sampler \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh b/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh
index 3dc711f20358f0..7a2a5c72209340 100644
--- a/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh
+++ b/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh
@@ -15,8 +15,7 @@ python finetune_trainer.py \
     --sortish_sampler \
     --num_train_epochs 6 \
     --save_steps 25000 --eval_steps 25000 --logging_steps 1000 \
-    --do_train --do_eval --do_predict \
-    --evaluation_strategy steps \
+    --do_train --do_eval --do_predict --evaluate_during_training \
     --predict_with_generate --logging_first_step \
     --task translation \
     "$@"

diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py
index 4c813e1ce29255..d14e6e7ce13522 100644
--- a/src/transformers/integrations.py
+++ b/src/transformers/integrations.py
@@ -2,7 +2,6 @@
 import math
 import os
 
-from .trainer_utils import EvaluationStrategy
 from .utils import logging
 
 
@@ -213,13 +212,13 @@ def _objective(trial, checkpoint_dir=None):
     # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting.
     if isinstance(
         kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
-    ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == EvaluationStrategy.NO):
+    ) and (not trainer.args.do_eval or not trainer.args.evaluate_during_training):
         raise RuntimeError(
             "You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
             "This means your trials will not report intermediate results to Ray Tune, and "
             "can thus not be stopped early or used to exploit other trials parameters. "
             "If this is what you want, do not use {cls}. If you would like to use {cls}, "
-            "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the "
+            "make sure you pass `do_eval=True` and `evaluate_during_training=True` in the "
             "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
         )
 

diff --git a/src/transformers/trainer_tf.py b/src/transformers/trainer_tf.py
index 162815dbc63738..6275ceafe5a0e2 100644
--- a/src/transformers/trainer_tf.py
+++ b/src/transformers/trainer_tf.py
@@ -19,7 +19,7 @@
 from .modeling_tf_utils import TFPreTrainedModel
 from .optimization_tf import GradientAccumulator, create_optimizer
-from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, EvaluationStrategy, PredictionOutput, set_seed
+from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
 from .training_args_tf import TFTrainingArguments
 from .utils import logging
 
 
@@ -561,7 +561,7 @@ def train(self) -> None:
 
                 if (
                     self.args.eval_steps > 0
-                    and self.args.evaluate_strategy == EvaluationStrategy.STEPS
+                    and self.args.evaluate_during_training
                     and self.global_step % self.args.eval_steps == 0
                 ):
                     self.evaluate()

diff --git a/src/transformers/training_args_tf.py b/src/transformers/training_args_tf.py
index 2efe7a6becc08f..91890605da4895 100644
--- a/src/transformers/training_args_tf.py
+++ b/src/transformers/training_args_tf.py
@@ -34,12 +34,8 @@ class TFTrainingArguments(TrainingArguments):
             Whether to run evaluation on the dev set or not.
         do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
             Whether to run predictions on the test set or not.
-        evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
-            The evaluation strategy to adopt during training. Possible values are:
-
-                * :obj:`"no"`: No evaluation is done during training.
-                * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
-
+        evaluate_during_training (:obj:`bool`, `optional`, defaults to :obj:`False`):
+            Whether to run evaluation during training at each logging step or not.
         per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
             The batch size per GPU/TPU core/CPU for training.
         per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):

From 4c37d054175b6fb8bc42b408ab23247853ae58cd Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:00:09 -0800
Subject: [PATCH 03/10] check that pipeline.git.base_revision is defined
 before proceeding

---
 .circleci/config.yml | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index f3b13d26eb5246..ef4e76b7578c09 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -11,12 +11,18 @@ commands:
       - run:
           name: docs-only changes skip check
          command: |
-            if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(md|rst)$'
+            # pipeline.git.base_revision is not always defined, so only proceed if all external vars are defined
+            if test -n "<< pipeline.git.base_revision >>" && test -n "<< pipeline.git.revision >>"
             then
-              echo "Non-docs were modified in this PR, proceeding normally"
+              if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(md|rst)$'
+              then
+                echo "Non-docs were modified in this PR, proceeding normally"
+              else
+                echo "Only docs were modified in this PR, quitting this job"
+                circleci step halt
+              fi
             else
-              echo "Only docs were modified in this PR, quitting this job"
-              circleci step halt
+              echo "Can't perform skipping check w/o base_revision define, continue the job..."
             fi
 
 # TPU REFERENCES
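The guard added in PATCH 03/10 matters because `pipeline.git.base_revision` is left undefined on some pipelines (for example, the first push of a branch, which has no base to diff against). A sketch of the failure mode the guard prevents, reasoning from gitrevisions semantics rather than from anything stated in this series:

    # Sketch: with base_revision undefined, the CircleCI template interpolates
    # to an empty string and the old command collapses to:
    git diff --name-only ...HEAD | egrep -qv '\.(md|rst)$'
    # git reads a missing side of "..." as HEAD, so "...HEAD" means
    # "HEAD...HEAD": an empty diff. egrep then finds no non-doc file, and the
    # old else-branch would halt every job as though the PR were doc-only.

Hence the fallback branch that continues the job when the variables are not both non-empty.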
From c40d62a5348a98d44b0dcf2802797c648c45a510 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:03:38 -0800
Subject: [PATCH 04/10] Revert "Revert "Remove deprecated
 `evalutate_during_training` (#8852)""

This reverts commit dfec84db3fdce1079f01f1bc8dfaf21db2ccaba1.

---
 examples/seq2seq/builtin_trainer/finetune.sh               | 3 ++-
 examples/seq2seq/builtin_trainer/finetune_tpu.sh           | 3 ++-
 .../seq2seq/builtin_trainer/train_distil_marian_enro.sh    | 3 ++-
 .../builtin_trainer/train_distil_marian_enro_tpu.sh        | 3 ++-
 examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh   | 3 ++-
 examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh  | 3 ++-
 src/transformers/integrations.py                           | 5 +++--
 src/transformers/trainer_tf.py                             | 4 ++--
 src/transformers/training_args_tf.py                       | 8 ++++++--
 9 files changed, 23 insertions(+), 12 deletions(-)

diff --git a/examples/seq2seq/builtin_trainer/finetune.sh b/examples/seq2seq/builtin_trainer/finetune.sh
index 65f207c21a39ba..8c2d13d5adff58 100644
--- a/examples/seq2seq/builtin_trainer/finetune.sh
+++ b/examples/seq2seq/builtin_trainer/finetune.sh
@@ -3,7 +3,8 @@
 python finetune_trainer.py \
     --learning_rate=3e-5 \
     --fp16 \
-    --do_train --do_eval --do_predict --evaluate_during_training \
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate \
     --n_val 1000 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/finetune_tpu.sh b/examples/seq2seq/builtin_trainer/finetune_tpu.sh
index 8bd367c852deaa..577f99fc7a2501 100644
--- a/examples/seq2seq/builtin_trainer/finetune_tpu.sh
+++ b/examples/seq2seq/builtin_trainer/finetune_tpu.sh
@@ -5,7 +5,8 @@ export TPU_NUM_CORES=8
 python xla_spawn.py --num_cores $TPU_NUM_CORES \
     finetune_trainer.py \
     --learning_rate=3e-5 \
-    --do_train --do_eval --evaluate_during_training \
+    --do_train --do_eval \
+    --evaluation_strategy steps \
     --prediction_loss_only \
     --n_val 1000 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh b/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh
index 1503e821a84a4c..10c809b0e3a0ca 100644
--- a/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh
+++ b/examples/seq2seq/builtin_trainer/train_distil_marian_enro.sh
@@ -16,7 +16,8 @@ python finetune_trainer.py \
     --num_train_epochs=6 \
     --save_steps 3000 --eval_steps 3000 \
     --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
-    --do_train --do_eval --do_predict --evaluate_during_training\
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate --logging_first_step \
     --task translation --label_smoothing 0.1 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh b/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh
index ca9a57fa432fb5..098425d65f1161 100644
--- a/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh
+++ b/examples/seq2seq/builtin_trainer/train_distil_marian_enro_tpu.sh
@@ -17,7 +17,8 @@ python xla_spawn.py --num_cores $TPU_NUM_CORES \
     --save_steps 500 --eval_steps 500 \
     --logging_first_step --logging_steps 200 \
     --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
-    --do_train --do_eval --evaluate_during_training \
+    --do_train --do_eval \
+    --evaluation_strategy steps \
     --prediction_loss_only \
     --task translation --label_smoothing 0.1 \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh b/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh
index dbb85cbe1b8363..d29f6b8037cc1c 100644
--- a/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh
+++ b/examples/seq2seq/builtin_trainer/train_distilbart_cnn.sh
@@ -19,6 +19,7 @@ python finetune_trainer.py \
     --save_steps 3000 --eval_steps 3000 \
     --logging_first_step \
     --max_target_length 56 --val_max_target_length $MAX_TGT_LEN --test_max_target_length $MAX_TGT_LEN \
-    --do_train --do_eval --do_predict --evaluate_during_training \
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate --sortish_sampler \
     "$@"

diff --git a/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh b/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh
index 7a2a5c72209340..3dc711f20358f0 100644
--- a/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh
+++ b/examples/seq2seq/builtin_trainer/train_mbart_cc25_enro.sh
@@ -15,7 +15,8 @@ python finetune_trainer.py \
     --sortish_sampler \
     --num_train_epochs 6 \
     --save_steps 25000 --eval_steps 25000 --logging_steps 1000 \
-    --do_train --do_eval --do_predict --evaluate_during_training \
+    --do_train --do_eval --do_predict \
+    --evaluation_strategy steps \
     --predict_with_generate --logging_first_step \
     --task translation \
     "$@"

diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py
index d14e6e7ce13522..4c813e1ce29255 100644
--- a/src/transformers/integrations.py
+++ b/src/transformers/integrations.py
@@ -2,6 +2,7 @@
 import math
 import os
 
+from .trainer_utils import EvaluationStrategy
 from .utils import logging
 
 
@@ -212,13 +213,13 @@ def _objective(trial, checkpoint_dir=None):
     # Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting.
     if isinstance(
         kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
-    ) and (not trainer.args.do_eval or not trainer.args.evaluate_during_training):
+    ) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == EvaluationStrategy.NO):
         raise RuntimeError(
             "You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
             "This means your trials will not report intermediate results to Ray Tune, and "
             "can thus not be stopped early or used to exploit other trials parameters. "
             "If this is what you want, do not use {cls}. If you would like to use {cls}, "
-            "make sure you pass `do_eval=True` and `evaluate_during_training=True` in the "
+            "make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the "
             "Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
         )
 

diff --git a/src/transformers/trainer_tf.py b/src/transformers/trainer_tf.py
index 6275ceafe5a0e2..162815dbc63738 100644
--- a/src/transformers/trainer_tf.py
+++ b/src/transformers/trainer_tf.py
@@ -19,7 +19,7 @@
 from .modeling_tf_utils import TFPreTrainedModel
 from .optimization_tf import GradientAccumulator, create_optimizer
-from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
+from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, EvaluationStrategy, PredictionOutput, set_seed
 from .training_args_tf import TFTrainingArguments
 from .utils import logging
 
 
@@ -561,7 +561,7 @@ def train(self) -> None:
 
                 if (
                     self.args.eval_steps > 0
-                    and self.args.evaluate_during_training
+                    and self.args.evaluate_strategy == EvaluationStrategy.STEPS
                     and self.global_step % self.args.eval_steps == 0
                 ):
                     self.evaluate()

diff --git a/src/transformers/training_args_tf.py b/src/transformers/training_args_tf.py
index 91890605da4895..2efe7a6becc08f 100644
--- a/src/transformers/training_args_tf.py
+++ b/src/transformers/training_args_tf.py
@@ -34,8 +34,12 @@ class TFTrainingArguments(TrainingArguments):
             Whether to run evaluation on the dev set or not.
         do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`):
             Whether to run predictions on the test set or not.
-        evaluate_during_training (:obj:`bool`, `optional`, defaults to :obj:`False`):
-            Whether to run evaluation during training at each logging step or not.
+        evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.EvaluationStrategy`, `optional`, defaults to :obj:`"no"`):
+            The evaluation strategy to adopt during training. Possible values are:
+
+                * :obj:`"no"`: No evaluation is done during training.
+                * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`.
+
         per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8):
             The batch size per GPU/TPU core/CPU for training.
         per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8):

From 7dc5da781661d4276c49602cdaa5ee75e5e4c697 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:04:40 -0800
Subject: [PATCH 05/10] check that pipeline.git.base_revision is defined
 before proceeding

---
 .circleci/config.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index ef4e76b7578c09..b67de226c74fb6 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -14,7 +14,7 @@
             # pipeline.git.base_revision is not always defined, so only proceed if all external vars are defined
             if test -n "<< pipeline.git.base_revision >>" && test -n "<< pipeline.git.revision >>"
             then
-              if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(md|rst)$'
+              if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(yml|md|rst)$'
               then
                 echo "Non-docs were modified in this PR, proceeding normally"
               else
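PATCH 05/10 temporarily adds `yml` to the doc-only pattern so that this PR's own `.circleci/config.yml` edits do not count as code changes while the skip logic is being exercised. Since `egrep -qv` inverts the match, a job runs only when at least one changed file does not end in a listed extension. A small sketch for checking how a hypothetical file list classifies; `check` is an ad-hoc helper, not part of the repo:

    # Sketch: classify a hypothetical change set the way the updated pattern does.
    check() {
        printf '%s\n' "$@" | egrep -qv '\.(yml|md|rst)$' \
            && echo "run jobs" || echo "skip"
    }
    check README.md docs/index.rst                  # -> skip (docs only)
    check .circleci/config.yml README.md            # -> skip (yml now counts as docs)
    check src/transformers/trainer_tf.py README.md  # -> run jobs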
From e1f8ec12198b538902d9594b9bacc91d35e091d6 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:06:01 -0800
Subject: [PATCH 06/10] doc only

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index ef428819837f6b..c58f31cdad0b89 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,4 @@
+
 
 
 

From aa449b124c39c252f4a5b35387c156aefaed2aa5 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:08:22 -0800
Subject: [PATCH 07/10] doc + code

---
 setup.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/setup.py b/setup.py
index 254b91193064b8..9c6fa3313b3cff 100644
--- a/setup.py
+++ b/setup.py
@@ -1,3 +1,4 @@
+
 """
 Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py
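PATCH 06/10 and PATCH 07/10 are throwaway fixtures: the first touches only README.md (the jobs should halt), the second also touches setup.py (the jobs should run). Assuming a checkout that contains both commits, their classification can be replayed locally; the hashes come from the patch headers above, and diffing each commit against its parent is only an approximation of the base_revision...revision range CI actually uses:

    # Sketch: replay the classification for the two fixture commits.
    git diff --name-only e1f8ec12198b~1...e1f8ec12198b | egrep -qv '\.(yml|md|rst)$' \
        && echo "PATCH 06 would run the jobs" \
        || echo "PATCH 06 (doc only) would be skipped"
    git diff --name-only aa449b124c39~1...aa449b124c39 | egrep -qv '\.(yml|md|rst)$' \
        && echo "PATCH 07 (doc + code) would run the jobs" \
        || echo "PATCH 07 would be skipped"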
From 9e216fb375655ca13729304454f22ffd2491a864 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:14:33 -0800
Subject: [PATCH 08/10] restore

---
 .circleci/config.yml | 2 +-
 README.md            | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index b67de226c74fb6..ef4e76b7578c09 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -14,7 +14,7 @@
             # pipeline.git.base_revision is not always defined, so only proceed if all external vars are defined
             if test -n "<< pipeline.git.base_revision >>" && test -n "<< pipeline.git.revision >>"
             then
-              if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(yml|md|rst)$'
+              if git diff --name-only << pipeline.git.base_revision >>...<< pipeline.git.revision >> | egrep -qv '\.(md|rst)$'
               then
                 echo "Non-docs were modified in this PR, proceeding normally"
               else
diff --git a/README.md b/README.md
index c58f31cdad0b89..ef428819837f6b 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,3 @@
-
 
 
 

From 6ab950f18e6ff3d02adf37e5f8b049f34280147c Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:15:44 -0800
Subject: [PATCH 09/10] restore

---
 setup.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/setup.py b/setup.py
index 9c6fa3313b3cff..254b91193064b8 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,3 @@
-
 """
 Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py

From b6ecec9a1c055cf9ab9b45750a8602671f3d0626 Mon Sep 17 00:00:00 2001
From: Stas Bekman
Date: Mon, 30 Nov 2020 19:16:42 -0800
Subject: [PATCH 10/10] typo

---
 .circleci/config.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index ef4e76b7578c09..eb004a6deb6ced 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -22,7 +22,7 @@
                 circleci step halt
               fi
             else
-              echo "Can't perform skipping check w/o base_revision define, continue the job..."
+              echo "Can't perform skipping check w/o base_revision defined, continuing the job"
             fi
 
 # TPU REFERENCES
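After PATCH 08/10 and PATCH 09/10 remove the fixtures and PATCH 10/10 fixes the log message, the series leaves the tree with `evaluation_strategy` in place (PATCH 04/10 re-landed the removal of the deprecated flag). For callers still passing the old flag, the migration mirrors the finetune scripts above; a representative invocation, with the `--eval_steps` value purely illustrative:

    # Sketch: final-state usage, where --evaluate_during_training no longer exists.
    # Mirrors examples/seq2seq/builtin_trainer/finetune.sh.
    python finetune_trainer.py \
        --learning_rate=3e-5 \
        --do_train --do_eval --do_predict \
        --evaluation_strategy steps \
        --eval_steps 1000 \
        --predict_with_generate \
        --n_val 1000 \
        "$@"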