diff --git a/.circleci/config.yml b/.circleci/config.yml
index b2eb935da2dd40..ea528132b85071 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -655,6 +655,71 @@ jobs:
         - store_artifacts:
             path: ~/transformers/reports
 
+    run_examples_tensorflow:
+        working_directory: ~/transformers
+        docker:
+            - image: cimg/python:3.7.12
+        environment:
+            OMP_NUM_THREADS: 1
+            TRANSFORMERS_IS_CI: yes
+            PYTEST_TIMEOUT: 120
+        resource_class: xlarge
+        parallelism: 1
+        steps:
+            - checkout
+            - restore_cache:
+                keys:
+                    - v0.5-tensorflow_examples-{{ checksum "setup.py" }}
+                    - v0.5-{{ checksum "setup.py" }}
+            - run: pip install --upgrade pip
+            - run: pip install .[sklearn,tensorflow,sentencepiece,testing]
+            - run: pip install -r examples/tensorflow/_tests_requirements.txt
+            - save_cache:
+                key: v0.5-tensorflow_examples-{{ checksum "setup.py" }}
+                paths:
+                    - '~/.cache/pip'
+            - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt
+            - store_artifacts:
+                path: ~/transformers/test_preparation.txt
+            - run: |
+                if [ -f test_list.txt ]; then
+                    python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tensorflow_examples_output.txt
+                fi
+            - store_artifacts:
+                path: ~/transformers/tensorflow_examples_output.txt
+            - store_artifacts:
+                path: ~/transformers/reports
+
+    run_examples_tensorflow_all:
+        working_directory: ~/transformers
+        docker:
+            - image: cimg/python:3.7.12
+        environment:
+            OMP_NUM_THREADS: 1
+            TRANSFORMERS_IS_CI: yes
+            PYTEST_TIMEOUT: 120
+        resource_class: xlarge
+        parallelism: 1
+        steps:
+            - checkout
+            - restore_cache:
+                keys:
+                    - v0.5-tensorflow_examples-{{ checksum "setup.py" }}
+                    - v0.5-{{ checksum "setup.py" }}
+            - run: pip install --upgrade pip
+            - run: pip install .[sklearn,tensorflow,sentencepiece,testing]
+            - run: pip install -r examples/tensorflow/_tests_requirements.txt
+            - save_cache:
+                key: v0.5-tensorflow_examples-{{ checksum "setup.py" }}
+                paths:
+                    - '~/.cache/pip'
+            - run: |
+                TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tensorflow_examples_output.txt
+            - store_artifacts:
+                path: ~/transformers/tensorflow_examples_output.txt
+            - store_artifacts:
+                path: ~/transformers/reports
+
     run_examples_flax:
         working_directory: ~/transformers
         docker:
@@ -912,49 +977,20 @@ workflows:
         jobs:
             - check_code_quality
             - check_repository_consistency
-            - fetch_tests
-            - run_examples_torch:
-                requires:
-                    - fetch_tests
-            - run_examples_tensorflow:
-                requires:
-                    - fetch_tests
-            - run_examples_flax:
-                requires:
-                    - fetch_tests
-            - run_tests_custom_tokenizers:
-                requires:
-                    - fetch_tests
-            - run_tests_torch_and_tf:
-                requires:
-                    - fetch_tests
-            - run_tests_torch_and_flax:
-                requires:
-                    - fetch_tests
-            - run_tests_torch:
-                requires:
-                    - fetch_tests
-            - run_tests_tf:
-                requires:
-                    - fetch_tests
-            - run_tests_flax:
-                requires:
-                    - fetch_tests
-            - run_tests_pipelines_torch:
-                requires:
-                    - fetch_tests
-            - run_tests_pipelines_tf:
-                requires:
-                    - fetch_tests
-            - run_tests_onnxruntime:
-                requires:
-                    - fetch_tests
-            - run_tests_hub:
-                requires:
-                    - fetch_tests
-            - run_tests_layoutlmv2_and_v3:
-                requires:
-                    - fetch_tests
+            - run_examples_torch
+            - run_examples_tensorflow
+            - run_examples_flax
+            - run_tests_custom_tokenizers
+            - run_tests_torch_and_tf
+            - run_tests_torch_and_flax
+            - run_tests_torch
+            - run_tests_tf
+            - run_tests_flax
+            - run_tests_pipelines_torch
+            - run_tests_pipelines_tf
+            - run_tests_onnxruntime
+            - run_tests_hub
+            - run_tests_layoutlmv2_and_v3
     nightly:
         triggers:
             - schedule:
@@ -964,49 +1000,18 @@
                         only:
                             - main
         jobs:
-            - fetch_all_tests
-            - run_examples_torch:
-                requires:
-                    - fetch_all_tests
-            - run_examples_tensorflow:
-                requires:
-                    - fetch_all_tests
-            - run_examples_flax:
-                requires:
-                    - fetch_all_tests
-            - run_tests_custom_tokenizers:
-                requires:
-                    - fetch_all_tests
-            - run_tests_torch_and_tf:
-                requires:
-                    - fetch_all_tests
-            - run_tests_torch_and_flax:
-                requires:
-                    - fetch_all_tests
-            - run_tests_torch:
-                requires:
-                    - fetch_all_tests
-            - run_tests_tf:
-                requires:
-                    - fetch_all_tests
-            - run_tests_flax:
-                requires:
-                    - fetch_all_tests
-            - run_tests_pipelines_torch:
-                requires:
-                    - fetch_all_tests
-            - run_tests_pipelines_tf:
-                requires:
-                    - fetch_all_tests
-            - run_tests_onnxruntime:
-                requires:
-                    - fetch_all_tests
-            - run_tests_hub:
-                requires:
-                    - fetch_all_tests
-            - run_tests_layoutlmv2_and_v3:
-                requires:
-                    - fetch_all_tests
+            - run_examples_torch_all
+            - run_examples_tensorflow_all
+            - run_examples_flax_all
+            - run_tests_torch_and_tf_all
+            - run_tests_torch_and_flax_all
+            - run_tests_torch_all
+            - run_tests_tf_all
+            - run_tests_flax_all
+            - run_tests_pipelines_torch_all
+            - run_tests_pipelines_tf_all
+            - run_tests_onnxruntime_all
+            - run_tests_hub_all
 
 #    tpu_testing_jobs:
 #        triggers:
diff --git a/examples/tensorflow/test_tensorflow_examples.py b/examples/tensorflow/test_tensorflow_examples.py
index f4b383eabe5303..9b692ce80cbdd6 100644
--- a/examples/tensorflow/test_tensorflow_examples.py
+++ b/examples/tensorflow/test_tensorflow_examples.py
@@ -157,7 +157,6 @@ def test_run_mlm(self):
             --do_eval
             --prediction_loss_only
             --num_train_epochs=1
-            --learning_rate=1e-4
         """.split()
 
         with patch.object(sys, "argv", testargs):
diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py
index 170315fe2d49ed..33b14ab8a67005 100644
--- a/src/transformers/training_args.py
+++ b/src/transformers/training_args.py
@@ -119,6 +119,7 @@ class OptimizerNames(ExplicitEnum):
 
 @dataclass
 class TrainingArguments:
+    framework = "pt"
     """
     TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
     itself**.
diff --git a/src/transformers/training_args_tf.py b/src/transformers/training_args_tf.py
index b3068b211a6d35..0fee48197d0b70 100644
--- a/src/transformers/training_args_tf.py
+++ b/src/transformers/training_args_tf.py
@@ -28,6 +28,7 @@
 
 @dataclass
 class TFTrainingArguments(TrainingArguments):
+    framework = "tf"
     """
     TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop
     itself**.
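
A minimal sketch (illustration only, not part of the patch above) of what the class-level `framework` attribute added in training_args.py and training_args_tf.py enables: shared example code can branch on the training backend without framework-specific imports or isinstance checks. The helper name `uses_pytorch` is hypothetical; the sketch assumes only the two attributes added by this diff.

    from transformers import TrainingArguments

    def uses_pytorch(args: TrainingArguments) -> bool:
        # Hypothetical helper: `framework` is a plain class attribute, set to
        # "pt" on TrainingArguments and overridden with "tf" on
        # TFTrainingArguments, so it can be read from the class or an instance.
        return args.framework == "pt"

    args = TrainingArguments(output_dir="test_output")
    assert uses_pytorch(args)  # TrainingArguments carries framework == "pt"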