From 9abf017ce34ac46769dbb161a89b224323a9f801 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:23:25 +0200
Subject: [PATCH 01/95] test

---
 tests/trainer/test_trainer_seq2seq.py | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index a4b38aecb2af..beba917bdab0 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -21,8 +21,9 @@
     Seq2SeqTrainer,
     Seq2SeqTrainingArguments,
     T5Tokenizer,
+    logging,
 )
-from transformers.testing_utils import TestCasePlus, require_sentencepiece, require_torch, slow
+from transformers.testing_utils import LoggingLevel, TestCasePlus, require_sentencepiece, require_torch, slow
 from transformers.utils import is_datasets_available
@@ -195,12 +196,13 @@ def test_bad_generation_config_fail_early(self):
         training_args = Seq2SeqTrainingArguments(
             ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
         )
-        with self.assertRaises(ValueError) as exc:
-            _ = Seq2SeqTrainer(
-                model=model,
-                args=training_args,
-                tokenizer=tokenizer,
-                data_collator=data_collator,
-                compute_metrics=lambda x: {"samples": x[0].shape[0]},
-            )
-        self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
+        with LoggingLevel(logging.WARNING):
+            with self.assertRaises(ValueError) as exc:
+                _ = Seq2SeqTrainer(
+                    model=model,
+                    args=training_args,
+                    tokenizer=tokenizer,
+                    data_collator=data_collator,
+                    compute_metrics=lambda x: {"samples": x[0].shape[0]},
+                )
+            self.assertIn("The loaded generation config instance is invalid", str(exc.exception))

From d808d489fbab79b5aa540f1c9ff60b36122b5413 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:27:06 +0200
Subject: [PATCH 02/95] [test_all] check

From 20356e4f4da2c3aacbe5b7fc19e9f8ac9d4db292 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:35:45 +0200
Subject: [PATCH 03/95] test

---
 tests/trainer/test_trainer_seq2seq.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index beba917bdab0..6af4ea97f546 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -197,12 +197,12 @@ def test_bad_generation_config_fail_early(self):
             ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
         )
         with LoggingLevel(logging.WARNING):
-            with self.assertRaises(ValueError) as exc:
-                _ = Seq2SeqTrainer(
-                    model=model,
-                    args=training_args,
-                    tokenizer=tokenizer,
-                    data_collator=data_collator,
-                    compute_metrics=lambda x: {"samples": x[0].shape[0]},
-                )
-            self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
+            # with self.assertRaises(ValueError) as exc:
+            _ = Seq2SeqTrainer(
+                model=model,
+                args=training_args,
+                tokenizer=tokenizer,
+                data_collator=data_collator,
+                compute_metrics=lambda x: {"samples": x[0].shape[0]},
+            )
+            # self.assertIn("The loaded generation config instance is invalid", str(exc.exception))

From 0ea842343e5f92711503491ae9a69a1905d3c42d Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:41:58 +0200
Subject: [PATCH 04/95] [test_all] check

From d36b30156efabd6befae7fe0a621b61a5d49b1cf Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:48:16 +0200
Subject: [PATCH 05/95] test

---
 tests/trainer/test_trainer_seq2seq.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index 6af4ea97f546..beba917bdab0 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -197,12 +197,12 @@ def test_bad_generation_config_fail_early(self):
             ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
         )
         with LoggingLevel(logging.WARNING):
-            # with self.assertRaises(ValueError) as exc:
-            _ = Seq2SeqTrainer(
-                model=model,
-                args=training_args,
-                tokenizer=tokenizer,
-                data_collator=data_collator,
-                compute_metrics=lambda x: {"samples": x[0].shape[0]},
-            )
-            # self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
+            with self.assertRaises(ValueError) as exc:
+                _ = Seq2SeqTrainer(
+                    model=model,
+                    args=training_args,
+                    tokenizer=tokenizer,
+                    data_collator=data_collator,
+                    compute_metrics=lambda x: {"samples": x[0].shape[0]},
+                )
+            self.assertIn("The loaded generation config instance is invalid", str(exc.exception))

From 4960219e72dfb1a04490803acd48b64e70fbb410 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:48:29 +0200
Subject: [PATCH 06/95] [test_all] check

From 4e7ba85d93bf4b4dcabc1571d86aad43647112d0 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 16:54:21 +0200
Subject: [PATCH 07/95] [test_all] check

From 1d7c622792086ccc9aec052c138a3b53318953b5 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 17:15:12 +0200
Subject: [PATCH 08/95] test

---
 tests/trainer/test_trainer_seq2seq.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index beba917bdab0..864f51030d44 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -196,8 +196,8 @@ def test_bad_generation_config_fail_early(self):
         training_args = Seq2SeqTrainingArguments(
             ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
         )
-        with LoggingLevel(logging.WARNING):
-            with self.assertRaises(ValueError) as exc:
+        with self.assertRaises(ValueError) as exc:
+            with LoggingLevel(logging.WARNING):
                 _ = Seq2SeqTrainer(
                     model=model,
                     args=training_args,

From 73867dcedcb07dc21a1f2e67a091e18d22144471 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 17:15:20 +0200
Subject: [PATCH 09/95] [test_all] check

From a8505ca00393a0ab745bff98c9dfbac74fcd9a56 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 17:24:41 +0200
Subject: [PATCH 10/95] test

---
 tests/trainer/test_trainer_seq2seq.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index 864f51030d44..682662e7f728 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -205,4 +205,4 @@ def test_bad_generation_config_fail_early(self):
                     data_collator=data_collator,
                     compute_metrics=lambda x: {"samples": x[0].shape[0]},
                 )
-            self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
+        self.assertIn("The loaded generation config instance is invalid", str(exc.exception))

From 7cdddbe31b31804eca22914a0030dfc3e0236eec Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 17:36:55 +0200
Subject: [PATCH 11/95] [test_all] check

From a1a4b7b70f79af0acfd7eb802358749fdcd1d079 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 18:11:33 +0200
Subject: [PATCH 12/95] test

---
 src/transformers/trainer_seq2seq.py   | 1 +
 tests/trainer/test_trainer_seq2seq.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index b6bce1b57d5e..a737b3944c69 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -112,6 +112,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
         try:
+            logger.setLevel(logging.WARNING)
             with warnings.catch_warnings(record=True) as caught_warnings:
                 gen_config.validate()
             if len(caught_warnings) > 0:
diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index 682662e7f728..d31e4e8d6016 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -23,7 +23,7 @@
     T5Tokenizer,
     logging,
 )
-from transformers.testing_utils import LoggingLevel, TestCasePlus, require_sentencepiece, require_torch, slow
+from transformers.testing_utils import LoggingLevel, TestCasePlus, is_flaky, require_sentencepiece, require_torch, slow
 from transformers.utils import is_datasets_available

From 3c694a71b08c3d20baeeec6b602298fdf0a2114a Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 18:16:31 +0200
Subject: [PATCH 13/95] [test_all] check

From db4a05366f6d9f188396f303353e81a797da4d03 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 18:39:01 +0200
Subject: [PATCH 14/95] test

---
 src/transformers/trainer_seq2seq.py   |  1 -
 tests/trainer/test_trainer_seq2seq.py | 18 ++++++++----------
 2 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index a737b3944c69..b6bce1b57d5e 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -112,7 +112,6 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
         try:
-            logger.setLevel(logging.WARNING)
             with warnings.catch_warnings(record=True) as caught_warnings:
                 gen_config.validate()
             if len(caught_warnings) > 0:
diff --git a/tests/trainer/test_trainer_seq2seq.py b/tests/trainer/test_trainer_seq2seq.py
index d31e4e8d6016..f3bb5d3ab983 100644
--- a/tests/trainer/test_trainer_seq2seq.py
+++ b/tests/trainer/test_trainer_seq2seq.py
@@ -196,13 +196,11 @@ def test_bad_generation_config_fail_early(self):
         training_args = Seq2SeqTrainingArguments(
             ".", predict_with_generate=True, generation_config=gen_config, report_to="none"
         )
-        with self.assertRaises(ValueError) as exc:
-            with LoggingLevel(logging.WARNING):
-                _ = Seq2SeqTrainer(
-                    model=model,
-                    args=training_args,
-                    tokenizer=tokenizer,
-                    data_collator=data_collator,
-                    compute_metrics=lambda x: {"samples": x[0].shape[0]},
-                )
-        self.assertIn("The loaded generation config instance is invalid", str(exc.exception))
+        _ = Seq2SeqTrainer(
+            model=model,
+            args=training_args,
+            tokenizer=tokenizer,
+            data_collator=data_collator,
+            compute_metrics=lambda x: {"samples": x[0].shape[0]},
+        )
+

From 37b72efd909feba4bcf6178ab7e494f8cd152238 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 18:50:47 +0200
Subject: [PATCH 15/95] [test_all] check

From 84f394faac5325506c746f72d94aa9f61316c9af Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 19:06:42 +0200
Subject: [PATCH 16/95] test

---
 src/transformers/trainer_seq2seq.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index b6bce1b57d5e..eedd67e24f41 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -114,13 +114,8 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         try:
             with warnings.catch_warnings(record=True) as caught_warnings:
                 gen_config.validate()
-            if len(caught_warnings) > 0:
-                raise ValueError(str([w.message for w in caught_warnings]))
-        except ValueError as exc:
-            raise ValueError(
-                "The loaded generation config instance is invalid -- `GenerationConfig.validate()` throws warnings "
-                "and/or exceptions. Fix these issues to train your model.\n\nThrown during validation:\n" + str(exc)
-            )
+            if len(caught_warnings) == 0:
+                raise ValueError("Hello")
         return gen_config

     def evaluate(

From d5261bc4d19e2c6151134085117c97264a8c216a Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 19:06:45 +0200
Subject: [PATCH 17/95] [test_all] check

From f7a7dabb721ce19406d0d29c63a40763f79c0ef4 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 19:18:36 +0200
Subject: [PATCH 18/95] test

---
 src/transformers/trainer_seq2seq.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index eedd67e24f41..ca64eb4f08ac 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -111,11 +111,11 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
-        try:
-            with warnings.catch_warnings(record=True) as caught_warnings:
-                gen_config.validate()
-            if len(caught_warnings) == 0:
-                raise ValueError("Hello")
+
+        with warnings.catch_warnings(record=True) as caught_warnings:
+            gen_config.validate()
+        if len(caught_warnings) == 0:
+            raise ValueError("Hello")
         return gen_config

     def evaluate(

From f5852589de288824d0295a1ab4ea71736f406b1f Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 19:18:39 +0200
Subject: [PATCH 19/95] [test_all] check

From 0225fe1f5f2c37e7b2ac85c8bf6ef01499bd5a8c Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 19:47:25 +0200
Subject: [PATCH 20/95] test

---
 src/transformers/generation/configuration_utils.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py
index 8ba17a6a350f..13c6c707513d 100644
--- a/src/transformers/generation/configuration_utils.py
+++ b/src/transformers/generation/configuration_utils.py
@@ -546,6 +546,8 @@ def validate(self, is_init=False):
                     greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
                     UserWarning,
                 )
+            else:
+                raise ValueError("bad bad")
            if self.min_p is not None:
                warnings.warn(
                    greedy_wrong_parameter_msg.format(flag_name="min_p", flag_value=self.min_p),

From 84a1eb8c2cb795e2c795590e0520ffb21a301d4c Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 19:47:28 +0200
Subject: [PATCH 21/95] [test_all] check

From d81e0f4b19ef460f2571407de5699e11b77db1aa Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 20:07:32 +0200
Subject: [PATCH 22/95] test

---
 src/transformers/generation/configuration_utils.py | 7 ++-----
 src/transformers/trainer_seq2seq.py                 | 8 ++++----
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py
index 13c6c707513d..2c386c90a8b4 100644
--- a/src/transformers/generation/configuration_utils.py
+++ b/src/transformers/generation/configuration_utils.py
@@ -542,12 +542,9 @@ def validate(self, is_init=False):
                     UserWarning,
                 )
             if self.top_p is not None and self.top_p != 1.0:
-                warnings.warn(
-                    greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
-                    UserWarning,
-                )
-            else:
                 raise ValueError("bad bad")
+            else:
+                pass
             if self.min_p is not None:
                 warnings.warn(
                     greedy_wrong_parameter_msg.format(flag_name="min_p", flag_value=self.min_p),
diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index ca64eb4f08ac..3e87e534ad0d 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -112,10 +112,10 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
-        with warnings.catch_warnings(record=True) as caught_warnings:
-            gen_config.validate()
-        if len(caught_warnings) == 0:
-            raise ValueError("Hello")
+        # with warnings.catch_warnings(record=True) as caught_warnings:
+        gen_config.validate()
+        # if len(caught_warnings) == 0:
+        #     raise ValueError("Hello")
         return gen_config

     def evaluate(

From b72d14655fb6dd29498c8c6aa45f633c779ae621 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Tue, 9 Jul 2024 20:07:36 +0200
Subject: [PATCH 23/95] [test_all] check

From 45f08975c2e97d2fa01b373fdac9911162680e98 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 09:43:50 +0200
Subject: [PATCH 24/95] fix

---
 .circleci/create_circleci_config.py | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index d783488caecc..4c2d31c558a7 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -421,27 +421,14 @@ def job_name(self):
 )

 REGULAR_TESTS = [
-    torch_and_tf_job,
-    torch_and_flax_job,
     torch_job,
-    tf_job,
-    flax_job,
-    custom_tokenizers_job,
-    hub_job,
-    onnx_job,
-    exotic_models_job,
-    tokenization_job
 ]
 EXAMPLES_TESTS = [
-    examples_torch_job,
-    examples_tensorflow_job,
 ]
 PIPELINE_TESTS = [
-    pipelines_torch_job,
-    pipelines_tf_job,
 ]
-REPO_UTIL_TESTS = [repo_utils_job]
-DOC_TESTS = [doc_test_job]
+REPO_UTIL_TESTS = []
+DOC_TESTS = []


 def create_circleci_config(folder=None):

From c5641659ba5f28dfc95e232a795544fac35610cc Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 09:43:55 +0200
Subject: [PATCH 25/95] [test_all] check

From 224abe7784e24f088e4e6c39ce3328d5012b427f Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 09:57:38 +0200
Subject: [PATCH 26/95] fix

---
 src/transformers/generation/configuration_utils.py | 6 +++++-
 src/transformers/trainer_seq2seq.py                 | 8 ++++----
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/transformers/generation/configuration_utils.py b/src/transformers/generation/configuration_utils.py
index 2c386c90a8b4..ee35485a36a9 100644
--- a/src/transformers/generation/configuration_utils.py
+++ b/src/transformers/generation/configuration_utils.py
@@ -542,7 +542,11 @@ def validate(self, is_init=False):
                     UserWarning,
                 )
             if self.top_p is not None and self.top_p != 1.0:
-                raise ValueError("bad bad")
+                # raise ValueError("bad bad")
+                warnings.warn(
+                    greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
+                    UserWarning,
+                )
             else:
                 pass
             if self.min_p is not None:
diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 3e87e534ad0d..a4e4183a0fb3 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -112,10 +112,10 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
-        # with warnings.catch_warnings(record=True) as caught_warnings:
-        gen_config.validate()
-        # if len(caught_warnings) == 0:
-        #     raise ValueError("Hello")
+        with warnings.catch_warnings(record=True) as caught_warnings:
+            gen_config.validate()
+        if len(caught_warnings) == 0:
+            raise ValueError("not captured")
         return gen_config

     def evaluate(

From 9dae6b9f07fe3e8a35e5caa755fa973daa9e4e8d Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 09:57:41 +0200
Subject: [PATCH 27/95] [test_all] check

From a24d5ff0ee78991b652d07e3f4fbf918b2e527d8 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:21:38 +0200
Subject: [PATCH 28/95] fix

---
 src/transformers/trainer_seq2seq.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index a4e4183a0fb3..ba27096ac6ca 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -114,8 +114,14 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         with warnings.catch_warnings(record=True) as caught_warnings:
             gen_config.validate()
-        if len(caught_warnings) == 0:
-            raise ValueError("not captured")
+
+        from transformers.utils.logging import _get_library_root_logger, get_logger
+        assert _get_library_root_logger().level == 30
+        assert str(logger) == ""
+        assert get_logger("py.warnings").level == 0
+
+        # if len(caught_warnings) == 0:
+        #     raise ValueError("not captured")
         return gen_config

     def evaluate(

From e539ee2d989ced0994b688747ca0e9197b55854b Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:21:41 +0200
Subject: [PATCH 29/95] [test_all] check

From 08732e95ef939a0f3b1f2372366209391e9ff4e2 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:35:20 +0200
Subject: [PATCH 30/95] fix

---
 src/transformers/trainer_seq2seq.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index ba27096ac6ca..4751198cbae9 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -112,16 +112,22 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
+        from transformers.utils.logging import _get_library_root_logger, get_logger
+
+        assert _get_library_root_logger().level == 30
+        assert str(logger) == ""
+        assert get_logger("py.warnings").level == 0
+
         with warnings.catch_warnings(record=True) as caught_warnings:
             gen_config.validate()

-        from transformers.utils.logging import _get_library_root_logger, get_logger
         assert _get_library_root_logger().level == 30
         assert str(logger) == ""
         assert get_logger("py.warnings").level == 0

-        # if len(caught_warnings) == 0:
-        #     raise ValueError("not captured")
+        if len(caught_warnings) == 0:
+            raise ValueError("not captured")
+
         return gen_config

     def evaluate(

From de711fdca24702ddff454034f6b0dbb9e75b58ca Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:35:23 +0200
Subject: [PATCH 31/95] [test_all] check

From dc20fc737017f4e0b4427f646e4edc3a733d05e2 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:48:48 +0200
Subject: [PATCH 32/95] fix

---
 src/transformers/trainer_seq2seq.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 4751198cbae9..e2f14f34fd80 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -112,7 +112,8 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # Strict validation to fail early. `GenerationConfig.save_pretrained()`, run at the end of training, throws
         # an exception if there are warnings at validation time.
-        from transformers.utils.logging import _get_library_root_logger, get_logger
+        from transformers.utils.logging import _get_library_root_logger, get_logger, captureWarnings
+        captureWarnings(True)

         assert _get_library_root_logger().level == 30
         assert str(logger) == ""

From 7ebb9ca2f65135fcdfb0ce64b59043b8be15337e Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:54:44 +0200
Subject: [PATCH 33/95] fix

---
 src/transformers/trainer_seq2seq.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index e2f14f34fd80..6cc049838117 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -124,7 +124,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         assert _get_library_root_logger().level == 30
         assert str(logger) == ""
-        assert get_logger("py.warnings").level == 0
+        assert get_logger("py.warnings").level == 30

         if len(caught_warnings) == 0:
             raise ValueError("not captured")

From ca5e4d082a2bd7dfd5721aa82fa01fa63dabea21 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 10:54:50 +0200
Subject: [PATCH 34/95] [test_all] check

From c35aff902c161f552c5b7014e228e5b260ccbd58 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 11:03:49 +0200
Subject: [PATCH 35/95] fix

---
 src/transformers/trainer_seq2seq.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 6cc049838117..7c9e096de93d 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -117,7 +117,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         assert _get_library_root_logger().level == 30
         assert str(logger) == ""
-        assert get_logger("py.warnings").level == 0
+        assert get_logger("py.warnings").level == 30

         with warnings.catch_warnings(record=True) as caught_warnings:
             gen_config.validate()

From 933bf12253de27e6cd4589c27bf5951e866707f0 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 11:03:55 +0200
Subject: [PATCH 36/95] [test_all] check

From d5e5610fbe0620e9341c78e03756a53e28137dff Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 11:21:42 +0200
Subject: [PATCH 37/95] fix

---
 src/transformers/trainer_seq2seq.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 7c9e096de93d..4fc806ff4dd8 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -113,7 +113,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # an exception if there are warnings at validation time.
         from transformers.utils.logging import _get_library_root_logger, get_logger, captureWarnings
-        captureWarnings(True)
+        captureWarnings(False)

         assert _get_library_root_logger().level == 30
         assert str(logger) == ""

From cc7f414ec58b5ea2dc10f78f4abc7f28616823e1 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 11:25:40 +0200
Subject: [PATCH 38/95] fix

---
 src/transformers/trainer_seq2seq.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 4fc806ff4dd8..b7840f23dbb9 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -116,6 +116,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         captureWarnings(False)

         assert _get_library_root_logger().level == 30
+        assert logger.level == 0
         assert str(logger) == ""
         assert get_logger("py.warnings").level == 30
@@ -123,6 +124,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
             gen_config.validate()

         assert _get_library_root_logger().level == 30
+        assert logger.level == 0
         assert str(logger) == ""
         assert get_logger("py.warnings").level == 30

From ab1a5d374d3ce6ab5a83c074a0e03dc2e5145df3 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:05:33 +0200
Subject: [PATCH 39/95] fix

---
 src/transformers/trainer_seq2seq.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index b7840f23dbb9..949cc295c314 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -116,7 +116,8 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         captureWarnings(False)

         assert _get_library_root_logger().level == 30
-        assert logger.level == 0
+        logger.setLevel(30)
+        assert logger.level == 30
         assert str(logger) == ""
         assert get_logger("py.warnings").level == 30
@@ -124,7 +125,8 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
             gen_config.validate()

         assert _get_library_root_logger().level == 30
-        assert logger.level == 0
+        logger.setLevel(30)
+        assert logger.level == 30
         assert str(logger) == ""
         assert get_logger("py.warnings").level == 30

From 03c39ce10ce786e6725fc3066743ce0d7f2fd3d9 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:20:59 +0200
Subject: [PATCH 40/95] fix

---
 src/transformers/trainer_seq2seq.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 949cc295c314..4d756e09cb9c 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -120,6 +120,10 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         assert logger.level == 30
         assert str(logger) == ""
         assert get_logger("py.warnings").level == 30
+        assert len(_get_library_root_logger().handlers) == 1
+        assert len(logger.handlers) == 0
+        assert len(get_logger("py.warnings").handlers) == 0
+        assert _get_library_root_logger().handlers[0].level == 0

         with warnings.catch_warnings(record=True) as caught_warnings:
             gen_config.validate()

From 749b660c0400fcd4090f7f7c061d4417e48a1c38 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:21:03 +0200
Subject: [PATCH 41/95] [test_all] check

From 5d87dfbf3e0d529c2fbcad2822b56e67adf1d3eb Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:37:47 +0200
Subject: [PATCH 42/95] fix

---
 src/transformers/trainer_seq2seq.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 4d756e09cb9c..cea4053431f6 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -122,7 +122,6 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         assert get_logger("py.warnings").level == 30
         assert len(_get_library_root_logger().handlers) == 1
         assert len(logger.handlers) == 0
-        assert len(get_logger("py.warnings").handlers) == 0
         assert _get_library_root_logger().handlers[0].level == 0

         with warnings.catch_warnings(record=True) as caught_warnings:
@@ -135,7 +135,6 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         assert get_logger("py.warnings").level == 30

         if len(caught_warnings) == 0:
+            # assert len(get_logger("py.warnings").handlers) == 0
+            logger.warning(f'{get_logger("py.warnings").handlers}')
             raise ValueError("not captured")

         return gen_config

From 6fff0e6cff9404e8716b8d5631c690e466f0cc3d Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:38:07 +0200
Subject: [PATCH 43/95] [test_all] check

From a481c6bca4caec4950d1aadb584ae2dd6474a788 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:47:07 +0200
Subject: [PATCH 44/95] fix

---
 src/transformers/trainer_seq2seq.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index cea4053431f6..2da4e5a9aad4 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -113,7 +113,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         # an exception if there are warnings at validation time.
         from transformers.utils.logging import _get_library_root_logger, get_logger, captureWarnings
-        captureWarnings(False)
+        # captureWarnings(False)

         assert _get_library_root_logger().level == 30
         logger.setLevel(30)

From 43b7492e6312ace9e21486c54ca725faa43c58bf Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 12:55:19 +0200
Subject: [PATCH 45/95] fix

---
 src/transformers/trainer_seq2seq.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py
index 2da4e5a9aad4..269c16069e35 100644
--- a/src/transformers/trainer_seq2seq.py
+++ b/src/transformers/trainer_seq2seq.py
@@ -119,7 +119,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         logger.setLevel(30)
         assert logger.level == 30
         assert str(logger) == ""
-        assert get_logger("py.warnings").level == 30
+        # assert get_logger("py.warnings").level == 30
         assert len(_get_library_root_logger().handlers) == 1
         assert len(logger.handlers) == 0
         assert _get_library_root_logger().handlers[0].level == 0
@@ -131,7 +131,7 @@ def load_generation_config(gen_config_arg: Union[str, GenerationConfig]) -> Gene
         logger.setLevel(30)
         assert logger.level == 30
         assert str(logger) == ""
-        assert get_logger("py.warnings").level == 30
+        # assert get_logger("py.warnings").level == 30

         if len(caught_warnings) == 0:
             # assert len(get_logger("py.warnings").handlers) == 0
             logger.warning(f'{get_logger("py.warnings").handlers}')

From 3bc60ccd06e8c87be57fed1999e69e182f57c56f Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 13:07:50 +0200
Subject: [PATCH 46/95] fix

---
 .circleci/create_circleci_config.py | 31 ++++++++++++++-------------
 1 file changed, 17 insertions(+), 14 deletions(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index d783488caecc..b88233e8d5d1 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -127,20 +127,23 @@ def to_dict(self):
         # junit familiy xunit1 is necessary to support splitting on test name or class name with circleci split
         test_command += f"python3 -m pytest -rsfE -p no:warnings -o junit_family=xunit1 --tb=short --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)

-        if self.parallelism == 1:
-            if self.tests_to_run is None:
-                test_command += " << pipeline.parameters.tests_to_run >>"
-            else:
-                test_command += " " + " ".join(self.tests_to_run)
-        else:
-            # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
-            tests = self.tests_to_run
-            if tests is None:
-                folder = os.environ["test_preparation_dir"]
-                test_file = os.path.join(folder, "filtered_test_list.txt")
-                if os.path.exists(test_file):  # We take this job's tests from the filtered test_list.txt
-                    with open(test_file) as f:
-                        tests = f.read().split(" ")
+        tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
+        tests = tests.split()
+
+        # if self.parallelism == 1:
+        #     if self.tests_to_run is None:
+        #         test_command += " << pipeline.parameters.tests_to_run >>"
+        #     else:
+        #         test_command += " " + " ".join(self.tests_to_run)
+        # else:
+        #     # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
+        #     tests = self.tests_to_run
+        #     if tests is None:
+        #         folder = os.environ["test_preparation_dir"]
+        #         test_file = os.path.join(folder, "filtered_test_list.txt")
+        #         if os.path.exists(test_file):  # We take this job's tests from the filtered test_list.txt
+        #             with open(test_file) as f:
+        #                 tests = f.read().split(" ")

         # expand the test list
         if tests == ["tests"]:

From a5e9bded0ac9a7fadc18be83ee4ccc766d16a4c2 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 13:07:53 +0200
Subject: [PATCH 47/95] [test_all] check

From f243cce7e6691f205cf1198723cfb1fa882cda2a Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 13:11:22 +0200
Subject: [PATCH 48/95] fix

---
 .circleci/create_circleci_config.py | 38 ++++++++++++++++-------------
 1 file changed, 21 insertions(+), 17 deletions(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index b88233e8d5d1..9866bc688c9f 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -127,23 +127,27 @@ def to_dict(self):
         # junit familiy xunit1 is necessary to support splitting on test name or class name with circleci split
         test_command += f"python3 -m pytest -rsfE -p no:warnings -o junit_family=xunit1 --tb=short --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)

-        tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
-        tests = tests.split()
-
-        # if self.parallelism == 1:
-        #     if self.tests_to_run is None:
-        #         test_command += " << pipeline.parameters.tests_to_run >>"
-        #     else:
-        #         test_command += " " + " ".join(self.tests_to_run)
-        # else:
-        #     # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
-        #     tests = self.tests_to_run
-        #     if tests is None:
-        #         folder = os.environ["test_preparation_dir"]
-        #         test_file = os.path.join(folder, "filtered_test_list.txt")
-        #         if os.path.exists(test_file):  # We take this job's tests from the filtered test_list.txt
-        #             with open(test_file) as f:
-        #                 tests = f.read().split(" ")
+        if self.parallelism == 1:
+            if self.tests_to_run is None:
+                test_command += " << pipeline.parameters.tests_to_run >>"
+            else:
+                test_command += " " + " ".join(self.tests_to_run)
+        else:
+            # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
+
+            tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
+            tests = tests.split()
+
+            # tests = self.tests_to_run
+            if tests is None:
+                folder = os.environ["test_preparation_dir"]
+                test_file = os.path.join(folder, "filtered_test_list.txt")
+                if os.path.exists(test_file):  # We take this job's tests from the filtered test_list.txt
+                    with open(test_file) as f:
+                        tests = f.read().split(" ")

         # expand the test list
         if tests == ["tests"]:

From 4b0f59b4d7b91b9a32eea108a37cea56e2cb791e Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 13:11:25 +0200
Subject: [PATCH 49/95] [test_all] check

From 0c2f50725e344bd57f19af1e4d197192882ecbe3 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 13:53:18 +0200
Subject: [PATCH 50/95] fix

---
 .circleci/create_circleci_config.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 9866bc688c9f..3e06aec9c2c8 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -139,6 +139,7 @@ def to_dict(self):
             # We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)

             tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
+            tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer"
             tests = tests.split()

From d4508b9528b6cf62516e670724c79df61d8acc10 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 13:53:21 +0200
Subject: [PATCH 51/95] [test_all] check

From 61c1d2faf52cd71c8592cc1dee0c26e6ba9d06f7 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:11:32 +0200
Subject: [PATCH 52/95] fix

---
 .circleci/create_circleci_config.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 3e06aec9c2c8..ca39bfec86f0 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -140,6 +140,7 @@ def to_dict(self):
             tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
             tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer"
+            tests = ""
             tests = tests.split()

             # tests = self.tests_to_run
@@ -178,6 +179,7 @@ def to_dict(self):

             # Each executor to run ~10 tests
             n_executors = max(len(expanded_tests) // 10, 1)
+            n_executors = 4
             # Avoid empty test list on some executor(s) or launching too many executors
             if n_executors > self.parallelism:
                 n_executors = self.parallelism

From e5d63e952b5c07f81ae39b8ba27e4fb415580f5b Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:11:35 +0200
Subject: [PATCH 53/95] [test_all] check

From b5159c812586fa3faf4c00eb70fcbd81f2a63898 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:11:44 +0200
Subject: [PATCH 54/95] [test_all] check

From d4b870625e6fb24d615e7b5e95f785dc85daef7a Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:21:22 +0200
Subject: [PATCH 55/95] fix

---
 .circleci/create_circleci_config.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index ca39bfec86f0..978be8b6dc74 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -140,7 +140,6 @@ def to_dict(self):
             tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
             tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer"
-            tests = ""
             tests = tests.split()

From d791e93cebd8fbcd2d8ab82971d1b3c90b34170c Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:36:19 +0200
Subject: [PATCH 56/95] fix

---
 .circleci/create_circleci_config.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 978be8b6dc74..85f1f7cff394 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -140,6 +140,7 @@ def to_dict(self):
             tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
             tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer"
+            tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/trainer"
             tests = tests.split()

             # tests = self.tests_to_run

From 679dd453604ab130de428f22541fed8543a368de Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:36:22 +0200
Subject: [PATCH 57/95] [test_all] check

From 92f9ea4003ee98008ec93f0defa8ea3ca8dfcc45 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:46:05 +0200
Subject: [PATCH 58/95] fix

---
 .circleci/create_circleci_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 85f1f7cff394..c45c3212100e 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -179,7 +179,7 @@ def to_dict(self):

             # Each executor to run ~10 tests
             n_executors = max(len(expanded_tests) // 10, 1)
-            n_executors = 4
+            n_executors = 1
             # Avoid empty test list on some executor(s) or launching too many executors
             if n_executors > self.parallelism:
                 n_executors = self.parallelism

From 6a3e3e0e9a925beba02e81e7fb69c757679cbeb7 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 14:53:50 +0200
Subject: [PATCH 59/95] fix

---
 .circleci/create_circleci_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index c45c3212100e..d5572b3b4c48 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -140,7 +140,7 @@ def to_dict(self):
             tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
             tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer"
-            tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/trainer"
+            tests = "tests/benchmark tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/trainer"
             tests = tests.split()

             # tests = self.tests_to_run

From c9ab9105820930bbf4f57028206f80890c7e60c3 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 15:01:25 +0200
Subject: [PATCH 60/95] fix

---
 .circleci/create_circleci_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index d5572b3b4c48..8a7a4e2db468 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -140,7 +140,7 @@ def to_dict(self):
             tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer"
             tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer"
-            tests = "tests/benchmark tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/trainer"
+            # tests = "tests/benchmark
tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = tests.split() # tests = self.tests_to_run From aa04cda2c3605a61eb81c62950880fc4e90cc8db Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:01:28 +0200 Subject: [PATCH 61/95] [test_all] check From 173348542b32b82b6464ca6a61379f7f85a52a50 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:06:28 +0200 Subject: [PATCH 62/95] fix --- .circleci/create_circleci_config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 8a7a4e2db468..2b56fca59c15 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -141,6 +141,7 @@ def to_dict(self): tests = "tests/benchmark tests/generation tests/models/autoformer/test_modeling_autoformer.py tests/models/big_bird/test_modeling_big_bird.py tests/models/blip/test_modeling_blip.py tests/models/camembert/test_modeling_camembert.py tests/models/clvp/test_modeling_clvp.py tests/models/convnextv2/test_modeling_convnextv2.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/deit/test_modeling_deit.py tests/models/dit/test_modeling_dit.py tests/models/efficientnet/test_modeling_efficientnet.py tests/models/esm/test_modeling_esmfold.py tests/models/focalnet/test_modeling_focalnet.py tests/models/git/test_modeling_git.py tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py tests/models/idefics/test_modeling_idefics.py tests/models/jamba/test_modeling_jamba.py tests/models/led/test_modeling_led.py tests/models/llava_next_video/test_modeling_llava_next_video.py tests/models/mamba/test_modeling_mamba.py tests/models/mbart/test_modeling_mbart.py tests/models/mobilebert/test_modeling_mobilebert.py tests/models/mpt/test_modeling_mpt.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/owlv2/test_modeling_owlv2.py tests/models/pegasus_x/test_modeling_pegasus_x.py tests/models/plbart/test_modeling_plbart.py tests/models/qwen2/test_modeling_qwen2.py tests/models/rembert/test_modeling_rembert.py tests/models/rt_detr/test_modeling_rt_detr.py tests/models/segformer/test_modeling_segformer.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/superpoint/test_modeling_superpoint.py tests/models/t5/test_modeling_t5.py tests/models/trocr/test_modeling_trocr.py tests/models/univnet/test_modeling_univnet.py tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py tests/models/vitdet/test_modeling_vitdet.py tests/models/wav2vec2_conformer/test_modeling_wav2vec2_conformer.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/models/zoedepth/test_modeling_zoedepth.py tests/test_configuration_common.py tests/test_modeling_tf_common.py tests/trainer" tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" # tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py 
tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" + tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = tests.split() # tests = self.tests_to_run From 98b21bec077b9b8e2bc017b1b4c9594e21825e51 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:06:42 +0200 Subject: [PATCH 63/95] [test_all] check From b98bf0658eb4564ad2afec708aa61d4064393169 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:10:05 +0200 Subject: [PATCH 64/95] fix --- .circleci/create_circleci_config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 2b56fca59c15..917d3b8b3395 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -142,6 +142,8 @@ def to_dict(self): tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" # tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" + tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/trainer" + tests = tests.split() # tests = self.tests_to_run From faa47ae0249e52eb227d85499009d87086b70923 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:16:00 +0200 Subject: [PATCH 65/95] fix --- .circleci/create_circleci_config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 917d3b8b3395..d856c0312c9a 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -142,7 +142,9 @@ def to_dict(self): tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" # tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" - tests = "tests/benchmark 
tests/models/speech_to_text/test_modeling_speech_to_text.py tests/trainer" + #tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/trainer" + + tests = "tests/benchmark tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = tests.split() From 120938a10a20d0a96bb2ce01468fe0c5ef71295d Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:16:08 +0200 Subject: [PATCH 66/95] [test_all] check From 727e673563ac9b2dff2d39e5fed524e7a3be991d Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:32:32 +0200 Subject: [PATCH 67/95] fix --- .circleci/create_circleci_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index d856c0312c9a..b6fa7562da58 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -143,9 +143,10 @@ def to_dict(self): # tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" #tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/trainer" - tests = "tests/benchmark tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" + tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/trainer" + tests = tests.split() # tests = self.tests_to_run From 7ce8dca850a1117ce985fa251a48b5c001354c28 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:32:34 +0200 Subject: [PATCH 68/95] [test_all] check From 61bf3682f1863c3696106f0aaca5c3ddbca05c23 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:37:22 +0200 Subject: [PATCH 69/95] fix --- .circleci/create_circleci_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index b6fa7562da58..652dd1f72fd4 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -145,7 +145,8 @@ def to_dict(self): #tests = "tests/benchmark tests/models/speech_to_text/test_modeling_speech_to_text.py tests/trainer" tests = "tests/benchmark tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" - tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/trainer" + tests = "tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py" + tests = "tests/benchmark tests/models/blip/test_modeling_blip.py 
tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = tests.split() From 773bb24a113eb2b5885d82ec5e39e430f20ca8b7 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:37:26 +0200 Subject: [PATCH 70/95] [test_all] check From a91e8f610b6378b83d1bdb683f0fff41dec7b8a2 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 15:51:25 +0200 Subject: [PATCH 71/95] fix --- .circleci/create_circleci_config.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 652dd1f72fd4..35aee201c05b 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -148,8 +148,12 @@ def to_dict(self): tests = "tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py" tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" + tests = "tests/benchmark tests/trainer" + tests = tests.split() + self.pytest_num_workers = 1 + # tests = self.tests_to_run if tests is None: folder = os.environ["test_preparation_dir"] From 0c99218685eed25627e4749d1d563c2b282bb989 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:08:38 +0200 Subject: [PATCH 72/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 35aee201c05b..015f0531b0b1 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -146,7 +146,7 @@ def to_dict(self): tests = "tests/benchmark tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = "tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py" - tests = "tests/benchmark tests/models/blip/test_modeling_blip.py tests/models/data2vec/test_modeling_data2vec_vision.py tests/models/esm/test_modeling_esmfold.py tests/models/idefics/test_modeling_idefics.py tests/models/mamba/test_modeling_mamba.py tests/models/nllb_moe/test_modeling_nllb_moe.py tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" + tests = "tests/benchmark tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" 
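# A minimal, self-contained sketch of what the hard-coded `tests` strings in the
# hunks above feed into: the script splits the space-separated paths and derives
# an executor count from them. The `n_executors` arithmetic is copied from the
# earlier create_circleci_config.py hunks in this series; `parallelism` below is
# a placeholder value, not the script's real attribute.
tests = "tests/benchmark tests/models/qwen2/test_modeling_qwen2.py tests/trainer"
expanded_tests = tests.split()

parallelism = 8  # assumed cap; in the real script this comes from the job definition
n_executors = max(len(expanded_tests) // 10, 1)  # roughly one executor per ~10 tests
if n_executors > parallelism:
    n_executors = parallelism
print(expanded_tests, n_executors)  # three paths -> 1 executor with these numbers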
tests = "tests/benchmark tests/trainer" From 01ca7f84b5da60d857cd01e31a56a5ff66e23edd Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:08:43 +0200 Subject: [PATCH 73/95] [test_all] check From 37aa99d7d1133630c13ade6e571059a8fab668a9 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:14:35 +0200 Subject: [PATCH 74/95] fix --- .circleci/create_circleci_config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 015f0531b0b1..b3bf0668078f 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -146,9 +146,9 @@ def to_dict(self): tests = "tests/benchmark tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" tests = "tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py" - tests = "tests/benchmark tests/models/qwen2/test_modeling_qwen2.py tests/models/speech_to_text/test_modeling_speech_to_text.py tests/models/univnet/test_modeling_univnet.py tests/models/xlm_roberta/test_modeling_xlm_roberta.py tests/trainer" + tests = "tests/benchmark tests/models/qwen2/test_modeling_qwen2.py tests/trainer" - tests = "tests/benchmark tests/trainer" + tests = "tests/trainer" tests = tests.split() From ca2d28d76e64d6a4fa34c949982e04eb5ec2c8c3 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:14:38 +0200 Subject: [PATCH 75/95] [test_all] check From 6f6f3e972bc476d54ca31e4200a4841565f143b9 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:20:11 +0200 Subject: [PATCH 76/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index b3bf0668078f..feb10e05b71c 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From 4086c15606212e7e4ad6c0f25cdcd1c9375a12ac Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:26:45 +0200 Subject: [PATCH 77/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index feb10e05b71c..281cdbb1874a 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command 
+= f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From 26777f4f859931cb9a302819348c76aafcee414b Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:31:13 +0200 Subject: [PATCH 78/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 281cdbb1874a..f8a8f54f1d0f 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From 5903ea94d78bb78bfa6003175bbebc049fda5d25 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:33:07 +0200 Subject: [PATCH 79/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index f8a8f54f1d0f..a07a6451aae5 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or DataCollatorImmutabilityTest or NumpyDataCollatorIntegrationTest or NumpyDataCollatorImmutabilityTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From 026286ff897c079abe0bf1c039ecaadb3cad7c10 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:34:28 +0200 Subject: [PATCH 80/95] fix --- 
.circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index a07a6451aae5..cd7ffe4d4b1e 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or DataCollatorImmutabilityTest or NumpyDataCollatorIntegrationTest or NumpyDataCollatorImmutabilityTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or DataCollatorImmutabilityTest or NumpyDataCollatorIntegrationTest or NumpyDataCollatorImmutabilityTest or TrainerUtilsTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From 4b6ae84722ffa72977744c768ecf2677f73c316b Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:36:36 +0200 Subject: [PATCH 81/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index cd7ffe4d4b1e..de5521f7f2b1 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or DataCollatorImmutabilityTest or NumpyDataCollatorIntegrationTest or NumpyDataCollatorImmutabilityTest or TrainerUtilsTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or DataCollatorImmutabilityTest or NumpyDataCollatorIntegrationTest or NumpyDataCollatorImmutabilityTest or TrainerUtilsTest or TrainerCallbackTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From b1f986de24473883c5ca9ec36ab2bac01dabaf60 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:39:33 +0200 Subject: [PATCH 82/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index de5521f7f2b1..cf870707aff8 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerIntegrationPrerunTest or TrainerIntegrationTest or TrainerOptimizerChoiceTest or HyperParameterSearchBackendsTest or OptimizerAndModelInspectionTest or DataCollatorIntegrationTest or DataCollatorImmutabilityTest or NumpyDataCollatorIntegrationTest or NumpyDataCollatorImmutabilityTest or TrainerUtilsTest or TrainerCallbackTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k 'TrainerCallbackTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From f3777e1aca38a4b33c9f61a3262a0c83f45e2a44 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:43:37 +0200 Subject: [PATCH 83/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index cf870707aff8..60554f9f70d5 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k 'TrainerCallbackTest or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k '(TrainerCallbackTest and test_event_flow) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) test_command += " $(cat splitted_tests.txt)" if self.marker is not None: test_command += f" -m {self.marker}" From 58d7f12989d2ef93beb47b2d9b6fe5cabb3d2b05 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:43:41 +0200 Subject: [PATCH 84/95] [test_all] check From 08ed067a88d38e7f5c31394f0a1c575442e519e8 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 16:48:48 +0200 Subject: [PATCH 85/95] fix --- .circleci/create_circleci_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py index 60554f9f70d5..9cda1d721fdb 100644 --- a/.circleci/create_circleci_config.py +++ b/.circleci/create_circleci_config.py @@ -209,7 +209,7 @@ def to_dict(self): test_command = "" if self.command_timeout: test_command = f"timeout {self.command_timeout} " - test_command += f"python3 -m pytest -k '(TrainerCallbackTest and test_event_flow) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags) + test_command += f"python3 -m pytest -k '(TrainerCallbackTest and test_stateful_) or Seq2seqTrainerTester' 
-rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
         test_command += " $(cat splitted_tests.txt)"
         if self.marker is not None:
             test_command += f" -m {self.marker}"

From ca45dcc5142bfc01e81019ce475ce03161865362 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 16:53:24 +0200
Subject: [PATCH 86/95] fix

---
 .circleci/create_circleci_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 9cda1d721fdb..26d9ad960344 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -209,7 +209,7 @@ def to_dict(self):
         test_command = ""
         if self.command_timeout:
             test_command = f"timeout {self.command_timeout} "
-        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and test_stateful_) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
+        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_add_remove_callback or test_init_callback or test_missing_stateful_callback)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
         test_command += " $(cat splitted_tests.txt)"
         if self.marker is not None:
             test_command += f" -m {self.marker}"

From 9eb53997ee9d470c09730b4ba094ccb19ccbf6a2 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 16:57:01 +0200
Subject: [PATCH 87/95] fix

---
 .circleci/create_circleci_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 26d9ad960344..23b42e932fc9 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -209,7 +209,7 @@ def to_dict(self):
         test_command = ""
         if self.command_timeout:
             test_command = f"timeout {self.command_timeout} "
-        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_add_remove_callback or test_init_callback or test_missing_stateful_callback)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
+        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_add_remove_callback or test_event_flow or test_init_callback or test_missing_stateful_callback)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
         test_command += " $(cat splitted_tests.txt)"
         if self.marker is not None:
             test_command += f" -m {self.marker}"

From 1f5199ac00b8bf6f318c77770a80ec2f3eef78be Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 16:57:05 +0200
Subject: [PATCH 88/95] [test_all] check

From 90cff9e2b2b8a75874136b75d572b4b92baca729 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 17:00:19 +0200
Subject: [PATCH 89/95] fix

---
 .circleci/create_circleci_config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 23b42e932fc9..6d05829e2993 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -209,7 +209,7 @@ def to_dict(self):
         test_command = ""
         if self.command_timeout:
             test_command = f"timeout {self.command_timeout} "
-        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_add_remove_callback or test_event_flow or test_init_callback or test_missing_stateful_callback)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
+        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_event_flow)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
         test_command += " $(cat splitted_tests.txt)"
         if self.marker is not None:
             test_command += f" -m {self.marker}"

From 525be715069b3ab4270f64a80405293f890324b9 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 17:00:23 +0200
Subject: [PATCH 90/95] [test_all] check

From a9eef7ba6f9ef147ac63ae45b6310d7968802d7e Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 17:04:58 +0200
Subject: [PATCH 91/95] fix

---
 .circleci/create_circleci_config.py    | 2 +-
 tests/trainer/test_trainer_callback.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index 6d05829e2993..7a11956ad06b 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -209,7 +209,7 @@ def to_dict(self):
         test_command = ""
         if self.command_timeout:
             test_command = f"timeout {self.command_timeout} "
-        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_event_flow)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
+        test_command += f"python3 -m pytest -k '(TrainerCallbackTest and (test_add_remove_callback or test_0_event_flow or test_init_callback or test_missing_stateful_callback)) or Seq2seqTrainerTester' -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
         test_command += " $(cat splitted_tests.txt)"
         if self.marker is not None:
             test_command += f" -m {self.marker}"
diff --git a/tests/trainer/test_trainer_callback.py b/tests/trainer/test_trainer_callback.py
index edd73f29dc98..9c205de3fc11 100644
--- a/tests/trainer/test_trainer_callback.py
+++ b/tests/trainer/test_trainer_callback.py
@@ -214,7 +214,7 @@ def test_add_remove_callback(self):
         expected_callbacks.insert(0, DefaultFlowCallback)
         self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

-    def test_event_flow(self):
+    def test_0_event_flow(self):
         import warnings

         # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested

From 8123b272b5840af49ed1171ba75f147f16563f13 Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 17:05:02 +0200
Subject: [PATCH 92/95] [test_all] check

From 6fb41dae9d36e940569a965e4da936ce200c6dcd Mon Sep 17 00:00:00 2001
From: ydshieh
Date: Wed, 10 Jul 2024 17:08:18 +0200
Subject: [PATCH 93/95] fix

---
 tests/trainer/test_trainer_callback.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/trainer/test_trainer_callback.py b/tests/trainer/test_trainer_callback.py
index 9c205de3fc11..e5227b24d484 100644
--- a/tests/trainer/test_trainer_callback.py
+++ b/tests/trainer/test_trainer_callback.py
@@ -215,10 +215,10 @@ def
test_add_remove_callback(self): self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_0_event_flow(self): - import warnings - - # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested - warnings.simplefilter(action="ignore", category=UserWarning) + # import warnings + # + # # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested + # warnings.simplefilter(action="ignore", category=UserWarning) trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) trainer.train() From f195c65512edc989002a4dd866ac4a4b45e4f7d2 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 17:12:43 +0200 Subject: [PATCH 94/95] fix --- tests/trainer/test_trainer_callback.py | 95 +++++++++++++------------- 1 file changed, 48 insertions(+), 47 deletions(-) diff --git a/tests/trainer/test_trainer_callback.py b/tests/trainer/test_trainer_callback.py index e5227b24d484..b9bacfb6184e 100644 --- a/tests/trainer/test_trainer_callback.py +++ b/tests/trainer/test_trainer_callback.py @@ -215,55 +215,56 @@ def test_add_remove_callback(self): self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks) def test_0_event_flow(self): - # import warnings - # - # # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested - # warnings.simplefilter(action="ignore", category=UserWarning) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - # Independent log/save/eval - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps") - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch") - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - # A bit of everything - trainer = self.get_trainer( - callbacks=[MyTestTrainerCallback], - logging_steps=3, - save_steps=10, - eval_steps=5, - eval_strategy="steps", - ) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - # warning should be emitted for duplicated callbacks - with patch("transformers.trainer_callback.logger.warning") as warn_mock: + import warnings + + # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested + with warnings.catch_warnings(): + warnings.simplefilter(action="ignore", category=UserWarning) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + # Independent log/save/eval + trainer = 
self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps") + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch") + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + # A bit of everything trainer = self.get_trainer( - callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], + callbacks=[MyTestTrainerCallback], + logging_steps=3, + save_steps=10, + eval_steps=5, + eval_strategy="steps", ) - assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0] + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + # warning should be emitted for duplicated callbacks + with patch("transformers.trainer_callback.logger.warning") as warn_mock: + trainer = self.get_trainer( + callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], + ) + assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0] def test_stateful_callbacks(self): # Use something with non-defaults From 868bd3d525f80c988a36482b4c2e2b02492ab9d2 Mon Sep 17 00:00:00 2001 From: ydshieh Date: Wed, 10 Jul 2024 17:15:20 +0200 Subject: [PATCH 95/95] fix --- tests/trainer/test_trainer_callback.py | 90 +++++++++++++------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/tests/trainer/test_trainer_callback.py b/tests/trainer/test_trainer_callback.py index b9bacfb6184e..3bc3e4f1570b 100644 --- a/tests/trainer/test_trainer_callback.py +++ b/tests/trainer/test_trainer_callback.py @@ -218,53 +218,53 @@ def test_0_event_flow(self): import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested - with warnings.catch_warnings(): - warnings.simplefilter(action="ignore", category=UserWarning) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - # Independent log/save/eval - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps") - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch") - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, 
self.get_expected_events(trainer)) - - # A bit of everything + # with warnings.catch_warnings(): + warnings.simplefilter(action="ignore", category=UserWarning) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback]) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + # Independent log/save/eval + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, eval_strategy="steps") + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_strategy="epoch") + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + # A bit of everything + trainer = self.get_trainer( + callbacks=[MyTestTrainerCallback], + logging_steps=3, + save_steps=10, + eval_steps=5, + eval_strategy="steps", + ) + trainer.train() + events = trainer.callback_handler.callbacks[-2].events + self.assertEqual(events, self.get_expected_events(trainer)) + + # warning should be emitted for duplicated callbacks + with patch("transformers.trainer_callback.logger.warning") as warn_mock: trainer = self.get_trainer( - callbacks=[MyTestTrainerCallback], - logging_steps=3, - save_steps=10, - eval_steps=5, - eval_strategy="steps", + callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], ) - trainer.train() - events = trainer.callback_handler.callbacks[-2].events - self.assertEqual(events, self.get_expected_events(trainer)) - - # warning should be emitted for duplicated callbacks - with patch("transformers.trainer_callback.logger.warning") as warn_mock: - trainer = self.get_trainer( - callbacks=[MyTestTrainerCallback, MyTestTrainerCallback], - ) - assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0] + assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0] def test_stateful_callbacks(self): # Use something with non-defaults
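# A minimal standalone sketch (not part of the diff above) of the two
# warning-filter styles the last three patches alternate between: a filter set
# inside `warnings.catch_warnings()` is undone when the block exits, whereas a
# bare `warnings.simplefilter(...)` keeps ignoring UserWarning for the rest of
# the process. `emit()` is only a stand-in for the scatter_gather warning the
# test's XXX comment refers to.
import warnings


def emit():
    warnings.warn("stand-in for a scatter_gather warning", UserWarning)


with warnings.catch_warnings():
    warnings.simplefilter(action="ignore", category=UserWarning)
    emit()  # silenced only inside this block; previous filters are restored on exit

warnings.simplefilter(action="ignore", category=UserWarning)
emit()  # silenced here and in everything that runs later in the same process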