diff --git a/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py b/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
index c67b27f64fa1..1025fdf75fb4 100644
--- a/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
+++ b/src/transformers/models/deprecated/gptsan_japanese/tokenization_gptsan_japanese.py
@@ -495,7 +495,7 @@ def checku2e(x):
                         candidates.append((self.vocab[wd], wd, e))
             if len(candidates) > 0:
                 # the smallest token_id is adopted
-                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
+                _, wd, e = min(candidates, key=lambda x: x[0])
                 result.append(wd)
                 pos = e
             else:
diff --git a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py
index 891f77ece304..584e74a8123e 100644
--- a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py
+++ b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py
@@ -318,7 +318,7 @@ def checku2e(x):
                         candidates.append((self.vocab[wd], wd, e))
             if len(candidates) > 0:
                 # the smallest token_id is adopted
-                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
+                _, wd, e = min(candidates, key=lambda x: x[0])
                 result.append(wd)
                 pos = e
             else:
diff --git a/src/transformers/models/ovis2/image_processing_ovis2.py b/src/transformers/models/ovis2/image_processing_ovis2.py
index c235504d2d89..633a7fdee46c 100644
--- a/src/transformers/models/ovis2/image_processing_ovis2.py
+++ b/src/transformers/models/ovis2/image_processing_ovis2.py
@@ -169,10 +169,10 @@ def get_min_tile_covering_grid(
 
     if sufficient_covering_grids:
         # Prefer fewer tiles and higher covering ratio
-        return sorted(sufficient_covering_grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0][0]
+        return min(sufficient_covering_grids, key=lambda x: (x[0][0] * x[0][1], -x[1]))[0]
     else:
         # Fallback: prefer higher covering even if below threshold
-        return sorted(evaluated_grids, key=lambda x: (-x[1], x[0][0] * x[0][1]))[0][0]
+        return min(evaluated_grids, key=lambda x: (-x[1], x[0][0] * x[0][1]))[0]
 
 
 class Ovis2ImageProcessor(BaseImageProcessor):
diff --git a/src/transformers/tokenization_mistral_common.py b/src/transformers/tokenization_mistral_common.py
index d8ea3688efae..f388f46cb9d8 100644
--- a/src/transformers/tokenization_mistral_common.py
+++ b/src/transformers/tokenization_mistral_common.py
@@ -1789,7 +1789,7 @@ def from_pretrained(
             if "tekken.json" in valid_tokenizer_files:
                 tokenizer_file = "tekken.json"
             else:
-                tokenizer_file = sorted(valid_tokenizer_files)[-1]
+                tokenizer_file = max(valid_tokenizer_files)
             logger.warning(
                 f"Multiple tokenizer files found in directory: {pretrained_model_name_or_path}. Using {tokenizer_file}."
             )
diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py
index 9a80f0032e7a..f5d8a5dfbf6e 100644
--- a/tests/trainer/test_trainer.py
+++ b/tests/trainer/test_trainer.py
@@ -3478,7 +3478,7 @@ def test_resume_training_with_randomness(self):
             checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")]
             # There should be one checkpoint per epoch.
             self.assertEqual(len(checkpoints), 3)
-            checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0]
+            checkpoint_dir = min(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))
             trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir))
 
             (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
diff --git a/utils/add_pipeline_model_mapping_to_test.py b/utils/add_pipeline_model_mapping_to_test.py
index 37723bf0bb9c..ffca3b0d51a9 100644
--- a/utils/add_pipeline_model_mapping_to_test.py
+++ b/utils/add_pipeline_model_mapping_to_test.py
@@ -134,7 +134,7 @@ def find_test_class(test_file):
             break
     # Take the test class with the shortest name (just a heuristic)
     if target_test_class is None and len(test_classes) > 0:
-        target_test_class = sorted(test_classes, key=lambda x: (len(x.__name__), x.__name__))[0]
+        target_test_class = min(test_classes, key=lambda x: (len(x.__name__), x.__name__))
 
     return target_test_class
 
diff --git a/utils/create_dummy_models.py b/utils/create_dummy_models.py
index 1e5c67bb909f..4cdb30ff2b40 100644
--- a/utils/create_dummy_models.py
+++ b/utils/create_dummy_models.py
@@ -389,7 +389,7 @@ def get_tiny_config(config_class, model_class=None, **model_tester_kwargs):
             # This is to avoid `T5EncoderOnlyModelTest` is used instead of `T5ModelTest`, which has
             # `is_encoder_decoder=False` and causes some pipeline tests failing (also failures in `Optimum` CI).
             # TODO: More fine grained control of the desired tester class.
-            model_tester_class = sorted(tester_classes, key=lambda x: (len(x.__name__), x.__name__))[0]
+            model_tester_class = min(tester_classes, key=lambda x: (len(x.__name__), x.__name__))
     except ModuleNotFoundError:
         error = f"Tiny config not created for {model_type} - cannot find the testing module from the model name."
         raise ValueError(error)
diff --git a/utils/deprecate_models.py b/utils/deprecate_models.py
index 8cbe319fdb65..faf25f9e5c3b 100644
--- a/utils/deprecate_models.py
+++ b/utils/deprecate_models.py
@@ -37,7 +37,7 @@ def get_last_stable_minor_release():
     last_stable_minor_releases = [
         release for release in release_data["releases"] if release.startswith(last_major_minor)
     ]
-    last_stable_release = sorted(last_stable_minor_releases, key=version.parse)[-1]
+    last_stable_release = max(last_stable_minor_releases, key=version.parse)
 
     return last_stable_release
 
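Reviewer note: every hunk above applies the same refactor, `sorted(xs, key=k)[0]` → `min(xs, key=k)` (and `sorted(xs)[-1]` → `max(xs)`), which replaces an O(n log n) sort with a single O(n) pass when only one extreme element is needed. A minimal standalone sketch of the equivalence, using made-up sample values rather than data from the repository:

```python
# Hypothetical (token_id, token, end_pos) tuples, mirroring the candidate
# lists built by the Japanese tokenizers patched above.
candidates = [(42, "foo", 3), (7, "ba", 2), (19, "b", 1)]

# Before: sort the whole list (O(n log n)) just to read off one element.
first_by_sort = sorted(candidates, key=lambda x: x[0])[0]

# After: a single O(n) pass that returns the same element.
first_by_min = min(candidates, key=lambda x: x[0])

assert first_by_sort == first_by_min == (7, "ba", 2)

# The mirrored rewrite at the other end of the list:
releases = ["4.56.0", "4.56.1", "4.56.2"]
assert sorted(releases)[-1] == max(releases) == "4.56.2"
```

One subtlety worth flagging: because `sorted` is stable, `sorted(xs, key=k)[0]` and `min(xs, key=k)` always select the same element, but `sorted(xs, key=k)[-1]` returns the *last* element with the maximal key while `max(xs, key=k)` returns the *first*. The two only coincide when the maximal key is unique (or when tied elements are interchangeable), so the two `[-1]` call sites above are worth double-checking for ties.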