diff --git a/.github/conda/build.sh b/.github/conda/build.sh
deleted file mode 100644
index a40f1097a863..000000000000
--- a/.github/conda/build.sh
+++ /dev/null
@@ -1 +0,0 @@
-$PYTHON setup.py install # Python command to install the script.
diff --git a/.github/conda/meta.yaml b/.github/conda/meta.yaml
deleted file mode 100644
index 89dc353b1277..000000000000
--- a/.github/conda/meta.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
-{% set name = "transformers" %}
-
-package:
-  name: "{{ name|lower }}"
-  version: "{{ TRANSFORMERS_VERSION }}"
-
-source:
-  path: ../../
-
-build:
-  noarch: python
-
-requirements:
-  host:
-    - python
-    - pip
-    - numpy >=1.17
-    - dataclasses
-    - huggingface_hub
-    - packaging
-    - filelock
-    - requests
-    - tqdm >=4.27
-    - sacremoses
-    - regex !=2019.12.17
-    - protobuf
-    - tokenizers >=0.11.1,!=0.11.3,<0.13
-    - pyyaml >=5.1
-    - safetensors
-    - fsspec
-  run:
-    - python
-    - numpy >=1.17
-    - dataclasses
-    - huggingface_hub
-    - packaging
-    - filelock
-    - requests
-    - tqdm >=4.27
-    - sacremoses
-    - regex !=2019.12.17
-    - protobuf
-    - tokenizers >=0.11.1,!=0.11.3,<0.13
-    - pyyaml >=5.1
-    - safetensors
-    - fsspec
-
-test:
-  imports:
-    - transformers
-
-about:
-  home: https://huggingface.co
-  license: Apache License 2.0
-  license_file: LICENSE
-  summary: "🤗Transformers: State-of-the-art Natural Language Processing for Pytorch and TensorFlow 2.0."
diff --git a/test_preparation/tests_hub_test_list.txt b/test_preparation/tests_hub_test_list.txt
new file mode 100644
index 000000000000..3598c3003fd4
--- /dev/null
+++ b/test_preparation/tests_hub_test_list.txt
@@ -0,0 +1 @@
+tests
\ No newline at end of file
diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py
index 6630fc2ba9d1..97e962d58d21 100644
--- a/tests/generation/test_utils.py
+++ b/tests/generation/test_utils.py
@@ -3138,7 +3138,7 @@ def test_logits_processor_not_inplace(self):
         out = model.generate(input_ids, output_logits=True, output_scores=True, return_dict_in_generate=True)
         out_with_temp = model.generate(
             input_ids,
-            temperature=0.5,
+            temperature=0.6,
             do_sample=True,
             output_logits=True,
             output_scores=True,
diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py
index 906e85e1de61..43523e214110 100644
--- a/utils/tests_fetcher.py
+++ b/utils/tests_fetcher.py
@@ -1132,39 +1132,42 @@ def parse_commit_message(commit_message: str) -> Dict[str, bool]:
 
 
 JOB_TO_TEST_FILE = {
-    "tests_torch_and_tf": r"tests/models/.*/test_modeling_(?:tf_|(?!flax)).*",
-    "tests_torch_and_flax": r"tests/models/.*/test_modeling_(?:flax|(?!tf)).*",
-    "tests_tf": r"tests/models/.*/test_modeling_tf_.*",
-    "tests_torch": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
-    "tests_generate": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
-    "tests_tokenization": r"tests/models/.*/test_tokenization.*",
-    "tests_processors": r"tests/models/.*/test_(?!(?:modeling_|tokenization_)).*",  # takes feature extractors, image processors, processors
-    "examples_torch": r"examples/pytorch/.*test_.*",
-    "examples_tensorflow": r"examples/tensorflow/.*test_.*",
-    "tests_exotic_models": r"tests/models/.*(?=layoutlmv|nat|deta|udop|nougat).*",
-    "tests_custom_tokenizers": r"tests/models/.*/test_tokenization_(?=bert_japanese|openai|clip).*",
-    # "repo_utils": r"tests/[^models].*test.*", TODO later on we might want to do
-    "pipelines_tf": r"tests/models/.*/test_modeling_tf_.*",
-    "pipelines_torch": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
-    "tests_hub": r"tests/.*",
-    "tests_onnx": r"tests/models/.*/test_modeling_(?:tf_|(?!flax)).*",
-    "tests_non_model": r"tests/[^/]*?/test_.*\.py",
+    "tests_torch_and_tf": [r"tests/models/.*/test_modeling_(?:tf_|(?!flax)).*"],
+    "tests_torch_and_flax": [r"tests/models/.*/test_modeling_(?:flax|(?!tf)).*"],
+    "tests_tf": [r"tests/models/.*/test_modeling_tf_.*"],
+    "tests_torch": [r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*"],
+    "tests_generate": [r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*", r"tests/generation/.*"],
+    "tests_tokenization": [r"tests/models/.*/test_tokenization.*"],
+    # takes feature extractors, image processors, processors
+    "tests_processors": [r"tests/models/.*/test_(?!(?:modeling_|tokenization_)).*"],
+    "examples_torch": [r"examples/pytorch/.*test_.*"],
+    "examples_tensorflow": [r"examples/tensorflow/.*test_.*"],
+    "tests_exotic_models": [r"tests/models/.*(?=layoutlmv|nat|deta|udop|nougat).*"],
+    "tests_custom_tokenizers": [r"tests/models/.*/test_tokenization_(?=bert_japanese|openai|clip).*"],
+    # "repo_utils": [r"tests/[^models].*test.*"], TODO later on we might want to do
+    "pipelines_tf": [r"tests/models/.*/test_modeling_tf_.*"],
+    "pipelines_torch": [r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*"],
+    "tests_hub": [r"tests/.*"],
+    "tests_onnx": [r"tests/models/.*/test_modeling_(?:tf_|(?!flax)).*"],
+    "tests_non_model": [r"tests/[^/]*?/test_.*\.py"],
 }
 
 
 def create_test_list_from_filter(full_test_list, out_path):
     os.makedirs(out_path, exist_ok=True)
     all_test_files = "\n".join(full_test_list)
-    for job_name, _filter in JOB_TO_TEST_FILE.items():
-        file_name = os.path.join(out_path, f"{job_name}_test_list.txt")
-        if job_name == "tests_hub":
-            files_to_test = ["tests"]
-        else:
-            files_to_test = list(re.findall(_filter, all_test_files))
+    for job_name, _filters in JOB_TO_TEST_FILE.items():
+        files_to_test = set()  # Using sets to avoid duplicates when multiple filters match the same file
+        for _filter in _filters:
+            file_name = os.path.join(out_path, f"{job_name}_test_list.txt")
+            if job_name == "tests_hub":
+                files_to_test.add("tests")
+            else:
+                files_to_test = files_to_test.union(set(re.findall(_filter, all_test_files)))
         print(job_name, file_name)
         if len(files_to_test) > 0:  # No tests -> no file with test list
             with open(file_name, "w") as f:
-                f.write("\n".join(files_to_test))
+                f.write("\n".join(sorted(files_to_test)))
 
 
 if __name__ == "__main__":