diff --git a/.circleci/config.yml b/.circleci/config.yml
index 63c6162fc15dfb..66678d0d4a0f5d 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -43,6 +43,12 @@ jobs:
             else
                 touch test_preparation/test_list.txt
             fi
+        - run: |
+            if [ -f doctest_list.txt ]; then
+                cp doctest_list.txt test_preparation/doctest_list.txt
+            else
+                touch test_preparation/doctest_list.txt
+            fi
         - run: |
             if [ -f test_repo_utils.txt ]; then
                 mv test_repo_utils.txt test_preparation/test_repo_utils.txt
@@ -71,6 +77,8 @@ jobs:
             fi
         - store_artifacts:
             path: test_preparation/test_list.txt
+        - store_artifacts:
+            path: test_preparation/doctest_list.txt
        - store_artifacts:
            path: ~/transformers/test_preparation/filtered_test_list.txt
        - store_artifacts:
diff --git a/.circleci/create_circleci_config.py b/.circleci/create_circleci_config.py
index ef100bdbb1356d..4bc5ce17d08cf9 100644
--- a/.circleci/create_circleci_config.py
+++ b/.circleci/create_circleci_config.py
@@ -483,7 +483,6 @@ def job_name(self):
     hub_job,
     onnx_job,
     exotic_models_job,
-    doc_test_job
 ]
 EXAMPLES_TESTS = [
     examples_torch_job,
@@ -495,6 +494,8 @@ def job_name(self):
     pipelines_tf_job,
 ]
 REPO_UTIL_TESTS = [repo_utils_job]
+DOC_TESTS = [doc_test_job]
+

 def create_circleci_config(folder=None):
     if folder is None:
@@ -552,6 +553,15 @@ def create_circleci_config(folder=None):
     if os.path.exists(example_file) and os.path.getsize(example_file) > 0:
         jobs.extend(EXAMPLES_TESTS)

+    doctest_file = os.path.join(folder, "doctest_list.txt")
+    if os.path.exists(doctest_file):
+        with open(doctest_file) as f:
+            doctest_list = f.read()
+    else:
+        doctest_list = []
+    if len(doctest_list) > 0:
+        jobs.extend(DOC_TESTS)
+
     repo_util_file = os.path.join(folder, "test_repo_utils.txt")
     if os.path.exists(repo_util_file) and os.path.getsize(repo_util_file) > 0:
         jobs.extend(REPO_UTIL_TESTS)
diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml
index 26c5e517d2a505..dfc407e69abc14 100644
--- a/.github/workflows/build-docker-images.yml
+++ b/.github/workflows/build-docker-images.yml
@@ -71,6 +71,16 @@ jobs:
     name: "Latest PyTorch + DeepSpeed"
     runs-on: ubuntu-latest
     steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
@@ -98,6 +108,16 @@ jobs:
     name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)"
     runs-on: ubuntu-latest
     steps:
+      - name: Cleanup disk
+        run: |
+          sudo ls -l /usr/local/lib/
+          sudo ls -l /usr/share/
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
+          sudo rm -rf /usr/local/lib/android
+          sudo rm -rf /usr/share/dotnet
+          sudo du -sh /usr/local/lib/
+          sudo du -sh /usr/share/
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 1aba38f67a2211..00000000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1 +0,0 @@
-include LICENSE
diff --git a/Makefile b/Makefile
index d6d6966a1dadfd..25ab889148bd1a 100644
--- a/Makefile
+++ b/Makefile
@@ -111,3 +111,10 @@ post-release:

 post-patch:
	python utils/release.py --post_release --patch
+
+build-release:
+	rm -rf dist
+	rm -rf build
+	python setup.py bdist_wheel
+	python setup.py sdist
+	python utils/check_build.py
diff --git a/README.md b/README.md
index 005a67e85e500a..e2f1b083ab0f30 100644
--- a/README.md
+++ b/README.md
@@ -115,6 +115,19 @@ In Multimodal tasks:
 **[Write With Transformer](https://transformer.huggingface.co)**, built by the Hugging Face team, is the official demo of this repo's text generation capabilities.

+## 100 projects using Transformers
+
+Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone else to build their dream projects.
+
+In order to celebrate the 100,000 stars of transformers, we have decided to put the spotlight on the community, and we have created the [awesome-transformers](./awesome-transformers.md) page which lists 100 incredible projects built in the vicinity of transformers.
+
+If you own or use a project that you believe should be part of the list, please open a PR to add it!
+
 ## If you are looking for custom support from the Hugging Face team
diff --git a/awesome-transformers.md b/awesome-transformers.md
new file mode 100644
index 00000000000000..beec5379a53b15
--- /dev/null
+++ b/awesome-transformers.md
@@ -0,0 +1,584 @@
+# Awesome projects built with Transformers
+
+This page lists awesome projects built on top of Transformers. Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone else to build their dream projects.
+
+In this list, we showcase incredibly impactful and novel projects that have pushed the field forward. We celebrate 100 of these projects as we reach the milestone of 100k stars as a community; but we're very open to pull requests adding other projects to the list. If you believe a project should be here and it's not, then please, open a PR to add it.
+
+## [gpt4all](https://github.com/nomic-ai/gpt4all)
+
+[gpt4all](https://github.com/nomic-ai/gpt4all) is an ecosystem of open-source chatbots trained on massive collections of clean assistant data including code, stories and dialogue. It offers open-source, large language models such as LLaMA and GPT-J trained in an assistant-style.
+
+Keywords: Open-source, LLaMa, GPT-J, instruction, assistant
+
+## [recommenders](https://github.com/microsoft/recommenders)
+
+This repository contains examples and best practices for building recommendation systems, provided as Jupyter notebooks. It goes over several aspects required to build efficient recommendation systems: data preparation, modeling, evaluation, model selection & optimization, as well as operationalization.
+
+Keywords: Recommender systems, AzureML
+
+## [lama-cleaner](https://github.com/Sanster/lama-cleaner)
+
+Image inpainting tool powered by Stable Diffusion. Remove any unwanted object, defect, or person from your pictures, or erase and replace anything on them.
+
+Keywords: inpainting, SD, Stable Diffusion
+
+## [flair](https://github.com/flairNLP/flair)
+
+FLAIR is a powerful PyTorch NLP framework, covering several important tasks: NER, sentiment analysis, part-of-speech tagging, text and document embeddings, among other things.
+
+Keywords: NLP, text embedding, document embedding, biomedical, NER, PoS, sentiment-analysis
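+A minimal NER sketch using flair's documented API (the model identifier is one pretrained English tagger on the Hugging Face Hub; adjust to your needs):
+
+```python
+from flair.data import Sentence
+from flair.models import SequenceTagger
+
+# load a pretrained English NER tagger from the Hugging Face Hub
+tagger = SequenceTagger.load("flair/ner-english")
+
+sentence = Sentence("George Washington went to Washington.")
+tagger.predict(sentence)  # annotates the sentence in place
+
+for entity in sentence.get_spans("ner"):
+    print(entity)  # e.g. a span "George Washington" labeled PER
+```
+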
+## [mindsdb](https://github.com/mindsdb/mindsdb)
+
+MindsDB is a low-code ML platform, which automates and integrates several ML frameworks into the data stack as "AI Tables" to streamline the integration of AI into applications, making it accessible to developers of all skill levels.
+
+Keywords: Database, low-code, AI table
+
+## [langchain](https://github.com/hwchase17/langchain)
+
+[langchain](https://github.com/hwchase17/langchain) is aimed at assisting in the development of apps merging both LLMs and other sources of knowledge. The library allows chaining calls to applications, creating a sequence across many tools.
+
+Keywords: LLMs, Large Language Models, Agents, Chains
+
+## [ParlAI](https://github.com/facebookresearch/ParlAI)
+
+[ParlAI](https://github.com/facebookresearch/ParlAI) is a python framework for sharing, training and testing dialogue models, from open-domain chitchat, to task-oriented dialogue, to visual question answering. It provides more than 100 datasets under the same API, a large zoo of pretrained models, a set of agents, and has several integrations.
+
+Keywords: Dialogue, Chatbots, VQA, Datasets, Agents
+
+## [sentence-transformers](https://github.com/UKPLab/sentence-transformers)
+
+This framework provides an easy method to compute dense vector representations for sentences, paragraphs, and images. The models are based on transformer networks like BERT / RoBERTa / XLM-RoBERTa etc. and achieve state-of-the-art performance in various tasks. Text is embedded in a vector space such that similar text is close and can efficiently be found using cosine similarity.
+
+Keywords: Dense vector representations, Text embeddings, Sentence embeddings
+
+## [ludwig](https://github.com/ludwig-ai/ludwig)
+
+Ludwig is a declarative machine learning framework that makes it easy to define machine learning pipelines using a simple and flexible data-driven configuration system. Ludwig is targeted at a wide variety of AI tasks. It provides a data-driven configuration system, training, prediction, and evaluation scripts, as well as a programmatic API.
+
+Keywords: Declarative, Data-driven, ML Framework
+
+## [InvokeAI](https://github.com/invoke-ai/InvokeAI)
+
+[InvokeAI](https://github.com/invoke-ai/InvokeAI) is an engine for Stable Diffusion models, aimed at professionals, artists, and enthusiasts. It leverages the latest AI-driven technologies through a CLI as well as a WebUI.
+
+Keywords: Stable-Diffusion, WebUI, CLI
+
+## [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP)
+
+[PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) is an easy-to-use and powerful NLP library particularly targeted at the Chinese language. It has support for multiple pre-trained model zoos, and supports a wide range of NLP tasks from research to industrial applications.
+
+Keywords: NLP, Chinese, Research, Industry
+
+## [stanza](https://github.com/stanfordnlp/stanza)
+
+The Stanford NLP Group's official Python NLP library. It contains support for running various accurate natural language processing tools on 60+ languages and for accessing the Java Stanford CoreNLP software from Python.
+
+Keywords: NLP, Multilingual, CoreNLP
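+For illustration, a small stanza pipeline (API per the Stanford NLP documentation; the first call downloads the English models once):
+
+```python
+import stanza
+
+stanza.download("en")  # fetch the English models
+nlp = stanza.Pipeline("en", processors="tokenize,pos")
+
+doc = nlp("Transformers power modern NLP.")
+for word in doc.sentences[0].words:
+    print(word.text, word.upos)  # token and universal POS tag
+```
+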
+## [DeepPavlov](https://github.com/deeppavlov/DeepPavlov)
+
+[DeepPavlov](https://github.com/deeppavlov/DeepPavlov) is an open-source conversational AI library. It is designed for the development of production-ready chatbots and complex conversational systems, as well as research in the area of NLP and, particularly, of dialog systems.
+
+Keywords: Conversational, Chatbot, Dialog
+
+## [alpaca-lora](https://github.com/tloen/alpaca-lora)
+
+Alpaca-lora contains code for reproducing the Stanford Alpaca results using low-rank adaptation (LoRA). The repository provides training (fine-tuning) as well as generation scripts.
+
+Keywords: LoRA, Parameter-efficient fine-tuning
+
+## [imagen-pytorch](https://github.com/lucidrains/imagen-pytorch)
+
+An open-source implementation of Imagen, Google's closed-source text-to-image neural network that beats DALL-E2. As of release, it is the new SOTA for text-to-image synthesis.
+
+Keywords: Imagen, Text-to-image
+
+## [adapter-transformers](https://github.com/adapter-hub/adapter-transformers)
+
+[adapter-transformers](https://github.com/adapter-hub/adapter-transformers) is an extension of HuggingFace's Transformers library, integrating adapters into state-of-the-art language models by incorporating AdapterHub, a central repository for pre-trained adapter modules. It is a drop-in replacement for transformers, which is regularly updated to stay up-to-date with the developments of transformers.
+
+Keywords: Adapters, LoRA, Parameter-efficient fine-tuning, Hub
+
+## [NeMo](https://github.com/NVIDIA/NeMo)
+
+NVIDIA [NeMo](https://github.com/NVIDIA/NeMo) is a conversational AI toolkit built for researchers working on automatic speech recognition (ASR), text-to-speech synthesis (TTS), large language models (LLMs), and natural language processing (NLP). The primary objective of [NeMo](https://github.com/NVIDIA/NeMo) is to help researchers from industry and academia to reuse prior work (code and pretrained models) and make it easier to create new [conversational AI models](https://developer.nvidia.com/conversational-ai#started).
+
+Keywords: Conversational, ASR, TTS, LLMs, NLP
+
+## [Runhouse](https://github.com/run-house/runhouse)
+
+[Runhouse](https://github.com/run-house/runhouse) allows you to send code and data to any of your compute or data infra, all in Python, and continue to interact with them normally from your existing code and environment. Runhouse developers mention:
+
+> Think of it as an expansion pack to your Python interpreter that lets it take detours to remote machines or manipulate remote data.
+
+Keywords: MLOps, Infrastructure, Data storage, Modeling
+
+## [MONAI](https://github.com/Project-MONAI/MONAI)
+
+[MONAI](https://github.com/Project-MONAI/MONAI) is a PyTorch-based, open-source framework for deep learning in healthcare imaging, part of the PyTorch Ecosystem. Its ambitions are:
+- developing a community of academic, industrial and clinical researchers collaborating on a common foundation;
+- creating state-of-the-art, end-to-end training workflows for healthcare imaging;
+- providing researchers with an optimized and standardized way to create and evaluate deep learning models.
+
+Keywords: Healthcare imaging, Training, Evaluation
+
+## [simpletransformers](https://github.com/ThilinaRajapakse/simpletransformers)
+
+Simple Transformers lets you quickly train and evaluate Transformer models. Only 3 lines of code are needed to initialize, train, and evaluate a model. It supports a wide variety of NLP tasks.
+
+Keywords: Framework, simplicity, NLP
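+A sketch of the advertised three-line workflow (the tiny DataFrame here is made up for the example; real training needs a proper dataset and, ideally, a GPU):
+
+```python
+import pandas as pd
+from simpletransformers.classification import ClassificationModel
+
+train_df = pd.DataFrame(
+    [["best movie ever", 1], ["utterly boring", 0]], columns=["text", "labels"]
+)
+
+model = ClassificationModel("roberta", "roberta-base", use_cuda=False)
+model.train_model(train_df)                           # fine-tune
+result, outputs, wrong = model.eval_model(train_df)   # evaluate
+```
+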
+## [JARVIS](https://github.com/microsoft/JARVIS)
+
+[JARVIS](https://github.com/microsoft/JARVIS) is a system attempting to merge LLMs such as GPT-4 with the rest of the open-source ML community: leveraging up to 60 downstream models in order to perform tasks identified by the LLM.
+
+Keywords: LLM, Agents, HF Hub
+
+## [transformers.js](https://xenova.github.io/transformers.js/)
+
+[transformers.js](https://xenova.github.io/transformers.js/) is a JavaScript library targeted at running models from transformers directly within the browser.
+
+Keywords: Transformers, JavaScript, browser
+
+## [bumblebee](https://github.com/elixir-nx/bumblebee)
+
+Bumblebee provides pre-trained Neural Network models on top of Axon, a neural networks library for the Elixir language. It includes integration with 🤗 Models, allowing anyone to download and perform Machine Learning tasks with a few lines of code.
+
+Keywords: Elixir, Axon
+
+## [argilla](https://github.com/argilla-io/argilla)
+
+Argilla is an open-source platform providing advanced NLP labeling, monitoring, and workspaces. It is compatible with many open source ecosystems such as Hugging Face, Stanza, FLAIR, and others.
+
+Keywords: NLP, Labeling, Monitoring, Workspaces
+
+## [haystack](https://github.com/deepset-ai/haystack)
+
+Haystack is an open source NLP framework to interact with your data using Transformer models and LLMs. It offers production-ready tools to quickly build complex decision making, question answering, semantic search, text generation applications, and more.
+
+Keywords: NLP, Framework, LLM
+
+## [spaCy](https://github.com/explosion/spaCy)
+
+[spaCy](https://github.com/explosion/spaCy) is a library for advanced Natural Language Processing in Python and Cython. It's built on the very latest research, and was designed from day one to be used in real products. It offers support for transformers models through its third party package, spacy-transformers.
+
+Keywords: NLP, Framework
+
+## [speechbrain](https://github.com/speechbrain/speechbrain)
+
+SpeechBrain is an open-source and all-in-one conversational AI toolkit based on PyTorch. The goal is to create a single, flexible, and user-friendly toolkit that can be used to easily develop state-of-the-art speech technologies, including systems for speech recognition, speaker recognition, speech enhancement, speech separation, language identification, multi-microphone signal processing, and many others.
+
+Keywords: Conversational, Speech
+
+## [skorch](https://github.com/skorch-dev/skorch)
+
+Skorch is a scikit-learn compatible neural network library that wraps PyTorch. It has support for models within transformers, and tokenizers from tokenizers.
+
+Keywords: Scikit-Learn, PyTorch
+
+## [bertviz](https://github.com/jessevig/bertviz)
+
+BertViz is an interactive tool for visualizing attention in Transformer language models such as BERT, GPT2, or T5. It can be run inside a Jupyter or Colab notebook through a simple Python API that supports most Huggingface models.
+
+Keywords: Visualization, Transformers
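+A minimal sketch of driving BertViz from transformers (runs inside a Jupyter/Colab notebook; `output_attentions=True` is required so the model returns attention weights):
+
+```python
+from bertviz import head_view
+from transformers import AutoModel, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+model = AutoModel.from_pretrained("bert-base-uncased", output_attentions=True)
+
+inputs = tokenizer("The cat sat on the mat", return_tensors="pt")
+outputs = model(**inputs)
+tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
+
+head_view(outputs.attentions, tokens)  # interactive attention visualization
+```
+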
+## [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax)
+
+[mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax) is a haiku library using the xmap/pjit operators in JAX for model parallelism of transformers. This library is designed for scalability up to approximately 40B parameters on TPUv3s. It was the library used to train the GPT-J model.
+
+Keywords: Haiku, Model parallelism, LLM, TPU
+
+## [deepchem](https://github.com/deepchem/deepchem)
+
+DeepChem aims to provide a high quality open-source toolchain that democratizes the use of deep learning in drug discovery, materials science, quantum chemistry, and biology.
+
+Keywords: Drug discovery, Materials Science, Quantum Chemistry, Biology
+
+## [OpenNRE](https://github.com/thunlp/OpenNRE)
+
+An Open-Source Package for Neural Relation Extraction (NRE). It is targeted at a wide range of users, from newcomers to relation extraction, to developers, researchers, or students.
+
+Keywords: Neural Relation Extraction, Framework
+
+## [pycorrector](https://github.com/shibing624/pycorrector)
+
+PyCorrector is a Chinese text error correction tool. It uses a language model to detect errors, and pinyin and glyph-shape features to correct them. It can be used for Chinese pinyin and stroke input methods.
+
+Keywords: Chinese, Error correction tool, Language model, Pinyin
+
+## [nlpaug](https://github.com/makcedward/nlpaug)
+
+This python library helps you with augmenting nlp for machine learning projects. It is a lightweight library featuring synthetic data generation for improving model performance, support for audio and text, and compatibility with several ecosystems (scikit-learn, pytorch, tensorflow).
+
+Keywords: Data augmentation, Synthetic data generation, Audio, NLP
+
+## [dream-textures](https://github.com/carson-katri/dream-textures)
+
+[dream-textures](https://github.com/carson-katri/dream-textures) is a library targeted at bringing stable-diffusion support within Blender. It supports several use-cases, such as image generation, texture projection, inpainting/outpainting, ControlNet, and upscaling.
+
+Keywords: Stable-Diffusion, Blender
+
+## [seldon-core](https://github.com/SeldonIO/seldon-core)
+
+Seldon core converts your ML models (Tensorflow, Pytorch, H2o, etc.) or language wrappers (Python, Java, etc.) into production REST/GRPC microservices. Seldon handles scaling to thousands of production machine learning models and provides advanced machine learning capabilities out of the box including Advanced Metrics, Request Logging, Explainers, Outlier Detectors, A/B Tests, Canaries and more.
+
+Keywords: Microservices, Modeling, Language wrappers
+
+## [open_model_zoo](https://github.com/openvinotoolkit/open_model_zoo)
+
+This repository includes optimized deep learning models and a set of demos to expedite development of high-performance deep learning inference applications. Use these free pre-trained models instead of training your own models to speed up the development and production deployment process.
+
+Keywords: Optimized models, Demos
+
+## [ml-stable-diffusion](https://github.com/apple/ml-stable-diffusion)
+
+ML-Stable-Diffusion is a repository by Apple bringing Stable Diffusion support to Core ML on Apple Silicon devices. It supports stable diffusion checkpoints hosted on the Hugging Face Hub.
+
+Keywords: Stable Diffusion, Apple Silicon, Core ML
+
+## [stable-dreamfusion](https://github.com/ashawkey/stable-dreamfusion)
+
+Stable-Dreamfusion is a pytorch implementation of the text-to-3D model Dreamfusion, powered by the Stable Diffusion text-to-2D model.
+
+Keywords: Text-to-3D, Stable Diffusion
+
+## [txtai](https://github.com/neuml/txtai)
+
+[txtai](https://github.com/neuml/txtai) is an open-source platform for semantic search and workflows powered by language models. txtai builds embeddings databases, which are a union of vector indexes and relational databases enabling similarity search with SQL. Semantic workflows connect language models together into unified applications.
+
+Keywords: Semantic search, LLM
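+A small semantic-search sketch following txtai's documented Embeddings API (the model path is one common choice, not a requirement):
+
+```python
+from txtai.embeddings import Embeddings
+
+embeddings = Embeddings({"path": "sentence-transformers/all-MiniLM-L6-v2"})
+
+data = ["US tops 5 million confirmed virus cases",
+        "Maine man wins $1M from $25 lottery ticket"]
+# index takes (id, data, tags) tuples
+embeddings.index([(uid, text, None) for uid, text in enumerate(data)])
+
+# returns (id, score) pairs for the best semantic match
+print(embeddings.search("public health story", 1))
+```
+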
+## [djl](https://github.com/deepjavalibrary/djl)
+
+Deep Java Library (DJL) is an open-source, high-level, engine-agnostic Java framework for deep learning. DJL is designed to be easy to get started with and simple to use for developers. DJL provides a native Java development experience and functions like any other regular Java library. DJL offers [a Java binding](https://github.com/deepjavalibrary/djl/tree/master/extensions/tokenizers) for HuggingFace Tokenizers and an easy conversion toolkit for deploying HuggingFace models in Java.
+
+Keywords: Java, Framework
+
+## [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/)
+
+This project provides a unified framework to test generative language models on a large number of different evaluation tasks. It has support for more than 200 tasks, and supports different ecosystems: HF Transformers, GPT-NeoX, DeepSpeed, as well as the OpenAI API.
+
+Keywords: LLM, Evaluation, Few-shot
+
+## [gpt-neox](https://github.com/EleutherAI/gpt-neox)
+
+This repository records EleutherAI's library for training large-scale language models on GPUs. The framework is based on NVIDIA's Megatron Language Model and has been augmented with techniques from DeepSpeed as well as some novel optimizations. It is focused on training multi-billion-parameter models.
+
+Keywords: Training, LLM, Megatron, DeepSpeed
+
+## [muzic](https://github.com/microsoft/muzic)
+
+Muzic is a research project on AI music that empowers music understanding and generation with deep learning and artificial intelligence. Muzic was created by researchers from Microsoft Research Asia.
+
+Keywords: Music understanding, Music generation
+
+## [dalle-flow](https://github.com/jina-ai/dalle-flow)
+
+DALL·E Flow is an interactive workflow for generating high-definition images from a text prompt. It leverages DALL·E-Mega, GLID-3 XL, and Stable Diffusion to generate image candidates, and then calls CLIP-as-service to rank the candidates w.r.t. the prompt. The preferred candidate is fed to GLID-3 XL for diffusion, which often enriches the texture and background. Finally, the candidate is upscaled to 1024x1024 via SwinIR.
+
+Keywords: High-definition image generation, Stable Diffusion, DALL-E Mega, GLID-3 XL, CLIP, SwinIR
+
+## [lightseq](https://github.com/bytedance/lightseq)
+
+LightSeq is a high performance training and inference library for sequence processing and generation implemented in CUDA. It enables highly efficient computation of modern NLP and CV models such as BERT, GPT, Transformer, etc. It is therefore especially useful for machine translation, text generation, image classification, and other sequence-related tasks.
+
+Keywords: Training, Inference, Sequence Processing, Sequence Generation
+
+## [LaTeX-OCR](https://github.com/lukas-blecher/LaTeX-OCR)
+
+The goal of this project is to create a learning-based system that takes an image of a math formula and returns the corresponding LaTeX code.
+
+Keywords: OCR, LaTeX, Math formula
+
+## [open_clip](https://github.com/mlfoundations/open_clip)
+
+OpenCLIP is an open source implementation of OpenAI's CLIP.
+
+The goal of this repository is to enable training models with contrastive image-text supervision, and to investigate their properties such as robustness to distribution shift. The starting point is an implementation of CLIP that matches the accuracy of the original CLIP models when trained on the same dataset.
+
+Specifically, a ResNet-50 model trained with this codebase on OpenAI's 15 million image subset of YFCC achieves 32.7% top-1 accuracy on ImageNet.
+
+Keywords: CLIP, Open-source, Contrastive, Image-text
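+A zero-shot scoring sketch adapted from OpenCLIP's README (the pretrained tag names a LAION checkpoint; "cat.png" is a placeholder image path):
+
+```python
+import torch
+from PIL import Image
+import open_clip
+
+model, _, preprocess = open_clip.create_model_and_transforms("ViT-B-32", pretrained="laion2b_s34b_b79k")
+tokenizer = open_clip.get_tokenizer("ViT-B-32")
+
+image = preprocess(Image.open("cat.png")).unsqueeze(0)
+text = tokenizer(["a photo of a cat", "a photo of a dog"])
+
+with torch.no_grad():
+    image_features = model.encode_image(image)
+    text_features = model.encode_text(text)
+    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
+print(probs)  # similarity of the image to each caption
+```
+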
+## [dalle-playground](https://github.com/saharmor/dalle-playground)
+
+A playground to generate images from any text prompt using Stable Diffusion and Dall-E mini.
+
+Keywords: WebUI, Stable Diffusion, Dall-E mini
+
+## [FedML](https://github.com/FedML-AI/FedML)
+
+[FedML](https://github.com/FedML-AI/FedML) is a federated learning and analytics library enabling secure and collaborative machine learning on decentralized data anywhere at any scale.
+
+It supports large-scale cross-silo federated learning, cross-device federated learning on smartphones/IoT devices, and research simulation.
+
+Keywords: Federated Learning, Analytics, Collaborative ML, Decentralized
+
+## [gpt-code-clippy](https://github.com/CodedotAl/gpt-code-clippy)
+
+GPT-Code-Clippy (GPT-CC) is an open source version of GitHub Copilot, a language model -- based on GPT-3, called GPT-Codex -- that is fine-tuned on publicly available code from GitHub.
+
+Keywords: LLM, Code
+
+## [TextAttack](https://github.com/QData/TextAttack)
+
+[TextAttack](https://github.com/QData/TextAttack) 🐙 is a Python framework for adversarial attacks, data augmentation, and model training in NLP.
+
+Keywords: Adversarial attacks, Data augmentation, NLP
+
+## [OpenPrompt](https://github.com/thunlp/OpenPrompt)
+
+Prompt-learning is a paradigm to adapt pre-trained language models (PLMs) to downstream NLP tasks, which modifies the input text with a textual template and directly uses PLMs to conduct pre-training tasks. This library provides a standard, flexible and extensible framework to deploy the prompt-learning pipeline. [OpenPrompt](https://github.com/thunlp/OpenPrompt) supports loading PLMs directly from https://github.com/huggingface/transformers.
+
+Keywords: Prompt-learning, PLMs, Framework
+
+## [text-generation-webui](https://github.com/oobabooga/text-generation-webui/)
+
+[text-generation-webui](https://github.com/oobabooga/text-generation-webui/) is a Gradio Web UI for running Large Language Models like LLaMA, llama.cpp, GPT-J, Pythia, OPT, and GALACTICA.
+
+Keywords: LLM, WebUI
+
+## [libra](https://github.com/Palashio/libra)
+
+An ergonomic machine learning [libra](https://github.com/Palashio/libra)ry for non-technical users. It focuses on ergonomics and on ensuring that training a model is as simple as it can be.
+
+Keywords: Ergonomic, Non-technical
+
+## [alibi](https://github.com/SeldonIO/alibi)
+
+Alibi is an open source Python library aimed at machine learning model inspection and interpretation. The focus of the library is to provide high-quality implementations of black-box, white-box, local and global explanation methods for classification and regression models.
+
+Keywords: Model inspection, Model interpretation, Black-box, White-box
+
+## [tortoise-tts](https://github.com/neonbjb/tortoise-tts)
+
+Tortoise is a text-to-speech program built with the following priorities: strong multi-voice capabilities, and highly realistic prosody and intonation.
+
+Keywords: Text-to-speech
+
+## [flower](https://github.com/adap/flower)
+
+Flower (flwr) is a framework for building federated learning systems. The design of Flower is based on a few guiding principles: customizability, extendability, framework agnosticity, and ease-of-use.
+
+Keywords: Federated learning systems, Customizable, Extendable, Framework-agnostic, Simplicity
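+A toy Flower client sketch (the NumPyClient interface is Flower's documented entry point; the "weights" here are placeholders, and a Flower server must already be running at the given address):
+
+```python
+import numpy as np
+import flwr as fl
+
+class TinyClient(fl.client.NumPyClient):
+    def get_parameters(self, config):
+        return [np.zeros(3)]                       # toy model weights
+
+    def fit(self, parameters, config):
+        return [p + 1 for p in parameters], 1, {}  # pretend local training
+
+    def evaluate(self, parameters, config):
+        return 0.0, 1, {}                          # loss, num_examples, metrics
+
+fl.client.start_numpy_client(server_address="127.0.0.1:8080", client=TinyClient())
+```
+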
+## [fast-bert](https://github.com/utterworks/fast-bert)
+
+Fast-Bert is a deep learning library that allows developers and data scientists to train and deploy BERT and XLNet based models for natural language processing tasks beginning with text classification. It is aimed at simplicity.
+
+Keywords: Deployment, BERT, XLNet
+
+## [towhee](https://github.com/towhee-io/towhee)
+
+Towhee makes it easy to build neural data processing pipelines for AI applications. We provide hundreds of models, algorithms, and transformations that can be used as standard pipeline building blocks. Users can use Towhee's Pythonic API to build a prototype of their pipeline and automatically optimize it for production-ready environments.
+
+Keywords: Data processing pipeline, Optimization
+
+## [alibi-detect](https://github.com/SeldonIO/alibi-detect)
+
+Alibi Detect is an open source Python library focused on outlier, adversarial and drift detection. The package aims to cover both online and offline detectors for tabular data, text, images and time series. Both TensorFlow and PyTorch backends are supported for drift detection.
+
+Keywords: Adversarial, Outlier, Drift detection
+
+## [FARM](https://github.com/deepset-ai/FARM)
+
+[FARM](https://github.com/deepset-ai/FARM) makes Transfer Learning with BERT & Co simple, fast and enterprise-ready. It's built upon transformers and provides additional features to simplify the life of developers: parallelized preprocessing, highly modular design, multi-task learning, experiment tracking, easy debugging and close integration with AWS SageMaker.
+
+Keywords: Transfer Learning, Modular design, Multi-task learning, Experiment tracking
+
+## [aitextgen](https://github.com/minimaxir/aitextgen)
+
+A robust Python tool for text-based AI training and generation using OpenAI's GPT-2 and EleutherAI's GPT Neo/GPT-3 architecture. [aitextgen](https://github.com/minimaxir/aitextgen) is a Python package that leverages PyTorch, Hugging Face Transformers and pytorch-lightning with specific optimizations for text generation using GPT-2, plus many added features.
+
+Keywords: Training, Generation
+
+## [diffgram](https://github.com/diffgram/diffgram)
+
+Diffgram aims to integrate human supervision into platforms. We support your team programmatically changing the UI (Schema, layout, etc.) like in Streamlit. This means that you can collect and annotate timely data from users. In other words, we are the platform behind your platform, an integrated part of your application, to ship new & better AI products faster.
+
+Keywords: Human supervision, Platform
+
+## [ecco](https://github.com/jalammar/ecco)
+
+Explain, analyze, and visualize NLP language models. Ecco creates interactive visualizations directly in Jupyter notebooks explaining the behavior of Transformer-based language models (like GPT2, BERT, RoBERTa, T5, and T0).
+
+Keywords: Model explainability
+
+## [s3prl](https://github.com/s3prl/s3prl)
+
+[s3prl](https://github.com/s3prl/s3prl) stands for Self-Supervised Speech Pre-training and Representation Learning. Self-supervised speech pre-trained models are called upstream in this toolkit, and are utilized in various downstream tasks.
+
+Keywords: Speech, Training
+
+## [ru-dalle](https://github.com/ai-forever/ru-dalle)
+
+RuDALL-E aims to be similar to DALL-E, targeted at Russian.
+
+Keywords: DALL-E, Russian
+## [DeepKE](https://github.com/zjunlp/DeepKE)
+
+[DeepKE](https://github.com/zjunlp/DeepKE) is a knowledge extraction toolkit for knowledge graph construction, supporting cnSchema, low-resource, document-level and multimodal scenarios for entity, relation and attribute extraction.
+
+Keywords: Knowledge Extraction, Knowledge Graphs
+
+## [nebullvm](https://github.com/nebuly-ai/nebullvm)
+
+Nebullvm is an ecosystem of plug and play modules to optimize the performance of your AI systems. The optimization modules are stack-agnostic and work with any library. They are designed to be easily integrated into your system, providing a quick and seamless boost to its performance. Simply plug and play to start realizing the benefits of optimized performance right away.
+
+Keywords: Optimization, Performance
+
+## [imaginAIry](https://github.com/brycedrennan/imaginAIry)
+
+Offers a CLI and a Python API to generate images with Stable Diffusion. It has support for many tools, like image structure control (ControlNet), instruction-based image edits (InstructPix2Pix), prompt-based masking (clipseg), among others.
+
+Keywords: Stable Diffusion, CLI, Python API
+
+## [sparseml](https://github.com/neuralmagic/sparseml)
+
+SparseML is an open-source model optimization toolkit that enables you to create inference-optimized sparse models using pruning, quantization, and distillation algorithms. Models optimized with SparseML can then be exported to ONNX and deployed with DeepSparse for GPU-class performance on CPU hardware.
+
+Keywords: Model optimization, Pruning, Quantization, Distillation
+
+## [opacus](https://github.com/pytorch/opacus)
+
+Opacus is a library that enables training PyTorch models with differential privacy. It supports training with minimal code changes required on the client, has little impact on training performance, and allows the client to track, at any given moment, the privacy budget expended.
+
+Keywords: Differential privacy
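+A minimal DP-SGD setup sketch (Opacus' `PrivacyEngine.make_private` is the documented entry point; the model, data and hyperparameters below are toy values):
+
+```python
+import torch
+from torch import nn
+from torch.utils.data import DataLoader, TensorDataset
+from opacus import PrivacyEngine
+
+model = nn.Linear(10, 2)
+optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
+data = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
+data_loader = DataLoader(data, batch_size=8)
+
+privacy_engine = PrivacyEngine()
+model, optimizer, data_loader = privacy_engine.make_private(
+    module=model,
+    optimizer=optimizer,
+    data_loader=data_loader,
+    noise_multiplier=1.0,  # noise added to clipped gradients
+    max_grad_norm=1.0,     # per-sample gradient clipping bound
+)
+```
+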
+## [LAVIS](https://github.com/salesforce/LAVIS)
+
+[LAVIS](https://github.com/salesforce/LAVIS) is a Python deep learning library for LAnguage-and-VISion intelligence research and applications. This library aims to provide engineers and researchers with a one-stop solution to rapidly develop models for their specific multimodal scenarios, and benchmark them across standard and customized datasets. It features a unified interface design to access these models.
+
+Keywords: Multimodal, NLP, Vision
+
+## [buzz](https://github.com/chidiwilliams/buzz)
+
+Buzz transcribes and translates audio offline on your personal computer. Powered by OpenAI's Whisper.
+
+Keywords: Audio transcription, Translation
+
+## [rust-bert](https://github.com/guillaume-be/rust-bert)
+
+Rust-native state-of-the-art Natural Language Processing models and pipelines. Port of Hugging Face's Transformers library, using the tch-rs crate and pre-processing from rust-tokenizers. Supports multi-threaded tokenization and GPU inference. This repository exposes the model base architecture, task-specific heads and ready-to-use pipelines.
+
+Keywords: Rust, BERT, Inference
+
+## [EasyNLP](https://github.com/alibaba/EasyNLP)
+
+[EasyNLP](https://github.com/alibaba/EasyNLP) is an easy-to-use NLP development and application toolkit in PyTorch, first released inside Alibaba in 2021. It is built with scalable distributed training strategies and supports a comprehensive suite of NLP algorithms for various NLP applications. [EasyNLP](https://github.com/alibaba/EasyNLP) integrates knowledge distillation and few-shot learning for deploying large pre-trained models, together with various popular multi-modality pre-trained models. It provides a unified framework of model training, inference, and deployment for real-world applications.
+
+Keywords: NLP, Knowledge distillation, Few-shot learning, Multi-modality, Training, Inference, Deployment
+
+## [TurboTransformers](https://github.com/Tencent/TurboTransformers)
+
+A fast and user-friendly runtime for transformer inference (Bert, Albert, GPT2, Decoders, etc) on CPU and GPU.
+
+Keywords: Optimization, Performance
+
+## [hivemind](https://github.com/learning-at-home/hivemind)
+
+Hivemind is a PyTorch library for decentralized deep learning across the Internet. Its intended usage is training one large model on hundreds of computers from different universities, companies, and volunteers.
+
+Keywords: Decentralized training
+
+## [docquery](https://github.com/impira/docquery)
+
+DocQuery is a library and command-line tool that makes it easy to analyze semi-structured and unstructured documents (PDFs, scanned images, etc.) using large language models (LLMs). You simply point DocQuery at one or more documents and specify a question you want to ask. DocQuery is created by the team at Impira.
+
+Keywords: Semi-structured documents, Unstructured documents, LLM, Document Question Answering
+
+## [CodeGeeX](https://github.com/THUDM/CodeGeeX)
+
+[CodeGeeX](https://github.com/THUDM/CodeGeeX) is a large-scale multilingual code generation model with 13 billion parameters, pre-trained on a large code corpus of more than 20 programming languages. It has several unique features:
+- Multilingual code generation
+- Crosslingual code translation
+- A customizable programming assistant
+
+Keywords: Code Generation Model
+
+## [ktrain](https://github.com/amaiya/ktrain)
+
+[ktrain](https://github.com/amaiya/ktrain) is a lightweight wrapper for the deep learning library TensorFlow Keras (and other libraries) to help build, train, and deploy neural networks and other machine learning models. Inspired by ML framework extensions like fastai and ludwig, [ktrain](https://github.com/amaiya/ktrain) is designed to make deep learning and AI more accessible and easier to apply for both newcomers and experienced practitioners.
+
+Keywords: Keras wrapper, Model building, Training, Deployment
+
+## [FastDeploy](https://github.com/PaddlePaddle/FastDeploy)
+
+[FastDeploy](https://github.com/PaddlePaddle/FastDeploy) is an easy-to-use and high performance AI model deployment toolkit for Cloud, Mobile and Edge, with an out-of-the-box, unified experience and end-to-end optimization for over 160+ Text, Vision, Speech and Cross-modal AI models. This includes image classification, object detection, OCR, face detection, matting, pp-tracking, NLP, stable diffusion, TTS and other tasks to meet developers' industrial deployment needs for multi-scenario, multi-hardware and multi-platform.
+
+Keywords: Model deployment, Cloud, Mobile, Edge
+
+## [underthesea](https://github.com/undertheseanlp/underthesea)
+
+[underthesea](https://github.com/undertheseanlp/underthesea) is a Vietnamese NLP toolkit. Underthesea is a suite of open source Python modules, datasets and tutorials supporting research and development in Vietnamese Natural Language Processing. It provides an extremely easy API to quickly apply pretrained NLP models to Vietnamese text, for tasks such as word segmentation, part-of-speech tagging (PoS), named entity recognition (NER), text classification and dependency parsing.
+
+Keywords: Vietnamese, NLP
+## [hasktorch](https://github.com/hasktorch/hasktorch)
+
+Hasktorch is a library for tensors and neural networks in Haskell. It is an independent open source community project which leverages the core C++ libraries shared by PyTorch.
+
+Keywords: Haskell, Neural Networks
+
+## [donut](https://github.com/clovaai/donut)
+
+Donut, or Document understanding transformer, is a new method of document understanding that utilizes an OCR-free end-to-end Transformer model.
+
+Donut does not require off-the-shelf OCR engines/APIs, yet it shows state-of-the-art performance on various visual document understanding tasks, such as visual document classification or information extraction (a.k.a. document parsing).
+
+Keywords: Document Understanding
+
+## [transformers-interpret](https://github.com/cdpierse/transformers-interpret)
+
+Transformers Interpret is a model explainability tool designed to work exclusively with the transformers package.
+
+In line with the philosophy of the Transformers package, Transformers Interpret allows any transformers model to be explained in just two lines. Explainers are available for both text and computer vision models. Visualizations are also available in notebooks and as savable PNG and HTML files.
+
+Keywords: Model interpretation, Visualization
+
+## [mlrun](https://github.com/mlrun/mlrun)
+
+MLRun is an open MLOps platform for quickly building and managing continuous ML applications across their lifecycle. MLRun integrates into your development and CI/CD environment and automates the delivery of production data, ML pipelines, and online applications, significantly reducing engineering efforts, time to production, and computation resources. With MLRun, you can choose any IDE on your local machine or on the cloud. MLRun breaks the silos between data, ML, software, and DevOps/MLOps teams, enabling collaboration and fast continuous improvements.
+
+Keywords: MLOps
+
+## [FederatedScope](https://github.com/alibaba/FederatedScope)
+
+[FederatedScope](https://github.com/alibaba/FederatedScope) is a comprehensive federated learning platform that provides convenient usage and flexible customization for various federated learning tasks in both academia and industry. Based on an event-driven architecture, [FederatedScope](https://github.com/alibaba/FederatedScope) integrates rich collections of functionalities to satisfy the burgeoning demands from federated learning, and aims to build up an easy-to-use platform for promoting learning safely and effectively.
+
+Keywords: Federated learning, Event-driven
+
+## [pythainlp](https://github.com/PyThaiNLP/pythainlp)
+
+PyThaiNLP is a Python package for text processing and linguistic analysis, similar to NLTK, with a focus on the Thai language.
+
+Keywords: Thai, NLP, NLTK
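+A quick tokenization sketch (both helpers are part of PyThaiNLP's documented API):
+
+```python
+from pythainlp.tokenize import word_tokenize
+from pythainlp.tag import pos_tag
+
+words = word_tokenize("ผมรักภาษาไทย")  # Thai for "I love the Thai language"
+print(pos_tag(words))                  # (word, POS) pairs
+```
+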
+## [FlagAI](https://github.com/FlagAI-Open/FlagAI)
+
+[FlagAI](https://github.com/FlagAI-Open/FlagAI) (Fast LArge-scale General AI models) is a fast, easy-to-use and extensible toolkit for large-scale models. Our goal is to support training, fine-tuning, and deployment of large-scale models on various downstream tasks with multi-modality.
+
+Keywords: Large models, Training, Fine-tuning, Deployment, Multi-modal
+
+## [pyserini](https://github.com/castorini/pyserini)
+
+[pyserini](https://github.com/castorini/pyserini) is a Python toolkit for reproducible information retrieval research with sparse and dense representations. Retrieval using sparse representations is provided via integration with the group's Anserini IR toolkit. Retrieval using dense representations is provided via integration with Facebook's Faiss library.
+
+Keywords: IR, Information Retrieval, Dense, Sparse
+
+## [baal](https://github.com/baal-org/baal)
+
+[baal](https://github.com/baal-org/baal) is an active learning library that supports both industrial applications and research use cases. [baal](https://github.com/baal-org/baal) currently supports Monte-Carlo Dropout, MCDropConnect, deep ensembles, and semi-supervised learning.
+
+Keywords: Active Learning, Research, Labeling
+
diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile
index 3ad87790d61ca8..c5ada9209bca9a 100644
--- a/docker/transformers-all-latest-gpu/Dockerfile
+++ b/docker/transformers-all-latest-gpu/Dockerfile
@@ -35,7 +35,7 @@ RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION
 RUN python3 -m pip install --no-cache-dir -U tensorflow==2.12 protobuf==3.20.3 tensorflow_text tensorflow_probability
 RUN python3 -m pip uninstall -y flax jax

-RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://software.intel.com/ipex-whl-stable
+RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://developer.intel.com/ipex-whl-stable-cpu

 RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
 RUN python3 -m pip install -U "itsdangerous<2.1.0"
diff --git a/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile
index 6d54b971d22c12..b6e892a5e15beb 100644
--- a/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile
+++ b/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile
@@ -1,12 +1,12 @@
 # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel_22-08.html#rel_22-08
-FROM nvcr.io/nvidia/pytorch:22.08-py3
+FROM nvcr.io/nvidia/pytorch:22.12-py3
 LABEL maintainer="Hugging Face"

 ARG DEBIAN_FRONTEND=noninteractive

 ARG PYTORCH='2.0.1'
 # Example: `cu102`, `cu113`, etc.
-ARG CUDA='cu117'
+ARG CUDA='cu118'

 RUN apt -y update
 RUN apt install -y libaio-dev
@@ -15,6 +15,8 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip
 ARG REF=main
 RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

+RUN python3 -m pip uninstall -y torch torchvision torchaudio
+
 # Install latest release PyTorch
 # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
 # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
diff --git a/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile
index 1e69546a21eba6..50efc08129d866 100644
--- a/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile
+++ b/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile
@@ -1,11 +1,11 @@
 # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel_22-08.html#rel_22-08
-FROM nvcr.io/nvidia/pytorch:22.08-py3
+FROM nvcr.io/nvidia/pytorch:22.12-py3
 LABEL maintainer="Hugging Face"

 ARG DEBIAN_FRONTEND=noninteractive

 # Example: `cu102`, `cu113`, etc.
-ARG CUDA='cu117'
+ARG CUDA='cu118'

 RUN apt -y update
 RUN apt install -y libaio-dev
@@ -14,6 +14,8 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip
 ARG REF=main
 RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

+RUN python3 -m pip uninstall -y torch torchvision torchaudio
+
 # Install **nightly** release PyTorch (flag `--pre`)
 # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
 # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
diff --git a/docker/transformers-pytorch-gpu/Dockerfile b/docker/transformers-pytorch-gpu/Dockerfile
index 9bf91674afcd11..d06a523af0ce9f 100644
--- a/docker/transformers-pytorch-gpu/Dockerfile
+++ b/docker/transformers-pytorch-gpu/Dockerfile
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu20.04
+FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
 LABEL maintainer="Hugging Face"

 ARG DEBIAN_FRONTEND=noninteractive
@@ -16,7 +16,7 @@ ARG PYTORCH='2.0.1'
 ARG TORCH_VISION=''
 ARG TORCH_AUDIO=''
 # Example: `cu102`, `cu113`, etc.
-ARG CUDA='cu117'
+ARG CUDA='cu118'

 RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
 RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='$TORCH_VISION'.*' || VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
diff --git a/docs/source/en/generation_strategies.mdx b/docs/source/en/generation_strategies.mdx
index 2b4f9880cffc65..b59649bae41dec 100644
--- a/docs/source/en/generation_strategies.mdx
+++ b/docs/source/en/generation_strategies.mdx
@@ -338,9 +338,8 @@ For the complete list of the available parameters, refer to the [API documentati
 Assisted decoding is a modification of the decoding strategies above that uses an assistant model with the same
 tokenizer (ideally a much smaller model) to greedily generate a few candidate tokens. The main model then validates
 the candidate tokens in a single forward pass, which speeds up the decoding process. Currently, only greedy search
-and sampling are supported with assisted decoding, and doesn't support batched inputs.
-
-
+and sampling are supported with assisted decoding, which doesn't support batched inputs. To learn more about assisted
+decoding, check [this blog post](https://huggingface.co/blog/assisted-generation).

 To enable assisted decoding, set the `assistant_model` argument with a model.

@@ -364,8 +363,6 @@ To enable assisted decoding, set the `assistant_model` argument with a model.

 When using assisted decoding with sampling methods, you can use the `temperature` argument to control the randomness
 just like in multinomial sampling.
 However, in assisted decoding, reducing the temperature will help improve latency.
-
-
 ```python
 >>> from transformers import AutoModelForCausalLM, AutoTokenizer
diff --git a/docs/source/en/model_doc/pix2struct.mdx b/docs/source/en/model_doc/pix2struct.mdx
index c6d31362856959..f4ead88f5cc8fe 100644
--- a/docs/source/en/model_doc/pix2struct.mdx
+++ b/docs/source/en/model_doc/pix2struct.mdx
@@ -25,6 +25,8 @@ Tips:

 Pix2Struct has been fine tuned on a variety of tasks and datasets, ranging from image captioning, visual question answering (VQA) over different inputs (books, charts, science diagrams), captioning UI components etc. The full list can be found in Table 1 of the paper. We therefore advise you to use these models for the tasks they have been fine tuned on. For instance, if you want to use Pix2Struct for UI captioning, you should use the model fine tuned on the UI dataset. If you want to use Pix2Struct for image captioning, you should use the model fine tuned on the natural images captioning dataset and so on.

+If you want to use the model to perform conditional text captioning, make sure to use the processor with `add_special_tokens=False`.
+
 This model was contributed by [ybelkada](https://huggingface.co/ybelkada).
 The original code can be found [here](https://github.com/google-research/pix2struct).
diff --git a/docs/source/ko/_toctree.yml b/docs/source/ko/_toctree.yml
index b983c2d45ea25c..3bae4561b1eeda 100644
--- a/docs/source/ko/_toctree.yml
+++ b/docs/source/ko/_toctree.yml
@@ -79,8 +79,8 @@
 - sections:
   - local: in_translation
     title: (번역중) Audio classification
-  - local: in_translation
-    title: (번역중) Automatic speech recognition
+  - local: tasks/asr
+    title: 자동 음성 인식
   title: (번역중) 오디오
   isExpanded: false
 - sections:
diff --git a/docs/source/ko/tasks/asr.mdx b/docs/source/ko/tasks/asr.mdx
new file mode 100644
index 00000000000000..ec84bd4e8f7e08
--- /dev/null
+++ b/docs/source/ko/tasks/asr.mdx
@@ -0,0 +1,376 @@
+
+# 자동 음성 인식[[automatic-speech-recognition]]
+
+[[open-in-colab]]
+
+자동 음성 인식(Automatic Speech Recognition, ASR)은 음성 신호를 텍스트로 변환하여 음성 입력 시퀀스를 텍스트 출력에 매핑합니다. Siri와 Alexa와 같은 가상 어시스턴트는 ASR 모델을 사용하여 일상적으로 사용자를 돕고 있으며, 회의 중 라이브 캡션 및 메모 작성과 같은 유용한 사용자 친화적 응용 프로그램도 많이 있습니다.
+
+이 가이드에서 소개할 내용은 아래와 같습니다:
+
+1. [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트에서 [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base)를 미세 조정하여 오디오를 텍스트로 변환합니다.
+2. 미세 조정한 모델을 추론에 사용합니다.
+
+<Tip>
+
+이 튜토리얼에서 설명하는 작업은 다음 모델 아키텍처에 의해 지원됩니다:
+
+[Data2VecAudio](../model_doc/data2vec-audio), [Hubert](../model_doc/hubert), [M-CTC-T](../model_doc/mctct), [SEW](../model_doc/sew), [SEW-D](../model_doc/sew-d), [UniSpeech](../model_doc/unispeech), [UniSpeechSat](../model_doc/unispeech-sat), [Wav2Vec2](../model_doc/wav2vec2), [Wav2Vec2-Conformer](../model_doc/wav2vec2-conformer), [WavLM](../model_doc/wavlm)
+
+</Tip>
+
+시작하기 전에 필요한 모든 라이브러리가 설치되어 있는지 확인하세요:
+
+```bash
+pip install transformers datasets evaluate jiwer
+```
+
+Hugging Face 계정에 로그인하면 모델을 업로드하고 커뮤니티에 공유할 수 있습니다. 토큰을 입력하여 로그인하세요.
+
+```py
+>>> from huggingface_hub import notebook_login
+
+>>> notebook_login()
+```
+
+## MInDS-14 데이터 세트 가져오기[[load-minds-14-dataset]]
+
+먼저, 🤗 Datasets 라이브러리에서 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트의 일부분을 가져오세요. 이렇게 하면 전체 데이터 세트에 대한 훈련에 시간을 들이기 전에 모든 것이 작동하는지 실험하고 검증할 수 있습니다.
+
+```py
+>>> from datasets import load_dataset, Audio
+
+>>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train[:100]")
+```
+
+[`~Dataset.train_test_split`] 메소드를 사용하여 데이터 세트의 `train`을 훈련 세트와 테스트 세트로 나누세요:
+
+```py
+>>> minds = minds.train_test_split(test_size=0.2)
+```
+
+그리고 데이터 세트를 확인하세요:
+
+```py
+>>> minds
+DatasetDict({
+    train: Dataset({
+        features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'],
+        num_rows: 16
+    })
+    test: Dataset({
+        features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'],
+        num_rows: 4
+    })
+})
+```
+
+데이터 세트에는 `lang_id`와 `english_transcription`과 같은 유용한 정보가 많이 포함되어 있지만, 이 가이드에서는 `audio`와 `transcription`에 초점을 맞출 것입니다. 다른 열은 [`~datasets.Dataset.remove_columns`] 메소드를 사용하여 제거하세요:
+
+```py
+>>> minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"])
+```
+
+예시를 다시 한번 확인해보세요:
+
+```py
+>>> minds["train"][0]
+{'audio': {'array': array([-0.00024414,  0.        ,  0.        , ...,  0.00024414,
+         0.00024414,  0.00024414], dtype=float32),
+  'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav',
+  'sampling_rate': 8000},
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav',
+ 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"}
+```
+
+두 개의 필드가 있습니다:
+
+- `audio`: 오디오 파일을 가져오고 리샘플링하기 위해 호출해야 하는 음성 신호의 1차원 `array(배열)`
+- `transcription`: 목표 텍스트
+
+## 전처리[[preprocess]]
+
+다음으로 오디오 신호를 처리하기 위한 Wav2Vec2 프로세서를 가져옵니다:
+
+```py
+>>> from transformers import AutoProcessor
+
+>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base")
+```
+
+MInDS-14 데이터 세트의 샘플링 레이트는 8000Hz이므로([데이터 세트 카드](https://huggingface.co/datasets/PolyAI/minds14)에서 확인), 사전 훈련된 Wav2Vec2 모델을 사용하려면 데이터 세트를 16000Hz로 리샘플링해야 합니다:
+
+```py
+>>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000))
+>>> minds["train"][0]
+{'audio': {'array': array([-2.38064706e-04, -1.58618059e-04, -5.43987835e-06, ...,
+         2.78103951e-04,  2.38446111e-04,  1.18740834e-04], dtype=float32),
+  'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav',
+  'sampling_rate': 16000},
+ 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav',
+ 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"}
+```
+
+위의 'transcription'에서 볼 수 있듯이 텍스트는 대문자와 소문자가 섞여 있습니다. Wav2Vec2 토크나이저는 대문자 문자에 대해서만 훈련되어 있으므로 텍스트가 토크나이저의 어휘와 일치하는지 확인해야 합니다:
+
+```py
+>>> def uppercase(example):
+...     return {"transcription": example["transcription"].upper()}
+
+
+>>> minds = minds.map(uppercase)
+```
+
+이제 다음 작업을 수행할 전처리 함수를 만들어보겠습니다:
+
+1. `audio` 열을 호출하여 오디오 파일을 가져오고 리샘플링합니다.
+2. 오디오 파일에서 `input_values`를 추출하고 프로세서로 `transcription` 열을 토큰화합니다.
+
+```py
+>>> def prepare_dataset(batch):
+...     audio = batch["audio"]
+...     batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["transcription"])
+...     batch["input_length"] = len(batch["input_values"][0])
+...     return batch
+```
+
+전체 데이터 세트에 전처리 함수를 적용하려면 🤗 Datasets [`~datasets.Dataset.map`] 함수를 사용하세요. `num_proc` 매개변수를 사용하여 프로세스 수를 늘리면 `map`의 속도를 높일 수 있습니다.
+[`~datasets.Dataset.remove_columns`] 메소드를 사용하여 필요하지 않은 열을 제거하세요:
+
+```py
+>>> encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4)
+```
+
+🤗 Transformers에는 자동 음성 인식용 데이터 콜레이터가 없으므로 예제 배치를 생성하려면 [`DataCollatorWithPadding`]을 조정해야 합니다. 이렇게 하면 데이터 콜레이터는 텍스트와 레이블을 배치에서 가장 긴 요소의 길이에 동적으로 패딩하여 길이를 균일하게 합니다. `tokenizer` 함수에서 `padding=True`를 설정하여 텍스트를 패딩할 수 있지만, 동적 패딩이 더 효율적입니다.
+
+다른 데이터 콜레이터와 달리 이 특정 데이터 콜레이터는 `input_values`와 `labels`에 대해 다른 패딩 방법을 적용해야 합니다.
+
+```py
+>>> import torch
+
+>>> from dataclasses import dataclass, field
+>>> from typing import Any, Dict, List, Optional, Union
+
+
+>>> @dataclass
+... class DataCollatorCTCWithPadding:
+...     processor: AutoProcessor
+...     padding: Union[bool, str] = "longest"
+
+...     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+...         # 입력과 레이블을 분할합니다
+...         # 길이가 다르고, 각각 다른 패딩 방법을 사용해야 하기 때문입니다
+...         input_features = [{"input_values": feature["input_values"][0]} for feature in features]
+...         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+...         batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")
+
+...         labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt")
+
+...         # 패딩에 대해 손실을 적용하지 않도록 -100으로 대체합니다
+...         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+...         batch["labels"] = labels
+
+...         return batch
+```
+
+이제 `DataCollatorCTCWithPadding`을 인스턴스화합니다:
+
+```py
+>>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding="longest")
+```
+
+## 평가하기[[evaluate]]
+
+훈련 중에 평가 지표를 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리를 사용하면 평가 방법을 빠르게 불러올 수 있습니다. 이 작업에서는 [단어 오류율(Word Error Rate, WER)](https://huggingface.co/spaces/evaluate-metric/wer) 평가 지표를 가져옵니다. (평가 지표를 불러오고 계산하는 방법은 🤗 Evaluate [둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour)를 참조하세요):
+
+```py
+>>> import evaluate
+
+>>> wer = evaluate.load("wer")
+```
+
+그런 다음 예측값과 레이블을 [`~evaluate.EvaluationModule.compute`]에 전달하여 WER을 계산하는 함수를 만듭니다:
+
+```py
+>>> import numpy as np
+
+
+>>> def compute_metrics(pred):
+...     pred_logits = pred.predictions
+...     pred_ids = np.argmax(pred_logits, axis=-1)
+
+...     pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
+
+...     pred_str = processor.batch_decode(pred_ids)
+...     label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
+
+...     wer_score = wer.compute(predictions=pred_str, references=label_str)
+
## Evaluate[[evaluate]]

Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [word error rate (WER)](https://huggingface.co/spaces/evaluate-metric/wer) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):

```py
>>> import evaluate

>>> wer = evaluate.load("wer")
```

Then create a function that passes your predictions and labels to [`~evaluate.EvaluationModule.compute`] to calculate the WER:

```py
>>> import numpy as np


>>> def compute_metrics(pred):
...     pred_logits = pred.predictions
...     pred_ids = np.argmax(pred_logits, axis=-1)

...     pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

...     pred_str = processor.batch_decode(pred_ids)
...     label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

...     # use a new name here so the global `wer` metric isn't shadowed inside the function
...     wer_score = wer.compute(predictions=pred_str, references=label_str)

...     return {"wer": wer_score}
```

Your `compute_metrics` function is ready to go now, and you'll return to it when you set up your training.

## Train[[train]]

<frameworkcontent>
<pt>
<Tip>

If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)!

</Tip>

You're ready to start training your model now! Load Wav2Vec2 with [`AutoModelForCTC`]. Specify the reduction to apply with the `ctc_loss_reduction` parameter. It is often better to use the average instead of the default summation:

```py
>>> from transformers import AutoModelForCTC, TrainingArguments, Trainer

>>> model = AutoModelForCTC.from_pretrained(
...     "facebook/wav2vec2-base",
...     ctc_loss_reduction="mean",
...     pad_token_id=processor.tokenizer.pad_token_id,
... )
```

At this point, only three steps remain:

1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir`, which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). Since `evaluation_strategy="steps"` below, the [`Trainer`] will evaluate the WER and save a training checkpoint every `eval_steps`/`save_steps` steps.
2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call [`~Trainer.train`] to finetune your model.

```py
>>> training_args = TrainingArguments(
...     output_dir="my_awesome_asr_mind_model",
...     per_device_train_batch_size=8,
...     gradient_accumulation_steps=2,
...     learning_rate=1e-5,
...     warmup_steps=500,
...     max_steps=2000,
...     gradient_checkpointing=True,
...     fp16=True,
...     group_by_length=True,
...     evaluation_strategy="steps",
...     per_device_eval_batch_size=8,
...     save_steps=1000,
...     eval_steps=1000,
...     logging_steps=25,
...     load_best_model_at_end=True,
...     metric_for_best_model="wer",
...     greater_is_better=False,
...     push_to_hub=True,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=encoded_minds["train"],
...     eval_dataset=encoded_minds["test"],
...     tokenizer=processor.feature_extractor,
...     data_collator=data_collator,
...     compute_metrics=compute_metrics,
... )

>>> trainer.train()
```

Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:

```py
>>> trainer.push_to_hub()
```
</pt>
</frameworkcontent>

<Tip>

For a more in-depth example of how to finetune a model for automatic speech recognition, take a look at this [blog post](https://huggingface.co/blog/fine-tune-wav2vec2-english) for English ASR and this [post](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) for multilingual ASR.

</Tip>
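Before moving on to inference, you can also record a final WER on the held-out split with the [`Trainer`] you just fine-tuned. A quick check (this assumes the `trainer` object from the previous step is still in scope; the exact value will vary from run to run):

```py
>>> metrics = trainer.evaluate(encoded_minds["test"])
>>> print(f"test WER: {metrics['eval_wer']:.3f}")  # lower is better
```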
## Inference[[inference]]

Great, now that you've finetuned a model, you can use it for inference!

Load an audio file you'd like to run inference on. Remember to resample the sampling rate of the audio file to match the sampling rate of the model if you need to!

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> audio_file = dataset[0]["audio"]["path"]
```

The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for automatic speech recognition with your model, and pass your audio file to it:

```py
>>> from transformers import pipeline

>>> transcriber = pipeline("automatic-speech-recognition", model="stevhliu/my_awesome_asr_minds_model")
>>> transcriber(audio_file)
{'text': 'I WOUD LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'}
```

<Tip>

The transcription is decent, but it could be better! Try finetuning your model on more examples to get even better results!

</Tip>

You can also manually replicate the results of the `pipeline` if you'd like:

<frameworkcontent>
<pt>
Load a processor to preprocess the audio file and transcription, and return the `input` as PyTorch tensors:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("stevhliu/my_awesome_asr_mind_model")
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
```

Pass your inputs to the model and return the logits:

```py
>>> import torch

>>> from transformers import AutoModelForCTC

>>> model = AutoModelForCTC.from_pretrained("stevhliu/my_awesome_asr_mind_model")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
```

Get the predicted `input_ids` with the highest probability, and use the processor to decode the predicted `input_ids` back into text:

```py
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription
['I WOUL LIKE O SET UP JOINT ACOUNT WTH Y PARTNER']
```
</pt>
</frameworkcontent>
\ No newline at end of file
diff --git a/examples/pytorch/test_accelerate_examples.py b/examples/pytorch/test_accelerate_examples.py index da777d627285af..4cfe45b0229440 100644 --- a/examples/pytorch/test_accelerate_examples.py +++ b/examples/pytorch/test_accelerate_examples.py @@ -177,7 +177,7 @@ def test_run_ner_no_trainer(self): self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer"))) - @unittest.skip(reason="Fix me @zack") + @unittest.skip(reason="Fix me @muellerzr") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) def test_run_squad_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() @@ -270,6 +270,7 @@ def test_run_translation_no_trainer(self): --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 + --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 diff --git a/setup.py b/setup.py index f553e69bc12981..952cfcf510aad9 100644 --- a/setup.py +++ b/setup.py @@ -38,14 +38,9 @@ 7. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). - Clean up your build and dist folders (to avoid re-uploading oldies): - rm -rf dist - rm -rf build + Run `make build-release`. This will build the release and do some sanity checks for you.
If this ends with an error + message, you need to fix things before going further. - For the wheel, run: "python setup.py bdist_wheel" in the top level directory. - (this will build a wheel for the python version you use to build it). - - For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions. 8. Check that everything looks correct by uploading the package to the pypi test server: @@ -61,6 +56,7 @@ Check you can run the following commands: python -c "from transformers import pipeline; classifier = pipeline('text-classification'); print(classifier('What a nice release'))" python -c "from transformers import *" + python utils/check_build.py --check_lib If making a patch release, double check the bug you are patching is indeed resolved. @@ -446,7 +442,7 @@ def run(self): package_dir={"": "src"}, packages=find_packages("src"), include_package_data=True, - package_data={"transformers": ["*.cu", "*.cpp", "*.cuh", "*.h", "*.pyx"]}, + package_data={"": ["**/*.cu", "**/*.cpp", "**/*.cuh", "**/*.h", "**/*.pyx"]}, zip_safe=False, extras_require=extras, entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]}, diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index eee84ba5f96591..6a612293eb661f 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -474,7 +474,7 @@ def can_generate(self) -> bool: `bool`: Whether this model can generate sequences with `.generate()`. """ # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation - if "GenerationMixin" in str(self.prepare_inputs_for_generation): + if "GenerationMixin" in str(self.prepare_inputs_for_generation.__func__): return False return True diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 35c526379c88c4..5618472744fbf9 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1243,7 +1243,7 @@ def can_generate(self) -> bool: `bool`: Whether this model can generate sequences with `.generate()`. 
""" # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation - if "GenerationMixin" in str(self.prepare_inputs_for_generation): + if "GenerationMixin" in str(self.prepare_inputs_for_generation.__func__): return False return True diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index bf06d9c4053822..a8e8e4b2e24146 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -207,22 +207,29 @@ def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtil # if no floating dtype was found return whatever the first dtype is return last_dtype - else: - # For nn.DataParallel compatibility in PyTorch > 1.5 - def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: - tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] - return tuples - - gen = parameter._named_members(get_members_fn=find_tensor_attributes) - last_tuple = None - for tuple in gen: - last_tuple = tuple - if tuple[1].is_floating_point(): - return tuple[1].dtype - + # For nn.DataParallel compatibility in PyTorch > 1.5 + def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: + tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] + return tuples + + gen = parameter._named_members(get_members_fn=find_tensor_attributes) + last_tuple = None + for tuple in gen: + last_tuple = tuple + if tuple[1].is_floating_point(): + return tuple[1].dtype + + if last_tuple is not None: # fallback to the last dtype return last_tuple[1].dtype + # fallback to buffer dtype + for t in parameter.buffers(): + last_dtype = t.dtype + if t.is_floating_point(): + return t.dtype + return last_dtype + def get_state_dict_float_dtype(state_dict): """ diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index 247ee395dc60fe..c7f76b175b0b28 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -44,7 +44,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -188,16 +188,7 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py index 17cc7f95799fae..aad113d454428b 100644 --- a/src/transformers/models/auto/auto_factory.py +++ b/src/transformers/models/auto/auto_factory.py @@ -435,19 +435,24 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): ] hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs} if not isinstance(config, PretrainedConfig): - kwargs_copy = copy.deepcopy(kwargs) + kwargs_orig = copy.deepcopy(kwargs) # ensure not to pollute the config object with torch_dtype="auto" - since it's # meaningless in the context of the config object - torch.dtype values are acceptable - if kwargs_copy.get("torch_dtype", None) == "auto": - _ = kwargs_copy.pop("torch_dtype") + if kwargs.get("torch_dtype", None) == "auto": + _ = kwargs.pop("torch_dtype") config, kwargs = AutoConfig.from_pretrained( pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **hub_kwargs, - **kwargs_copy, + **kwargs, ) + + # if torch_dtype=auto was passed here, ensure to pass it on + if kwargs_orig.get("torch_dtype", None) == "auto": + kwargs["torch_dtype"] = "auto" + if hasattr(config, "auto_map") and cls.__name__ in config.auto_map: if not trust_remote_code: raise ValueError( diff --git a/src/transformers/models/bart/modeling_flax_bart.py b/src/transformers/models/bart/modeling_flax_bart.py index ac292cc77707db..b7ce63ffcc77df 100644 --- a/src/transformers/models/bart/modeling_flax_bart.py +++ b/src/transformers/models/bart/modeling_flax_bart.py @@ -22,7 +22,6 @@ import flax.linen as nn import jax import jax.numpy as jnp -import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights @@ -218,15 +217,15 @@ """ -def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray: +def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. 
""" - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 6e29434c4df158..39537b88bfce98 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -40,7 +40,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, add_code_sample_docstrings, @@ -763,16 +763,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -965,16 +956,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index 5391d71a916c3b..50ff7f2dddaa11 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -49,7 +49,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -198,16 +198,7 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/biogpt/modeling_biogpt.py b/src/transformers/models/biogpt/modeling_biogpt.py index 4fc7f7b4893120..df6c18182dc272 100755 --- a/src/transformers/models/biogpt/modeling_biogpt.py +++ b/src/transformers/models/biogpt/modeling_biogpt.py @@ -546,6 +546,12 @@ def forward( if attention_mask is None: attention_mask = torch.ones(inputs_embeds.shape[:2], dtype=torch.bool, device=inputs_embeds.device) + elif attention_mask.shape[1] != past_key_values_length + input_shape[1]: + raise ValueError( + f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " + f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)" + ) + # embed positions positions = self.embed_positions(attention_mask, past_key_values_length) diff --git a/src/transformers/models/blenderbot/modeling_flax_blenderbot.py b/src/transformers/models/blenderbot/modeling_flax_blenderbot.py index 629ddb99a80c5c..6796f48163a7cb 100644 --- a/src/transformers/models/blenderbot/modeling_flax_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_flax_blenderbot.py @@ -209,11 +209,11 @@ def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_tok """ Shift input ids one token to the right. """ - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 6b95bd56739adc..ee5755c2035cab 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -38,7 +38,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, add_code_sample_docstrings, @@ -746,16 +746,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -956,16 +947,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py index 226e401c921ea5..e13b90c0600946 100644 --- a/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py @@ -23,7 +23,6 @@ import flax.linen as nn import jax import jax.numpy as jnp -import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights @@ -221,11 +220,11 @@ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_ """ Shift input ids one token to the right. 
""" - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 3d521ea77a4d67..e170085e91c57c 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -37,7 +37,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, add_code_sample_docstrings, @@ -752,16 +752,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -961,16 +952,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] diff --git a/src/transformers/models/blip/modeling_tf_blip.py b/src/transformers/models/blip/modeling_tf_blip.py index e166f40b9e2d9d..6ae7a2503cc336 100644 --- a/src/transformers/models/blip/modeling_tf_blip.py +++ b/src/transformers/models/blip/modeling_tf_blip.py @@ -29,7 +29,7 @@ shape_list, unpack_inputs, ) -from ...tf_utils import stable_softmax +from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, @@ -316,16 +316,7 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/blip/modeling_tf_blip_text.py b/src/transformers/models/blip/modeling_tf_blip_text.py index 262b2cb2796621..6e8ed8a891c04e 100644 --- a/src/transformers/models/blip/modeling_tf_blip_text.py +++ b/src/transformers/models/blip/modeling_tf_blip_text.py @@ -32,7 +32,7 @@ shape_list, unpack_inputs, ) -from ...tf_utils import invert_attention_mask, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, invert_attention_mask, stable_softmax from ...utils import add_start_docstrings_to_model_forward, logging from .configuration_blip import BlipTextConfig @@ -112,16 +112,7 @@ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_v position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) embeddings = inputs_embeds diff --git a/src/transformers/models/camembert/modeling_tf_camembert.py b/src/transformers/models/camembert/modeling_tf_camembert.py index 5142b3d82b04cb..c9e4c98c1467d5 100644 --- a/src/transformers/models/camembert/modeling_tf_camembert.py +++ b/src/transformers/models/camembert/modeling_tf_camembert.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -239,16 +239,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index d2e1b06e574a09..7cf52500aed63a 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -34,7 +34,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, @@ -238,16 +238,7 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index 3976be69eb5b86..e853da76277139 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -42,7 +42,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, @@ -124,16 +124,7 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py index dcd3f5a03e0c92..f4742b4e33d79c 100644 --- a/src/transformers/models/ctrl/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -32,7 +32,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_ctrl import CTRLConfig @@ -336,16 +336,7 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.w.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.w.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.w.vocab_size) inputs_embeds = self.w(input_ids, mode="embedding") seq_len = input_shape[-1] mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0) diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index 016ce15db61825..dcd0582777eb42 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -39,7 +39,7 @@ get_initializer, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_deberta import DebertaConfig @@ -778,16 +778,7 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index 015eb392574087..b3c210352a32b5 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -38,7 +38,7 @@ get_initializer, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_deberta_v2 import DebertaV2Config @@ -867,16 +867,7 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index 95c3aef4261572..3013f4ca30d7fe 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -43,7 +43,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, @@ -109,16 +109,7 @@ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=F assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index b782cc987bef26..82c3381724dcea 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -44,7 +44,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -528,16 +528,7 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index b27b134ecf29dc..3548e48c595a4a 100644 --- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -16,6 +16,7 @@ import gc +import inspect import os import tempfile import warnings @@ -245,6 +246,13 @@ def __init__( f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" ) + decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys()) + if "encoder_hidden_states" not in decoder_signature: + raise ValueError( + "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the " + "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350" + ) + # tie encoder, decoder weights if config set accordingly self.tie_weights() diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py index 1c90245b696c2c..5ec7f2932f5952 100644 --- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py @@ -14,7 +14,7 @@ # limitations under the License. """ Classes to support TF Encoder-Decoder architectures""" - +import inspect import re import warnings from typing import Optional, Tuple, Union @@ -266,6 +266,13 @@ def __init__( f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" ) + decoder_signature = set(inspect.signature(self.decoder.call).parameters.keys()) + if "encoder_hidden_states" not in decoder_signature: + raise ValueError( + "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the " + "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350" + ) + @property def dummy_inputs(self): """ diff --git a/src/transformers/models/esm/modeling_tf_esm.py b/src/transformers/models/esm/modeling_tf_esm.py index 980b6453f6d0a8..135c16a14b36dd 100644 --- a/src/transformers/models/esm/modeling_tf_esm.py +++ b/src/transformers/models/esm/modeling_tf_esm.py @@ -40,7 +40,7 @@ shape_list, unpack_inputs, ) -from ...tf_utils import stable_softmax +from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import logging from .configuration_esm import EsmConfig @@ -214,16 +214,7 @@ def call( position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.word_embeddings(input_ids) # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index 919cd6cc1e4227..b1dd523dedaf85 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -578,16 +578,7 @@ def call( # embeddings if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embeddings.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embeddings.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size) inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index 2b109cdbab8a2f..84254f2b288c58 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -42,7 +42,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -109,16 +109,7 @@ def call(self, input_ids=None, inputs_embeds=None, training=False): assert not (input_ids is not None and inputs_embeds is not None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(self.weight, input_ids) final_embeddings = self.LayerNorm(inputs=inputs_embeds) diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index a84fdbd8066407..d0c731878d2d39 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -39,7 +39,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, ModelOutput, @@ -437,16 +437,7 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.wte(input_ids, mode="embedding") position_embeds = tf.gather(self.wpe, position_ids) diff --git a/src/transformers/models/gptj/modeling_tf_gptj.py b/src/transformers/models/gptj/modeling_tf_gptj.py index f077a52a03ae02..fbef4f0effc733 100644 --- a/src/transformers/models/gptj/modeling_tf_gptj.py +++ b/src/transformers/models/gptj/modeling_tf_gptj.py @@ -43,7 +43,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import logging from .configuration_gptj import GPTJConfig @@ -437,16 +437,7 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.wte.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.wte.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.wte.vocab_size) inputs_embeds = self.wte(input_ids, mode="embedding") if token_type_ids is not None: diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py index 3826b83e7a4637..4891931c20abe5 100644 --- a/src/transformers/models/groupvit/modeling_tf_groupvit.py +++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py @@ -33,7 +33,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, @@ -572,16 +572,7 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index 2097ae58b8bf35..2755e055370b0d 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -41,7 +41,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_layoutlm import LayoutLMConfig @@ -140,16 +140,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 95ef5580b95409..491ef186e52275 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -36,6 +36,7 @@ keras_serializable, unpack_inputs, ) +from ...tf_utils import check_embeddings_within_bounds from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from .configuration_layoutlmv3 import LayoutLMv3Config @@ -240,16 +241,7 @@ def call( token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.word_embeddings.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.word_embeddings.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.word_embeddings.input_dim) inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 75d4a15f194d11..324482b4d2456e 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -33,7 +33,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, ModelOutput, @@ -1746,16 +1746,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] @@ -2038,16 +2029,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index e5e22a21276faf..c47df169655cdf 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -34,7 +34,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -538,16 +538,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/longt5/modeling_flax_longt5.py b/src/transformers/models/longt5/modeling_flax_longt5.py index 458f6d597f30c8..7fa708c59956a8 100644 --- a/src/transformers/models/longt5/modeling_flax_longt5.py +++ b/src/transformers/models/longt5/modeling_flax_longt5.py @@ -60,11 +60,11 @@ def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_tok """ Shift input ids one token to the right. """ - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 9408188100f199..948053c93e287a 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -32,7 +32,7 @@ shape_list, unpack_inputs, ) -from ...tf_utils import stable_softmax +from ...tf_utils import check_embeddings_within_bounds, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -232,16 +232,7 @@ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. 
This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/marian/modeling_flax_marian.py b/src/transformers/models/marian/modeling_flax_marian.py index 96b26f8325ce8c..c3d89b693a6771 100644 --- a/src/transformers/models/marian/modeling_flax_marian.py +++ b/src/transformers/models/marian/modeling_flax_marian.py @@ -231,11 +231,11 @@ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_ """ Shift input ids one token to the right. """ - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index a0e26de9bd7e93..17511588320193 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -37,7 +37,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, add_code_sample_docstrings, @@ -778,16 +778,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -990,16 +981,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
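The Flax `shift_tokens_right` rewrites in this patch (longt5 above; marian, mbart, mt5, t5, and pegasus below) all make the same change: JAX arrays are immutable, so NumPy-style in-place slice assignment has to become functional `.at[...].set(...)` updates that return a new array. A hedged standalone sketch with toy token ids:

```python
import jax.numpy as jnp

pad_token_id, decoder_start_token_id = 0, 2
input_ids = jnp.array([[5, 6, 7, -100]])  # -100 marks ignored label positions

shifted = jnp.zeros_like(input_ids)
shifted = shifted.at[:, 1:].set(input_ids[:, :-1])  # each .at update returns a new array
shifted = shifted.at[:, 0].set(decoder_start_token_id)
shifted = jnp.where(shifted == -100, pad_token_id, shifted)
print(shifted)  # [[2 5 6 7]]
```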
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/mbart/modeling_flax_mbart.py b/src/transformers/models/mbart/modeling_flax_mbart.py index 78375afce4fccc..aeeec3e583b79b 100644 --- a/src/transformers/models/mbart/modeling_flax_mbart.py +++ b/src/transformers/models/mbart/modeling_flax_mbart.py @@ -22,7 +22,6 @@ import flax.linen as nn import jax import jax.numpy as jnp -import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights @@ -223,20 +222,20 @@ def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int) -> jnp.ndarray Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not have a single `decoder_start_token_id` in contrast to other Bart-like models. """ - prev_output_tokens = np.array(input_ids).copy() + prev_output_tokens = jnp.array(input_ids).copy() if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` - prev_output_tokens = np.where(prev_output_tokens == -100, pad_token_id, input_ids) - index_of_eos = (np.where(prev_output_tokens != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1) - decoder_start_tokens = np.array( - [prev_output_tokens[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=np.int32 + prev_output_tokens = jnp.where(prev_output_tokens == -100, pad_token_id, input_ids) + index_of_eos = (jnp.where(prev_output_tokens != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1) + decoder_start_tokens = jnp.array( + [prev_output_tokens[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=jnp.int32 ).squeeze() - prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].copy() - prev_output_tokens[:, 0] = decoder_start_tokens + prev_output_tokens = prev_output_tokens.at[:, 1:].set(prev_output_tokens[:, :-1]) + prev_output_tokens = prev_output_tokens.at[:, 0].set(decoder_start_tokens) return prev_output_tokens diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 6f48062fc63795..13453bd22dbaaa 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -37,7 +37,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, add_code_sample_docstrings, @@ -770,16 +770,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -989,16 +980,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index 832a4fa3f52bef..c47cde847de424 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -212,16 +212,7 @@ def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 48866e21d4d8f8..08db3101730854 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -45,7 +45,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, @@ -144,16 +144,7 @@ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=F assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
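The MBart port in the hunk above follows the same immutable-update pattern but with a twist: MBart has no fixed `decoder_start_token_id`, so the last non-pad token (the language id) is wrapped around to position 0. A rough sketch with a hypothetical `<LID>` token id:

```python
import jax.numpy as jnp

pad_token_id = 1
input_ids = jnp.array([[10, 11, 12, 250004, 1, 1]])  # 250004: hypothetical <LID> id

prev = jnp.where(input_ids == -100, pad_token_id, input_ids)
# index of the last non-pad token in each row
index_of_eos = (jnp.where(prev != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1)
decoder_start_tokens = jnp.array(
    [prev[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=jnp.int32
).squeeze()
prev = prev.at[:, 1:].set(prev[:, :-1])
prev = prev.at[:, 0].set(decoder_start_tokens)
print(prev)  # [[250004 10 11 12 250004 1]]
```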
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/mt5/modeling_flax_mt5.py b/src/transformers/models/mt5/modeling_flax_mt5.py index 6b6eaf7fd135df..86ddf477ffab56 100644 --- a/src/transformers/models/mt5/modeling_flax_mt5.py +++ b/src/transformers/models/mt5/modeling_flax_mt5.py @@ -14,7 +14,7 @@ # limitations under the License. """ Flax mT5 model.""" -import numpy as np +import jax.numpy as jnp from ...utils import logging from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model @@ -27,15 +27,15 @@ # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right -def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray: +def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray: """ Shift input ids one token to the right. """ - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index 5723001729df79..7c04520c9c1fe2 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -35,7 +35,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -295,30 +295,12 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = self.tokens_embed(input_ids, mode="embedding") position_embeds = tf.gather(self.positions_embed, position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - token_type_ids, - tf.cast(self.config.vocab_size, dtype=token_type_ids.dtype), - message=( - "token_type_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(token_type_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(token_type_ids, self.config.vocab_size, "token_type_ids") token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding") else: token_type_embeds = 0 diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 94269ffbf0ae44..b6c84777cc1f69 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -642,6 +642,11 @@ def forward( # embed positions if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) + elif attention_mask.shape[1] != mask_seq_length: + raise ValueError( + f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " + f"{mask_seq_length} (sum of the lengths of current and past inputs)" + ) causal_attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index cd34130228a61a..1855fcb1bc034a 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -33,7 +33,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -631,20 +631,20 @@ def call( past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: attention_mask = tf.ones(inputs_embeds.shape[:2], dtype=tf.bool) + else: + tf.debugging.assert_equal( + attention_mask.shape[1], + past_key_values_length + input_shape[1], + message=( + f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " + f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)" + ), + ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) diff --git a/src/transformers/models/pegasus/modeling_flax_pegasus.py b/src/transformers/models/pegasus/modeling_flax_pegasus.py index b39e2c437e537e..ddd83709e911e2 100644 --- a/src/transformers/models/pegasus/modeling_flax_pegasus.py +++ b/src/transformers/models/pegasus/modeling_flax_pegasus.py @@ -214,11 +214,11 @@ def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_tok """ Shift input ids one token to the right. 
""" - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 5955d50d61c71e..1ccccc2dc5cec0 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -38,7 +38,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ContextManagers, add_code_sample_docstrings, @@ -782,16 +782,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -997,16 +988,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/pix2struct/modeling_pix2struct.py b/src/transformers/models/pix2struct/modeling_pix2struct.py index 1505ff07da4025..cd08c69086f701 100644 --- a/src/transformers/models/pix2struct/modeling_pix2struct.py +++ b/src/transformers/models/pix2struct/modeling_pix2struct.py @@ -1687,6 +1687,15 @@ def forward( >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) A stop sign is on a street corner. 
+ + >>> # conditional generation + >>> text = "A picture of" + >>> inputs = processor(text=text, images=image, return_tensors="pt", add_special_tokens=False) + + >>> generated_ids = model.generate(**inputs, max_new_tokens=50) + >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> print(generated_text) + A picture of a stop sign with a red stop sign on it. ``` Training: diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index 74683dfc0c0884..c4dc8c5a148b0c 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -45,7 +45,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -122,16 +122,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index 606afb754b4ffe..7aa2c9e07a3dff 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -144,16 +144,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
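Stepping back to the OPT hunks a few files up: both the PyTorch and TF decoders now validate that, when `past_key_values` are supplied, the attention mask covers the cached tokens as well as the new ones. A small sketch of the contract, with made-up lengths:

```python
import torch

batch_size, past_len, new_len = 2, 4, 1
mask_seq_length = past_len + new_len

# Correct: the mask spans cached + current tokens.
attention_mask = torch.ones(batch_size, mask_seq_length)

# A mask of shape (batch_size, new_len) now fails fast with a ValueError
# ("sum of the lengths of current and past inputs") instead of surfacing as a
# confusing shape mismatch deep inside the attention computation.
```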
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py index 1843605bd04a26..fedfea56a7a9b2 100644 --- a/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py +++ b/src/transformers/models/roberta_prelayernorm/modeling_tf_roberta_prelayernorm.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -149,16 +149,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 952250e68a04fb..2d1387d2d8d84d 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, add_code_sample_docstrings, @@ -175,16 +175,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/rwkv/modeling_rwkv.py b/src/transformers/models/rwkv/modeling_rwkv.py index dd85c279daeb60..3733f0db99db5a 100644 --- a/src/transformers/models/rwkv/modeling_rwkv.py +++ b/src/transformers/models/rwkv/modeling_rwkv.py @@ -679,7 +679,7 @@ def forward( all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: - return (hidden_states, state, all_hidden_states, all_self_attentions) + return tuple(x for x in [hidden_states, state, all_hidden_states, all_self_attentions] if x is not None) return RwkvOutput( last_hidden_state=hidden_states, diff --git a/src/transformers/models/sam/processing_sam.py b/src/transformers/models/sam/processing_sam.py index b5ae51d7db29bf..919ed685a86180 100644 --- a/src/transformers/models/sam/processing_sam.py +++ b/src/transformers/models/sam/processing_sam.py @@ -208,7 +208,7 @@ def _check_and_preprocess_points( input_points = input_points.numpy().tolist() if not isinstance(input_points, list) or not isinstance(input_points[0], list): - raise ValueError("Input points must be a list of list of floating integers.") + raise ValueError("Input points must be a list of list of floating points.") input_points = [np.array(input_point) for input_point in input_points] else: input_points = None @@ -232,7 +232,7 @@ def _check_and_preprocess_points( or not isinstance(input_boxes[0], list) or not isinstance(input_boxes[0][0], list) ): - raise ValueError("Input boxes must be a list of list of list of floating integers.") + raise ValueError("Input boxes must be a list of list of list of floating points.") input_boxes = [np.array(box).astype(np.float32) for box in input_boxes] else: input_boxes = None diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index 009d2538ea85cf..e5c38afa83cbab 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -36,7 +36,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -1030,16 +1030,7 @@ def call( past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
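Looking back at the RWKV fix above: with `return_dict=False`, outputs that were not requested are `None`, and the old code returned them anyway, padding the tuple with `None` entries. A toy illustration of the new filtering:

```python
hidden_states, state = "h", "s"
all_hidden_states = None      # output_hidden_states was False
all_self_attentions = None    # output_attentions was False

outputs = tuple(
    x for x in [hidden_states, state, all_hidden_states, all_self_attentions] if x is not None
)
print(outputs)  # ('h', 's') rather than ('h', 's', None, None)
```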
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale else: inputs_embeds = inputs_embeds diff --git a/src/transformers/models/t5/modeling_flax_t5.py b/src/transformers/models/t5/modeling_flax_t5.py index 249d4913e010ef..bc26ade028dc70 100644 --- a/src/transformers/models/t5/modeling_flax_t5.py +++ b/src/transformers/models/t5/modeling_flax_t5.py @@ -60,11 +60,11 @@ def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_tok """ Shift input ids one token to the right. """ - shifted_input_ids = np.zeros_like(input_ids) - shifted_input_ids[:, 1:] = input_ids[:, :-1] - shifted_input_ids[:, 0] = decoder_start_token_id + shifted_input_ids = jnp.zeros_like(input_ids) + shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1]) + shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id) - shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) + shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index f9996e15314e2b..ec3e67db26d1ed 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -40,7 +40,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, @@ -686,16 +686,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index 5c995aa93014a2..f876730b095d50 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -38,7 +38,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( ModelOutput, add_start_docstrings, @@ -231,16 +231,7 @@ def call( position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) position_embeddings = tf.gather(self.position_embeddings, indices=position_ids) diff --git a/src/transformers/models/whisper/modeling_tf_whisper.py b/src/transformers/models/whisper/modeling_tf_whisper.py index d9a175062b36ff..0d2a2682cc97d4 100644 --- a/src/transformers/models/whisper/modeling_tf_whisper.py +++ b/src/transformers/models/whisper/modeling_tf_whisper.py @@ -36,7 +36,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_whisper import WhisperConfig @@ -882,16 +882,7 @@ def call( past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index d112e641a93e05..1a0146bf19d799 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -42,7 +42,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import logging from .configuration_xglm import XGLMConfig @@ -527,16 +527,7 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
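All of these call sites use the `check_embeddings_within_bounds` helper that this patch adds to `src/transformers/tf_utils.py` (see the hunks below). A hedged usage sketch, including the optional `tensor_name` argument that the OpenAI GPT hunk uses for `token_type_ids`:

```python
import tensorflow as tf

from transformers.tf_utils import check_embeddings_within_bounds

vocab_size = 8
input_ids = tf.constant([[1, 2, 7]])
check_embeddings_within_bounds(input_ids, vocab_size)  # in bounds: passes silently

token_type_ids = tf.constant([[0, 1, 9]])  # 9 >= 8: out of bounds
# The third argument only customizes the error message; in eager mode the call
# below raises tf.errors.InvalidArgumentError.
# check_embeddings_within_bounds(token_type_ids, vocab_size, "token_type_ids")
```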
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index f77111cee450a8..da9bd1c6034fc3 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -45,7 +45,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -440,16 +440,7 @@ def call( # embeddings if inputs_embeds is None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embeddings.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embeddings.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embeddings.vocab_size) inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) diff --git a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py index b5fc694148e724..2f51c032f150db 100644 --- a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py @@ -46,7 +46,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, MULTIPLE_CHOICE_DUMMY_INPUTS, @@ -233,16 +233,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.config.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.config.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index 080dd91f2301cd..52538ced57ed7e 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -39,7 +39,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, @@ -678,16 +678,7 @@ def call( if inputs_embeds is not None: word_emb_k = inputs_embeds else: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.word_embedding.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.word_embedding.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.word_embedding.vocab_size) word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k, training=training) if target_mapping is not None: diff --git a/src/transformers/tf_utils.py b/src/transformers/tf_utils.py index 20fe71d6ae5a20..2d4fa6fda9b63c 100644 --- a/src/transformers/tf_utils.py +++ b/src/transformers/tf_utils.py @@ -96,3 +96,23 @@ def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor: ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask + + +def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None: + """ + `tf.gather`, on which TF embedding layers are based, won't check positive out of bound indices on GPU, returning + zeros instead. This function adds a check against that dangerous silent behavior. + + Args: + tensor (`tf.Tensor`): The tensor of indices to check. + embed_dim (`int`): The embedding layer's input dimension (e.g. the vocabulary size); all indices must be strictly smaller than this value. + tensor_name (`str`, *optional*): The name of the tensor to use in the error message. + """ + tf.debugging.assert_less( + tensor, + tf.cast(embed_dim, dtype=tensor.dtype), + message=( + f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding " + f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
+ ), + ) diff --git a/src/transformers/tokenization_utils_fast.py b/src/transformers/tokenization_utils_fast.py index 51b5acebbf271f..106e0d5bf841eb 100644 --- a/src/transformers/tokenization_utils_fast.py +++ b/src/transformers/tokenization_utils_fast.py @@ -249,10 +249,7 @@ def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, Lis if isinstance(tokens, str): return self._convert_token_to_id_with_added_voc(tokens) - ids = [] - for token in tokens: - ids.append(self._convert_token_to_id_with_added_voc(token)) - return ids + return [self._convert_token_to_id_with_added_voc(token) for token in tokens] def _convert_token_to_id_with_added_voc(self, token: str) -> int: index = self._tokenizer.token_to_id(token) diff --git a/src/transformers/tools/agents.py b/src/transformers/tools/agents.py index fb8649bafad351..79413954df10c1 100644 --- a/src/transformers/tools/agents.py +++ b/src/transformers/tools/agents.py @@ -92,8 +92,7 @@ def _setup_default_tools(): tools_module = main_module.tools remote_tools = get_remote_tools() - for task_name in TASK_MAPPING: - tool_class_name = TASK_MAPPING.get(task_name) + for task_name, tool_class_name in TASK_MAPPING.items(): tool_class = getattr(tools_module, tool_class_name) description = tool_class.description HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(task=task_name, description=description, repo_id=None) @@ -198,7 +197,7 @@ class Agent: def __init__(self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): _setup_default_tools() - self.chat_prompt_template = CHAT_MESSAGE_PROMPT if chat_prompt_template is None else chat_prompt_template + self.chat_prompt_template = CHAT_PROMPT_TEMPLATE if chat_prompt_template is None else chat_prompt_template self.run_prompt_template = RUN_PROMPT_TEMPLATE if run_prompt_template is None else run_prompt_template self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() if additional_tools is not None: @@ -229,7 +228,7 @@ def format_prompt(self, task, chat_mode=False): description = "\n".join([f"- {name}: {tool.description}" for name, tool in self.toolbox.items()]) if chat_mode: if self.chat_history is None: - prompt = CHAT_PROMPT_TEMPLATE.replace("<<all_tools>>", description) + prompt = self.chat_prompt_template.replace("<<all_tools>>", description) else: prompt = self.chat_history prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task) @@ -430,7 +429,7 @@ def _completion_generate(self, prompts, stop): class HfAgent(Agent): """ - Agent that uses and inference endpoint to generate code. + Agent that uses an inference endpoint to generate code.
Args: url_endpoint (`str`): @@ -491,5 +490,5 @@ def generate_one(self, prompt, stop): # Inference API returns the stop sequence for stop_seq in stop: if result.endswith(stop_seq): - result = result[: -len(stop_seq)] + return result[: -len(stop_seq)] return result diff --git a/src/transformers/tools/base.py b/src/transformers/tools/base.py index b2abdc818c2956..b97bc4a43beb12 100644 --- a/src/transformers/tools/base.py +++ b/src/transformers/tools/base.py @@ -23,8 +23,8 @@ import tempfile from typing import Any, Dict, List, Optional, Union -from huggingface_hub import CommitOperationAdd, HfFolder, create_commit, create_repo, hf_hub_download, metadata_update -from huggingface_hub.utils import RepositoryNotFoundError, get_session +from huggingface_hub import create_repo, hf_hub_download, metadata_update, upload_folder +from huggingface_hub.utils import RepositoryNotFoundError, build_hf_headers, get_session from ..dynamic_module_utils import custom_object_save, get_class_from_dynamic_module, get_imports from ..image_utils import is_pil_image @@ -173,7 +173,14 @@ def save(self, output_dir): f.write("\n".join(imports) + "\n") @classmethod - def from_hub(cls, repo_id, model_repo_id=None, token=None, remote=False, **kwargs): + def from_hub( + cls, + repo_id: str, + model_repo_id: Optional[str] = None, + token: Optional[str] = None, + remote: bool = False, + **kwargs, + ): """ Loads a tool defined on the Hub. @@ -285,22 +292,17 @@ def push_to_hub( repo_url = create_repo( repo_id=repo_id, token=token, private=private, exist_ok=True, repo_type="space", space_sdk="gradio" ) - metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space") repo_id = repo_url.repo_id + metadata_update(repo_id, {"tags": ["tool"]}, repo_type="space") with tempfile.TemporaryDirectory() as work_dir: # Save all files. self.save(work_dir) - os.listdir(work_dir) - operations = [ - CommitOperationAdd(path_or_fileobj=os.path.join(work_dir, f), path_in_repo=f) - for f in os.listdir(work_dir) - ] logger.info(f"Uploading the following files to {repo_id}: {','.join(os.listdir(work_dir))}") - return create_commit( + return upload_folder( repo_id=repo_id, - operations=operations, commit_message=commit_message, + folder_path=work_dir, token=token, create_pr=create_pr, repo_type="space", @@ -482,7 +484,7 @@ def __init__( self.hub_kwargs = hub_kwargs self.hub_kwargs["use_auth_token"] = token - self.is_initialized = False + super().__init__() def setup(self): """ @@ -508,6 +510,8 @@ def setup(self): if self.device_map is None: self.model.to(self.device) + super().setup() + def encode(self, raw_inputs): """ Uses the `pre_processor` to prepare the inputs for the `model`. 
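The `push_to_hub` rewrite above drops the per-file `CommitOperationAdd` + `create_commit` dance in favor of a single `upload_folder` call. A hedged sketch of the equivalent standalone call, with a hypothetical Space and local directory:

```python
from huggingface_hub import upload_folder

upload_folder(
    repo_id="my-user/my-tool",      # hypothetical Space created beforehand
    folder_path="/tmp/tool_files",  # hypothetical directory written by Tool.save()
    commit_message="Upload tool",
    repo_type="space",
)
```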
@@ -674,9 +678,7 @@ def inner(func): ## Will move to the Hub class EndpointClient: def __init__(self, endpoint_url: str, token: Optional[str] = None): - if token is None: - token = HfFolder().get_token() - self.headers = {"authorization": f"Bearer {token}", "Content-Type": "application/json"} + self.headers = {**build_hf_headers(token=token), "Content-Type": "application/json"} self.endpoint_url = endpoint_url @staticmethod diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index f7fb3558df7f71..ce4e1f24067187 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1105,10 +1105,10 @@ def create_optimizer(self): for module in opt_model.modules(): if isinstance(module, nn.Embedding): skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values()) - print(f"skipped {module}: {skipped/2**20}M params") + logger.info(f"skipped {module}: {skipped/2**20}M params") manager.register_module_override(module, "weight", {"optim_bits": 32}) logger.debug(f"bitsandbytes: will optimize {module} in fp32") - print(f"skipped: {skipped/2**20}M params") + logger.info(f"skipped: {skipped/2**20}M params") if is_sagemaker_mp_enabled(): self.optimizer = smp.DistributedOptimizer(self.optimizer) @@ -3649,6 +3649,8 @@ def _push_from_checkpoint(self, checkpoint_folder): _, self.push_in_progress = self.repo.push_to_hub( commit_message=commit_message, blocking=False, auto_lfs_prune=True ) + except Exception as e: + logger.error(f"Error when pushing to hub: {e}") finally: if self.args.hub_strategy == HubStrategy.CHECKPOINT: # Move back the checkpoint to its place diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 44f28ff99ef403..3189c5f86bb948 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1611,7 +1611,7 @@ def _setup_devices(self) -> "torch.device": logger.info("PyTorch: setting up devices") if not is_sagemaker_mp_enabled() and not is_accelerate_available(check_partial_state=True): raise ImportError( - "Using the `Trainer` with `PyTorch` requires `accelerate`: Run `pip install --upgrade accelerate`" + "Using the `Trainer` with `PyTorch` requires `accelerate>=0.19.0`: Please run `pip install transformers[torch]` or `pip install accelerate -U`" ) if self.no_cuda: self.distributed_state = PartialState(cpu=True, backend=self.ddp_backend) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 58aac59d24e8e6..335547f2b8259c 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -578,7 +578,7 @@ def download_url(url, proxies=None): " that this is not compatible with the caching system (your file will be downloaded at each execution) or" " multiple processes (each process will download the file in a different temporary file)." 
) - tmp_file = tempfile.mktemp() + tmp_file = tempfile.mkstemp()[1] with open(tmp_file, "wb") as f: http_get(url, f, proxies=proxies) return tmp_file diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 512c460e15a163..037a0d96a13083 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -118,7 +118,7 @@ def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[ importlib_metadata.version("scikit-learn") except importlib_metadata.PackageNotFoundError: _sklearn_available = False -_smdistributed_available = _is_package_available("smdistributed") +_smdistributed_available = importlib.util.find_spec("smdistributed") is not None _soundfile_available = _is_package_available("soundfile") _spacy_available = _is_package_available("spacy") _sudachipy_available = _is_package_available("sudachipy") @@ -502,7 +502,7 @@ def is_protobuf_available(): def is_accelerate_available(check_partial_state=False): if check_partial_state: - return _accelerate_available and version.parse(_accelerate_version) >= version.parse("0.17.0") + return _accelerate_available and version.parse(_accelerate_version) >= version.parse("0.19.0") return _accelerate_available diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index df6adc3c4deb26..ffe5e7de95b9eb 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -53,7 +53,7 @@ keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config @@ -126,16 +126,7 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
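On the `download_url` change above: `tempfile.mktemp` only reserves a name and is documented as race-prone, while `mkstemp` atomically creates the file and returns an `(fd, path)` pair; the patch keeps just the path via `[1]`. A small sketch; note that closing the descriptor explicitly (which the `[1]` indexing skips) avoids leaking it:

```python
import os
import tempfile

fd, tmp_file = tempfile.mkstemp()  # the file already exists on disk at this point
os.close(fd)  # the patch discards the fd via [1]; closing it avoids a leak
with open(tmp_file, "wb") as f:
    f.write(b"downloaded bytes")
```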
- tf.debugging.assert_less( - input_ids, - tf.cast(self.vocab_size, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" - ), - ) + check_embeddings_within_bounds(input_ids, self.vocab_size) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] @@ -1670,7 +1661,7 @@ def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAn keras_serializable, unpack_inputs, ) -from ...tf_utils import shape_list, stable_softmax +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ContextManagers, logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config @@ -2311,16 +2302,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -2518,16 +2500,7 @@ def call( if hasattr(self.embed_tokens, "load_weight_prefix"): context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) + check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds diff --git a/tests/generation/test_utils.py b/tests/generation/test_utils.py index ed286b990b510f..3f13dcda832e22 100644 --- a/tests/generation/test_utils.py +++ b/tests/generation/test_utils.py @@ -1645,6 +1645,77 @@ def test_left_padding_compatibility(self): self.assertTrue(no_failures) + def test_past_key_values_format(self): + # Test that the KV cache is formatted correctly. Exceptions need to explicitly overwrite this test. Having a + # standard KV cache format is important for a consistent API (and for advanced generation methods). + for model_class in self.all_generative_model_classes: + config, inputs = self.model_tester.prepare_config_and_inputs_for_common() + + # If it doesn't support cache, pass the test + if not hasattr(config, "use_cache"): + return + + model = model_class(config).to(torch_device) + if "use_cache" not in inputs: + inputs["use_cache"] = True + outputs = model(**inputs) + + # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) + if "past_key_values" not in outputs: + return + + num_hidden_layers = ( + getattr(config, "decoder_layers", None) + or getattr(config, "num_decoder_layers", None) + or config.num_hidden_layers + ) + num_attention_heads = getattr(config, "decoder_attention_heads", config.num_attention_heads) + embed_dim = getattr(config, "d_model", config.hidden_size) + per_head_embed_dim = embed_dim // num_attention_heads + + past_kv = outputs["past_key_values"] + self.assertEqual(len(past_kv), num_hidden_layers) + + # Encoder-Decoder checks + if config.is_encoder_decoder: + encoder_num_attention_heads = config.encoder_attention_heads + encoder_per_head_embed_dim = embed_dim // encoder_num_attention_heads + batch_size, seq_length = inputs["decoder_input_ids"].shape + for i in range(num_hidden_layers): + self.assertEqual(len(past_kv[i]), 4) # K V for the decoder + K V for the encoder = 4 + self.assertEqual( + past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) + ) + self.assertEqual( + past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) + ) + # The sequence length for the encoder K V depends on the model. Since it is not manipulated in + # autoregressive generation, I'm keeping the test general and not checking the 3rd dim + self.assertEqual( + (past_kv[i][2].shape[0], past_kv[i][2].shape[1], past_kv[i][2].shape[3]), + (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), + ) + self.assertEqual( + (past_kv[i][3].shape[0], past_kv[i][3].shape[1], past_kv[i][3].shape[3]), + (batch_size, encoder_num_attention_heads, encoder_per_head_embed_dim), + ) + + # Decoder-only checks + else: + # TODO: this line is only needed because of imagegpt, where "pixel_values" = "input_ids". 
Fix the + # tests in imagegpt such that `prepare_config_and_inputs_for_common` returns the later (and the other + # tests use it) + key = "input_ids" if "input_ids" in inputs else "pixel_values" + batch_size, seq_length = inputs[key].shape + for i in range(num_hidden_layers): + self.assertEqual(len(past_kv[0]), 2) # K V for the decoder = 2 + self.assertEqual( + past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) + ) + self.assertEqual( + past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim) + ) + def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences diff --git a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py index d8036cb8275c7b..32b7bbcbb24a76 100644 --- a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py +++ b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py @@ -513,9 +513,10 @@ def test_inference_block_sparse(self): self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103))) # fmt: off expected_prediction_logits_slice = torch.tensor( - [[1.7769, 5.8479, 6.2375, 2.2745, 8.6157, 4.7483, 5.0647, 6.5358, 2.3393, 7.8333, 3.8403, 0.0255, 7.219, 5.2759, 3.097, 6.387, 4.9341, 7.1409, 5.1179, 0.1144, 6.8268, 0.7598, 0.6258, 2.373, 0.4627, -1.9919, 1.8422, 3.4578], [1.8026, 5.9604, 5.954, 2.8642, 9.0608, 4.394, 5.3779, 7.0216, 1.543, 7.8744, 4.4231, -0.0398, 7.6091, 5.6611, 3.3536, 6.8624, 4.7699, 6.5241, 4.8893, 0.5791, 6.8368, 0.1034, 0.0338, 2.9393, 0.5034, -2.5509, 2.0172, 3.2858], [1.8426, 5.9151, 5.5374, 3.0426, 9.1762, 3.6287, 5.3916, 7.4621, 1.2582, 7.9244, 4.694, -0.1308, 7.4725, 5.5385, 3.4598, 7.0422, 4.2455, 5.797, 4.5927, 0.7478, 6.7467, -0.2695, -0.3207, 3.0269, 0.4714, -2.8134, 2.0406, 3.1089], [1.6527, 5.8416, 5.4558, 3.0044, 9.3478, 3.2607, 5.3887, 7.52, 0.9362, 7.8877, 4.8465, -0.1705, 7.3932, 5.6352, 3.5744, 7.2623, 4.0485, 5.2788, 4.5859, 0.8325, 6.6088, -0.3676, -0.6287, 3.1731, 0.4483, -3.1573, 2.0522, 2.8868]], # noqa: E231 + [[1.5118, 5.5227, 4.8125, 1.7603, 8.1704, 3.996, 4.8118, 6.7806, 2.2297, 6.9834, 3.1906, 0.103, 7.1515, 6.3679, 3.1896, 6.3054, 3.9741, 6.3772, 5.0042, -0.6338, 6.7868, 0.592, 0.5363, 1.87, -0.331, -2.4518, 1.8263, 3.1899], [1.5702, 5.8135, 4.6675, 2.3674, 8.9828, 3.7913, 5.4027, 7.6567, 1.9007, 7.3706, 3.8824, 0.0247, 7.6094, 6.6985, 3.2826, 7.0094, 3.8713, 5.6555, 5.0439, -0.3519, 7.1525, 0.4062, -0.2419, 2.2194, -0.6447, -2.9614, 2.0713, 3.248], [1.4527, 5.6003, 4.5381, 2.6382, 9.2809, 3.2969, 5.6811, 8.4011, 1.6909, 7.4937, 4.3185, -0.0878, 7.61, 6.6822, 3.4753, 7.3962, 3.5336, 4.9216, 4.943, -0.2043, 7.3326, 0.2199, -0.6016, 2.4367, -0.7043, -3.0689, 2.3215, 3.0611], [1.1084, 5.6308, 4.4886, 2.717, 9.4103, 3.0733, 5.5825, 8.4325, 1.3075, 7.5495, 4.4782, -0.1092, 7.8115, 6.6285, 3.5311, 7.6853, 3.509, 4.4994, 4.9224, -0.1384, 7.3069, -0.0473, -0.8578, 2.4632, -0.5249, -3.4627, 2.2671, 2.8818]], # noqa: E231 device=torch_device, ) + # fmt: on self.assertTrue( torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4) @@ -563,24 +564,23 @@ def test_seq_to_seq_generation(self): hypotheses_batch = model.generate(**inputs) EXPECTED_LEP = ( - "motivated by some recent studies on the light cp - odd higgs boson @xmath0 in non - minimal" - " supersymmetric models, we investigate the rare @xmath1-decays 
diff --git a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
index d8036cb8275c7b..32b7bbcbb24a76 100644
--- a/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
+++ b/tests/models/bigbird_pegasus/test_modeling_bigbird_pegasus.py
@@ -513,9 +513,10 @@ def test_inference_block_sparse(self):
         self.assertEqual(prediction_logits.shape, torch.Size((1, 16, 96103)))
         # fmt: off
         expected_prediction_logits_slice = torch.tensor(
-            [[1.7769, 5.8479, 6.2375, 2.2745, 8.6157, 4.7483, 5.0647, 6.5358, 2.3393, 7.8333, 3.8403, 0.0255, 7.219, 5.2759, 3.097, 6.387, 4.9341, 7.1409, 5.1179, 0.1144, 6.8268, 0.7598, 0.6258, 2.373, 0.4627, -1.9919, 1.8422, 3.4578], [1.8026, 5.9604, 5.954, 2.8642, 9.0608, 4.394, 5.3779, 7.0216, 1.543, 7.8744, 4.4231, -0.0398, 7.6091, 5.6611, 3.3536, 6.8624, 4.7699, 6.5241, 4.8893, 0.5791, 6.8368, 0.1034, 0.0338, 2.9393, 0.5034, -2.5509, 2.0172, 3.2858], [1.8426, 5.9151, 5.5374, 3.0426, 9.1762, 3.6287, 5.3916, 7.4621, 1.2582, 7.9244, 4.694, -0.1308, 7.4725, 5.5385, 3.4598, 7.0422, 4.2455, 5.797, 4.5927, 0.7478, 6.7467, -0.2695, -0.3207, 3.0269, 0.4714, -2.8134, 2.0406, 3.1089], [1.6527, 5.8416, 5.4558, 3.0044, 9.3478, 3.2607, 5.3887, 7.52, 0.9362, 7.8877, 4.8465, -0.1705, 7.3932, 5.6352, 3.5744, 7.2623, 4.0485, 5.2788, 4.5859, 0.8325, 6.6088, -0.3676, -0.6287, 3.1731, 0.4483, -3.1573, 2.0522, 2.8868]],  # noqa: E231
+            [[1.5118, 5.5227, 4.8125, 1.7603, 8.1704, 3.996, 4.8118, 6.7806, 2.2297, 6.9834, 3.1906, 0.103, 7.1515, 6.3679, 3.1896, 6.3054, 3.9741, 6.3772, 5.0042, -0.6338, 6.7868, 0.592, 0.5363, 1.87, -0.331, -2.4518, 1.8263, 3.1899], [1.5702, 5.8135, 4.6675, 2.3674, 8.9828, 3.7913, 5.4027, 7.6567, 1.9007, 7.3706, 3.8824, 0.0247, 7.6094, 6.6985, 3.2826, 7.0094, 3.8713, 5.6555, 5.0439, -0.3519, 7.1525, 0.4062, -0.2419, 2.2194, -0.6447, -2.9614, 2.0713, 3.248], [1.4527, 5.6003, 4.5381, 2.6382, 9.2809, 3.2969, 5.6811, 8.4011, 1.6909, 7.4937, 4.3185, -0.0878, 7.61, 6.6822, 3.4753, 7.3962, 3.5336, 4.9216, 4.943, -0.2043, 7.3326, 0.2199, -0.6016, 2.4367, -0.7043, -3.0689, 2.3215, 3.0611], [1.1084, 5.6308, 4.4886, 2.717, 9.4103, 3.0733, 5.5825, 8.4325, 1.3075, 7.5495, 4.4782, -0.1092, 7.8115, 6.6285, 3.5311, 7.6853, 3.509, 4.4994, 4.9224, -0.1384, 7.3069, -0.0473, -0.8578, 2.4632, -0.5249, -3.4627, 2.2671, 2.8818]],  # noqa: E231
             device=torch_device,
         )
+        # fmt: on

         self.assertTrue(
             torch.allclose(prediction_logits[0, 4:8, 128:156], expected_prediction_logits_slice, atol=1e-4)
@@ -563,24 +564,23 @@ def test_seq_to_seq_generation(self):
         hypotheses_batch = model.generate(**inputs)

         EXPECTED_LEP = (
-            "motivated by some recent studies on the light cp - odd higgs boson @xmath0 in non - minimal"
-            " supersymmetric models, we investigate the rare @xmath1-decays @xmath2 ( @xmath3 ) in the two higgs"
-            " doublet model ( 2hdm ), the nearly minimal supersymmetric standard model ( nmssm ), the next - to -"
-            " minimal supersymmetric standard model ( nmssm ) and the minimal supersymmetric standard model ( mssm"
-            " ). we find that the branching ratios of @xmath4 can reach @xmath5 in 2hdm, @xmath6 in nmssm and"
-            " @xmath7 in mssm, which are at the level of @xmath8 in 2hdm, @xmath9 in nmssm and @xmath10 in mssm,"
-            " respectively. these rates can be significantly enhanced in new physics models which lie within the"
-            " expected sensitivity of the gigaz option of the international linear collider ( ilc ). = # 1,nucl."
-            " phys. b * # 1"
+            "we study the rare decays @xmath0 ( @xmath1 ) at the gigaz option of the international linear collider "
+            "( ilc ). we calculate the branching ratios of @xmath2 in the two higgs doublet model ( 2hdm ), the "
+            "minimal supersymmetric standard model ( mssm ), the next - to - minimal supersymmetric standard model "
+            "( nmssm ) and the nearly minimal supersymmetric standard model ( nmssm ). we find that the branching "
+            "ratios of @xmath3 can reach @xmath4 in 2hdm, @xmath5 in mssm, @xmath6 in nmssm and @xmath7 in nmssm, "
+            "while they are much smaller than @xmath8 in 2hdm, @xmath9 in mssm, @xmath10 in nmssm and @xmath11 in "
+            "nmssm."
         )

         EXPECTED_MAGNET = (
-            "a positive, nonsaturating and dominantly linear magnetoresistance can appear within quite wide magnetic -"
-            " field range in the surface state of a topological insulator having a positive and finite effective g -"
-            " factor. this linear magnetoresistance shows up in the system of high carrier concentration and low"
-            " mobility when electrons are in extended states and spread over many smeared landau levels, and persists"
-            " up to room temperature, providing a possible mechanism for the recently observed linear"
-            " magnetoresistance in topological insulator bi@xmath0se@xmath1 nanoribbons."
+            "we investigate the two - dimensional magnetotransport in the surface state of a topological insulator "
+            "( ti ). we find that a positive, nonsaturating and dominantly linear magnetoresistance can appear "
+            "within quite wide magnetic - field range in the ti surface state having a positive and finite effective g "
+            "- factor. this linear magnetoresistance shows up in the system of high carrier concentration and low "
+            "mobility when electrons are in extended states and spread over many smeared landau levels, and persists "
+            "up to room temperature, providing a possible mechanism for the recently observed linear magnetoresistance "
+            "in topological insulator bi@xmath0se@xmath1 nanoribbons."
         )

         generated = tokenizer.batch_decode(
diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py
index 617998cc61b304..678c46bd0ca471 100644
--- a/tests/models/bloom/test_modeling_bloom.py
+++ b/tests/models/bloom/test_modeling_bloom.py
@@ -393,6 +393,10 @@ def test_bloom_weight_initialization(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)

+    @unittest.skip("Bloom has a non-standard KV cache format.")
+    def test_past_key_values_format(self):
+        pass
+
     @slow
     def test_model_from_pretrained(self):
         for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
diff --git a/tests/models/convnextv2/test_modeling_convnextv2.py b/tests/models/convnextv2/test_modeling_convnextv2.py
index 85ce5b1813bb5e..c3f8804f1ccad9 100644
--- a/tests/models/convnextv2/test_modeling_convnextv2.py
+++ b/tests/models/convnextv2/test_modeling_convnextv2.py
@@ -353,5 +353,5 @@ def test_inference_image_classification_head(self):
         expected_shape = torch.Size((1, 1000))
         self.assertEqual(outputs.logits.shape, expected_shape)

-        expected_slice = torch.tensor([-0.3083, -0.3040, -0.4344]).to(torch_device)
+        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
         self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
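The BigBird-Pegasus and ConvNeXt V2 updates above refresh hard-coded expected values after the reference outputs changed. A hedged sketch of how such a slice is typically regenerated; the checkpoint name and image URL are assumptions mirroring common transformers test fixtures:

```python
# Sketch: recompute the first three classification logits to refresh `expected_slice`.
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed test image
image = Image.open(requests.get(url, stream=True).raw)

checkpoint = "facebook/convnextv2-tiny-1k-224"  # illustrative checkpoint
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(checkpoint)

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

# Round and paste the values into the test's expected_slice
print([round(v, 4) for v in logits[0, :3].tolist()])
```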
diff --git a/tests/models/electra/test_tokenization_electra.py b/tests/models/electra/test_tokenization_electra.py
new file mode 100644
index 00000000000000..1c9b517f1f1d87
--- /dev/null
+++ b/tests/models/electra/test_tokenization_electra.py
@@ -0,0 +1,335 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import unittest
+
+from transformers import ElectraTokenizerFast
+from transformers.models.electra.tokenization_electra import (
+    VOCAB_FILES_NAMES,
+    BasicTokenizer,
+    ElectraTokenizer,
+    WordpieceTokenizer,
+    _is_control,
+    _is_punctuation,
+    _is_whitespace,
+)
+from transformers.testing_utils import require_tokenizers, slow
+
+from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
+
+
+@require_tokenizers
+class ElectraTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
+    tokenizer_class = ElectraTokenizer
+    rust_tokenizer_class = ElectraTokenizerFast
+    test_rust_tokenizer = True
+    space_between_special_tokens = True
+    from_pretrained_filter = filter_non_english
+
+    def setUp(self):
+        super().setUp()
+
+        vocab_tokens = [
+            "[UNK]",
+            "[CLS]",
+            "[SEP]",
+            "[PAD]",
+            "[MASK]",
+            "want",
+            "##want",
+            "##ed",
+            "wa",
+            "un",
+            "runn",
+            "##ing",
+            ",",
+            "low",
+            "lowest",
+        ]
+        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
+        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
+            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
+
+    def get_input_output_texts(self, tokenizer):
+        input_text = "UNwant\u00E9d,running"
+        output_text = "unwanted, running"
+        return input_text, output_text
+
+    def test_full_tokenizer(self):
+        tokenizer = self.tokenizer_class(self.vocab_file)
+
+        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
+        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
+        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
+
+    def test_rust_and_python_full_tokenizers(self):
+        if not self.test_rust_tokenizer:
+            return
+
+        tokenizer = self.get_tokenizer()
+        rust_tokenizer = self.get_rust_tokenizer()
+
+        sequence = "UNwant\u00E9d,running"
+
+        tokens = tokenizer.tokenize(sequence)
+        rust_tokens = rust_tokenizer.tokenize(sequence)
+        self.assertListEqual(tokens, rust_tokens)
+
+        ids = tokenizer.encode(sequence, add_special_tokens=False)
+        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
+        self.assertListEqual(ids, rust_ids)
+
+        rust_tokenizer = self.get_rust_tokenizer()
+        ids = tokenizer.encode(sequence)
+        rust_ids = rust_tokenizer.encode(sequence)
+        self.assertListEqual(ids, rust_ids)
+
+        # With lower casing
+        tokenizer = self.get_tokenizer(do_lower_case=True)
+        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
+
+        sequence = "UNwant\u00E9d,running"
+
+        tokens = tokenizer.tokenize(sequence)
+        rust_tokens = rust_tokenizer.tokenize(sequence)
+        self.assertListEqual(tokens, rust_tokens)
+
+        ids = tokenizer.encode(sequence, add_special_tokens=False)
+        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
+        self.assertListEqual(ids, rust_ids)
+
+        rust_tokenizer = self.get_rust_tokenizer()
+        ids = tokenizer.encode(sequence)
+        rust_ids = rust_tokenizer.encode(sequence)
+        self.assertListEqual(ids, rust_ids)
+
+    def test_chinese(self):
+        tokenizer = BasicTokenizer()
+
+        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
+
+    def test_basic_tokenizer_lower(self):
+        tokenizer = BasicTokenizer(do_lower_case=True)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
+        )
+        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
+
+    def test_basic_tokenizer_lower_strip_accents_false(self):
+        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
+        )
+        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
+
+    def test_basic_tokenizer_lower_strip_accents_true(self):
+        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
+        )
+        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
+
+    def test_basic_tokenizer_lower_strip_accents_default(self):
+        tokenizer = BasicTokenizer(do_lower_case=True)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
+        )
+        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
+
+    def test_basic_tokenizer_no_lower(self):
+        tokenizer = BasicTokenizer(do_lower_case=False)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
+        )
+
+    def test_basic_tokenizer_no_lower_strip_accents_false(self):
+        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
+        )
+
+    def test_basic_tokenizer_no_lower_strip_accents_true(self):
+        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
+        )
+
+    def test_basic_tokenizer_respects_never_split_tokens(self):
+        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
+
+        self.assertListEqual(
+            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
+        )
+
+    def test_wordpiece_tokenizer(self):
+        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
+
+        vocab = {}
+        for i, token in enumerate(vocab_tokens):
+            vocab[token] = i
+        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
+
+        self.assertListEqual(tokenizer.tokenize(""), [])
+
+        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
+
+        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
+
+    def test_is_whitespace(self):
+        self.assertTrue(_is_whitespace(" "))
+        self.assertTrue(_is_whitespace("\t"))
+        self.assertTrue(_is_whitespace("\r"))
+        self.assertTrue(_is_whitespace("\n"))
+        self.assertTrue(_is_whitespace("\u00A0"))
+
+        self.assertFalse(_is_whitespace("A"))
+        self.assertFalse(_is_whitespace("-"))
+
+    def test_is_control(self):
+        self.assertTrue(_is_control("\u0005"))
+
+        self.assertFalse(_is_control("A"))
+        self.assertFalse(_is_control(" "))
+        self.assertFalse(_is_control("\t"))
+        self.assertFalse(_is_control("\r"))
+
+    def test_is_punctuation(self):
+        self.assertTrue(_is_punctuation("-"))
+        self.assertTrue(_is_punctuation("$"))
+        self.assertTrue(_is_punctuation("`"))
+        self.assertTrue(_is_punctuation("."))
+
+        self.assertFalse(_is_punctuation("A"))
+        self.assertFalse(_is_punctuation(" "))
+
+    def test_clean_text(self):
+        tokenizer = self.get_tokenizer()
+        rust_tokenizer = self.get_rust_tokenizer()
+
+        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
+        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
+
+        self.assertListEqual(
+            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
+        )
+
+    @slow
+    def test_sequence_builders(self):
+        tokenizer = self.tokenizer_class.from_pretrained("google/electra-base-discriminator")
+
+        text = tokenizer.encode("sequence builders", add_special_tokens=False)
+        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
+
+        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
+        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
+
+        assert encoded_sentence == [101] + text + [102]
+        assert encoded_pair == [101] + text + [102] + text_2 + [102]
+
+    def test_offsets_with_special_characters(self):
+        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
+            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
+                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
+
+                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
+                tokens = tokenizer_r.encode_plus(
+                    sentence,
+                    return_attention_mask=False,
+                    return_token_type_ids=False,
+                    return_offsets_mapping=True,
+                    add_special_tokens=True,
+                )
+
+                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
+                expected_results = (
+                    [
+                        ((0, 0), tokenizer_r.cls_token),
+                        ((0, 1), "A"),
+                        ((1, 2), ","),
+                        ((3, 5), "na"),
+                        ((5, 6), "##ï"),
+                        ((6, 8), "##ve"),
+                        ((9, 15), tokenizer_r.mask_token),
+                        ((16, 21), "Allen"),
+                        ((21, 23), "##NL"),
+                        ((23, 24), "##P"),
+                        ((25, 33), "sentence"),
+                        ((33, 34), "."),
+                        ((0, 0), tokenizer_r.sep_token),
+                    ]
+                    if not do_lower_case
+                    else [
+                        ((0, 0), tokenizer_r.cls_token),
+                        ((0, 1), "a"),
+                        ((1, 2), ","),
+                        ((3, 8), "naive"),
+                        ((9, 15), tokenizer_r.mask_token),
+                        ((16, 21), "allen"),
+                        ((21, 23), "##nl"),
+                        ((23, 24), "##p"),
+                        ((25, 33), "sentence"),
+                        ((33, 34), "."),
+                        ((0, 0), tokenizer_r.sep_token),
+                    ]
+                )
+
+                self.assertEqual(
+                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
+                )
+                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
+
+    def test_change_tokenize_chinese_chars(self):
+        list_of_common_chinese_char = ["的", "人", "有"]
+        text_with_chinese_char = "".join(list_of_common_chinese_char)
+        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
+            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
+                kwargs["tokenize_chinese_chars"] = True
+                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
+                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
+
+                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
+                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
+
+                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
+                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
+
+                # it is expected that each Chinese character is not preceded by "##"
+                self.assertListEqual(tokens_without_spe_char_p, list_of_common_chinese_char)
+                self.assertListEqual(tokens_without_spe_char_r, list_of_common_chinese_char)
+
+                kwargs["tokenize_chinese_chars"] = False
+                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
+                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
+
+                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
+                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
+
+                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
+                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
+
+                # it is expected that only the first Chinese character is not preceded by "##".
+                expected_tokens = [
+                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_common_chinese_char)
+                ]
+                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
+                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
diff --git a/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py b/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
index a9f4d204bf9c55..01e6ceef9e903b 100644
--- a/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
+++ b/tests/models/gpt_bigcode/test_modeling_gpt_bigcode.py
@@ -475,6 +475,10 @@ def test_cpu_offload(self):
     def test_disk_offload(self):
         pass

+    @unittest.skip("BigCodeGPT has a non-standard KV cache format.")
+    def test_past_key_values_format(self):
+        pass
+
     def test_gpt_bigcode_model(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_gpt_bigcode_model(*config_and_inputs)
diff --git a/tests/models/pix2struct/test_modeling_pix2struct.py b/tests/models/pix2struct/test_modeling_pix2struct.py
index 42ee3c2b4c6a39..4dbd7e649f16e3 100644
--- a/tests/models/pix2struct/test_modeling_pix2struct.py
+++ b/tests/models/pix2struct/test_modeling_pix2struct.py
@@ -749,17 +749,20 @@ def test_batched_inference_image_captioning_conditioned(self):
         texts = ["A picture of", "An photography of"]

         # image only
-        inputs = processor(images=[image_1, image_2], text=texts, return_tensors="pt").to(torch_device)
+        inputs = processor(images=[image_1, image_2], text=texts, return_tensors="pt", add_special_tokens=False).to(
+            torch_device
+        )

         predictions = model.generate(**inputs)

         self.assertEqual(
-            processor.decode(predictions[0], skip_special_tokens=True), "A picture of a stop sign that says yes."
+            processor.decode(predictions[0], skip_special_tokens=True),
+            "A picture of a stop sign with a red stop sign on it.",
         )

         self.assertEqual(
             processor.decode(predictions[1], skip_special_tokens=True),
-            "An photography of the Temple Bar and a few other places.",
+            "An photography of the Temple Bar and the Temple Bar.",
         )

     def test_vqa_model(self):
diff --git a/tests/models/reformer/test_modeling_reformer.py b/tests/models/reformer/test_modeling_reformer.py
index a7f4f2f45416f5..39e1389477b844 100644
--- a/tests/models/reformer/test_modeling_reformer.py
+++ b/tests/models/reformer/test_modeling_reformer.py
@@ -831,8 +831,12 @@ def _check_hidden_states_for_generate(
             [expected_shape] * len(iter_hidden_states),
         )

+    @unittest.skip("Fails because the sequence length is not a multiple of 4")
     def test_problem_types(self):
-        # Fails because the sequence length is not a multiple of 4
+        pass
+
+    @unittest.skip("Fails because the sequence length is not a multiple of 4")
+    def test_past_key_values_format(self):
         pass
diff --git a/tests/models/sam/test_modeling_sam.py b/tests/models/sam/test_modeling_sam.py
index e51eb07dd31113..a45e5c4bf545c5 100644
--- a/tests/models/sam/test_modeling_sam.py
+++ b/tests/models/sam/test_modeling_sam.py
@@ -481,7 +481,7 @@ def test_inference_mask_generation_one_point_one_bb(self):
         model.eval()
         raw_image = prepare_image()

-        input_boxes = [[650, 900, 1000, 1250]]
+        input_boxes = [[[650, 900, 1000, 1250]]]
         input_points = [[[820, 1080]]]

         inputs = processor(
@@ -541,7 +541,7 @@ def test_inference_mask_generation_one_point_one_bb_zero(self):
         model.eval()
         raw_image = prepare_image()

-        input_boxes = [[620, 900, 1000, 1255]]
+        input_boxes = [[[620, 900, 1000, 1255]]]
         input_points = [[[820, 1080]]]
         labels = [[0]]
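The SAM change above nests `input_boxes` one level deeper, making the layout explicit: batch x boxes-per-image x 4 box coordinates. A hedged sketch of the convention; the checkpoint name and image are illustrative, not taken from the test:

```python
# Sketch: input_boxes is nested as [batch, boxes_per_image, 4], coordinates (x1, y1, x2, y2).
import requests
from PIL import Image
from transformers import SamModel, SamProcessor

img_url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # assumed image
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")  # illustrative checkpoint
model = SamModel.from_pretrained("facebook/sam-vit-base")

input_boxes = [[[650, 900, 1000, 1250]]]  # one image in the batch, one box for that image
inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt")
outputs = model(**inputs)
print(outputs.pred_masks.shape)
```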
diff --git a/tests/models/swiftformer/test_modeling_swiftformer.py b/tests/models/swiftformer/test_modeling_swiftformer.py
index ca616976fd4a78..f2785a6640cae1 100644
--- a/tests/models/swiftformer/test_modeling_swiftformer.py
+++ b/tests/models/swiftformer/test_modeling_swiftformer.py
@@ -303,5 +303,5 @@ def test_inference_image_classification_head(self):
         expected_shape = torch.Size((1, 1000))
         self.assertEqual(outputs.logits.shape, expected_shape)

-        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]])
+        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
         self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py
index 43e06e906797a8..e6e7968ae5319a 100755
--- a/tests/test_modeling_common.py
+++ b/tests/test_modeling_common.py
@@ -2920,6 +2920,10 @@ def remove_torch_dtype(model_path):
         model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto")
         self.assertEqual(model.dtype, torch.float16)

+        # 3. now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration
+        model = AutoModel.from_pretrained(model_path, torch_dtype="auto")
+        self.assertEqual(model.dtype, torch.float16)
+
         # test fp16 save_pretrained, loaded with the explicit fp16
         model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16)
         self.assertEqual(model.dtype, torch.float16)
diff --git a/utils/check_build.py b/utils/check_build.py
new file mode 100644
index 00000000000000..a699ed4f7e0fba
--- /dev/null
+++ b/utils/check_build.py
@@ -0,0 +1,48 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import importlib
+from pathlib import Path
+
+
+# Test all the extensions added in the setup
+FILES_TO_FIND = [
+    "kernels/rwkv/wkv_cuda.cu",
+    "kernels/rwkv/wkv_op.cpp",
+    "models/deformable_detr/custom_kernel/ms_deform_attn.h",
+    "models/deformable_detr/custom_kernel/cuda/ms_deform_im2col_cuda.cuh",
+    "models/graphormer/algos_graphormer.pyx",
+]
+
+
+def test_custom_files_are_present(transformers_path):
+    # Test all the extensions added in the setup
+    for file in FILES_TO_FIND:
+        if not (transformers_path / file).exists():
+            return False
+    return True
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
+    args = parser.parse_args()
+    if args.check_lib:
+        transformers_module = importlib.import_module("transformers")
+        transformers_path = Path(transformers_module.__file__).parent
+    else:
+        transformers_path = Path.cwd() / "build/lib/transformers"
+    if not test_custom_files_are_present(transformers_path):
+        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
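The `test_modeling_common.py` hunk above extends the `torch_dtype="auto"` test to `AutoModel`. The behavior it pins down, sketched here with an illustrative save path:

```python
# Sketch: with torch_dtype="auto", loaders should honor the dtype recorded in the
# checkpoint's config.json, for AutoModel as well as the concrete model class.
import torch
from transformers import AutoModel, T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("t5-small", torch_dtype=torch.float16)
model.save_pretrained("/tmp/t5-fp16")  # illustrative path; config.json records float16

reloaded = AutoModel.from_pretrained("/tmp/t5-fp16", torch_dtype="auto")
assert reloaded.dtype == torch.float16
```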
diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py
index 8aa1015f394798..05009e9759fb91 100644
--- a/utils/tests_fetcher.py
+++ b/utils/tests_fetcher.py
@@ -116,6 +116,26 @@ def clean_code(content):
     return "\n".join(lines_to_keep)


+def keep_doc_examples_only(content):
+    """
+    Keep only the doc examples in `content`: code outside examples, docstring text, empty lines and comments are
+    stripped.
+    """
+    # Keep doc examples only by splitting on triple "`"
+    splits = content.split("```")
+    # Add leading and trailing "```" so the navigation is easier when compared to the original input `content`
+    content = "```" + "```".join(splits[1::2]) + "```"
+
+    # Remove empty lines and comments
+    lines_to_keep = []
+    for line in content.split("\n"):
+        # remove anything that is after a # sign.
+        line = re.sub("#.*$", "", line)
+        if len(line) == 0 or line.isspace():
+            continue
+        lines_to_keep.append(line)
+    return "\n".join(lines_to_keep)
+
+
 def get_all_tests():
     """
     Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`.
@@ -162,6 +182,24 @@ def diff_is_docstring_only(repo, branching_point, filename):
     return old_content_clean == new_content_clean


+def diff_contains_doc_examples(repo, branching_point, filename):
+    """
+    Check if the diff in `filename` touches at least one doc example.
+    """
+    folder = Path(repo.working_dir)
+    with checkout_commit(repo, branching_point):
+        with open(folder / filename, "r", encoding="utf-8") as f:
+            old_content = f.read()
+
+    with open(folder / filename, "r", encoding="utf-8") as f:
+        new_content = f.read()
+
+    old_content_clean = keep_doc_examples_only(old_content)
+    new_content_clean = keep_doc_examples_only(new_content)
+
+    return old_content_clean != new_content_clean
+
+
 def get_diff(repo, base_commit, commits):
     """
     Get's the diff between one or several commits and the head of the repository.
@@ -216,32 +254,46 @@ def get_modified_python_files(diff_with_last_commit=False):
     return get_diff(repo, repo.head.commit, parent_commits)


-def get_diff_for_py_and_mdx_files(repo, base_commit, commits):
+def get_diff_for_doctesting(repo, base_commit, commits):
     """
-    Get's the diff between one or several commits and the head of the repository.
+    Gets the diff between one or several commits and the head of the repository, keeping only files where some doc
+    example(s) changed.
     """
     print("\n### DIFF ###\n")
     code_diff = []
     for commit in commits:
         for diff_obj in commit.diff(base_commit):
-            # We always add new python files
-            if diff_obj.change_type in ["A", "M", "R"] and (
-                diff_obj.b_path.endswith(".py") or diff_obj.b_path.endswith(".mdx")
-            ):
+            # We always add new python/mdx files
+            if diff_obj.change_type in ["A"] and (diff_obj.b_path.endswith(".py") or diff_obj.b_path.endswith(".mdx")):
                 code_diff.append(diff_obj.b_path)
+            # Now for modified files
+            elif diff_obj.change_type in ["M", "R"] and (
+                diff_obj.b_path.endswith(".py") or diff_obj.b_path.endswith(".mdx")
+            ):
+                # In case of renames, we'll look at the tests using both the old and new name.
+                if diff_obj.a_path != diff_obj.b_path:
+                    code_diff.extend([diff_obj.a_path, diff_obj.b_path])
+                else:
+                    # Otherwise, we check whether the modifications touch some doc example(s).
+                    if diff_contains_doc_examples(repo, commit, diff_obj.b_path):
+                        code_diff.append(diff_obj.a_path)
+                    else:
+                        print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.")

     return code_diff
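To make `keep_doc_examples_only` concrete: only the fenced example blocks survive, with comments and blank lines stripped, so two versions of a file compare equal unless a doc example actually changed. A self-contained sketch using the same logic on an illustrative docstring:

```python
# Sketch of keep_doc_examples_only from the diff above, applied to a tiny example.
import re

def keep_doc_examples_only(content):
    # Keep only the text between triple-backtick fences
    splits = content.split("```")
    content = "```" + "```".join(splits[1::2]) + "```"
    # Drop comments and blank lines
    lines_to_keep = []
    for line in content.split("\n"):
        line = re.sub("#.*$", "", line)
        if len(line) == 0 or line.isspace():
            continue
        lines_to_keep.append(line)
    return "\n".join(lines_to_keep)

doc = "Prose before.\n```python\nx = 1  # comment\n\ny = x + 1\n```\nProse after."
print(keep_doc_examples_only(doc))
# Prints only the fenced block, with the comment and the blank line removed.
```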

-def get_modified_python_and_mdx_files(diff_with_last_commit=False):
+def get_doctest_files(diff_with_last_commit=False):
     """
-    Return a list of python and mdx files that have been modified between:
+    Return a list of python and mdx files where some doc example(s) have been modified between:

     - the current head and the main branch if `diff_with_last_commit=False` (default)
     - the current head and its parent commit otherwise.
     """
     repo = Repo(PATH_TO_REPO)

+    test_files_to_run = []  # noqa
     if not diff_with_last_commit:
         print(f"main is at {repo.refs.main.commit}")
         print(f"Current head is at {repo.head.commit}")
@@ -249,23 +301,14 @@ def get_doctest_files(diff_with_last_commit=False):
         branching_commits = repo.merge_base(repo.refs.main, repo.head)
         for commit in branching_commits:
             print(f"Branching commit: {commit}")
-        return get_diff_for_py_and_mdx_files(repo, repo.head.commit, branching_commits)
+        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits)
     else:
         print(f"main is at {repo.head.commit}")
         parent_commits = repo.head.commit.parents
         for commit in parent_commits:
             print(f"Parent commit: {commit}")
-        return get_diff_for_py_and_mdx_files(repo, repo.head.commit, parent_commits)
-
-
-def get_doctest_files(diff_with_last_commit=False):
-    """
-    Return a list of python and mdx files that have been modified between:
+        test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits)

-    - the current head and the main branch if `diff_with_last_commit=False` (default)
-    - the current head and its parent commit otherwise.
-    """
-    test_files_to_run = get_modified_python_and_mdx_files(diff_with_last_commit)
     with open("utils/documentation_tests.txt") as fp:
         documentation_tests = set(fp.read().strip().split("\n"))
     # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%.
@@ -647,6 +690,14 @@ def infer_tests_to_run(

     create_json_map(test_files_to_run, json_output_file)

+    doctest_list = get_doctest_files()
+
+    print(f"\n### DOCTEST TO RUN ###\n{_print_list(doctest_list)}")
+    if len(doctest_list) > 0:
+        doctest_file = Path(output_file).parent / "doctest_list.txt"
+        with open(doctest_file, "w", encoding="utf-8") as f:
+            f.write(" ".join(doctest_list))
+

 def filter_tests(output_file, filters):
     """