
Commit 928d441

more and more
1 parent 54ae2ef commit 928d441


44 files changed: +144 / -1102 lines

src/transformers/__init__.py

Lines changed: 0 additions & 24 deletions
@@ -39,12 +39,9 @@
 # so that mypy, pylint or other static linters can recognize them,
 # given that they are not exported using `__all__` in this file.
 from .utils import is_bitsandbytes_available as is_bitsandbytes_available
-from .utils import is_flax_available as is_flax_available
-from .utils import is_keras_nlp_available as is_keras_nlp_available
 from .utils import is_scipy_available as is_scipy_available
 from .utils import is_sentencepiece_available as is_sentencepiece_available
 from .utils import is_speech_available as is_speech_available
-from .utils import is_tensorflow_text_available as is_tensorflow_text_available
 from .utils import is_timm_available as is_timm_available
 from .utils import is_tokenizers_available as is_tokenizers_available
 from .utils import is_torch_available as is_torch_available
@@ -223,8 +220,6 @@
     "is_bitsandbytes_available",
     "is_datasets_available",
     "is_faiss_available",
-    "is_flax_available",
-    "is_keras_nlp_available",
     "is_matplotlib_available",
     "is_phonemizer_available",
     "is_psutil_available",
@@ -236,7 +231,6 @@
     "is_sentencepiece_available",
     "is_sklearn_available",
     "is_speech_available",
-    "is_tensorflow_text_available",
     "is_timm_available",
     "is_tokenizers_available",
     "is_torch_available",
@@ -573,20 +567,6 @@
 from .generation import EpsilonLogitsWarper as EpsilonLogitsWarper
 from .generation import EtaLogitsWarper as EtaLogitsWarper
 from .generation import ExponentialDecayLengthPenalty as ExponentialDecayLengthPenalty
-from .generation import FlaxForcedBOSTokenLogitsProcessor as FlaxForcedBOSTokenLogitsProcessor
-from .generation import FlaxForcedEOSTokenLogitsProcessor as FlaxForcedEOSTokenLogitsProcessor
-from .generation import FlaxForceTokensLogitsProcessor as FlaxForceTokensLogitsProcessor
-from .generation import FlaxGenerationMixin as FlaxGenerationMixin
-from .generation import FlaxLogitsProcessor as FlaxLogitsProcessor
-from .generation import FlaxLogitsProcessorList as FlaxLogitsProcessorList
-from .generation import FlaxLogitsWarper as FlaxLogitsWarper
-from .generation import FlaxMinLengthLogitsProcessor as FlaxMinLengthLogitsProcessor
-from .generation import FlaxSuppressTokensAtBeginLogitsProcessor as FlaxSuppressTokensAtBeginLogitsProcessor
-from .generation import FlaxSuppressTokensLogitsProcessor as FlaxSuppressTokensLogitsProcessor
-from .generation import FlaxTemperatureLogitsWarper as FlaxTemperatureLogitsWarper
-from .generation import FlaxTopKLogitsWarper as FlaxTopKLogitsWarper
-from .generation import FlaxTopPLogitsWarper as FlaxTopPLogitsWarper
-from .generation import FlaxWhisperTimeStampLogitsProcessor as FlaxWhisperTimeStampLogitsProcessor
 from .generation import ForcedBOSTokenLogitsProcessor as ForcedBOSTokenLogitsProcessor
 from .generation import ForcedEOSTokenLogitsProcessor as ForcedEOSTokenLogitsProcessor
 from .generation import GenerationConfig as GenerationConfig
@@ -648,18 +628,14 @@
 from .integrations import is_wandb_available as is_wandb_available
 from .integrations.executorch import TorchExportableModuleWithStaticCache as TorchExportableModuleWithStaticCache
 from .integrations.executorch import convert_and_export_with_cache as convert_and_export_with_cache
-from .keras_callbacks import KerasMetricCallback as KerasMetricCallback
-from .keras_callbacks import PushToHubCallback as PushToHubCallback
 from .masking_utils import AttentionMaskInterface as AttentionMaskInterface
 from .model_debugging_utils import model_addition_debugger_context as model_addition_debugger_context
 
 # Model Cards
 from .modelcard import ModelCard as ModelCard
-from .modeling_flax_utils import FlaxPreTrainedModel as FlaxPreTrainedModel
 from .modeling_layers import GradientCheckpointingLayer as GradientCheckpointingLayer
 from .modeling_rope_utils import ROPE_INIT_FUNCTIONS as ROPE_INIT_FUNCTIONS
 from .modeling_rope_utils import dynamic_rope_update as dynamic_rope_update
-
 from .modeling_utils import AttentionInterface as AttentionInterface
 from .modeling_utils import PreTrainedModel as PreTrainedModel
 from .models import *
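
Taken together, these hunks drop the Flax/Keras/TensorFlow re-exports from the top-level namespace while keeping the PyTorch-side exports. A minimal sanity check, assuming an environment built from this commit (the snippet is illustrative, not part of the diff):

from transformers import GenerationConfig, PreTrainedModel, is_torch_available

print(is_torch_available())  # the torch helper survives the cleanup above

try:
    # FlaxPreTrainedModel is one of the re-exports deleted in this commit
    from transformers import FlaxPreTrainedModel
except ImportError as err:
    print(f"Flax export removed: {err}")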

src/transformers/data/data_collator.py

Lines changed: 0 additions & 8 deletions
@@ -685,7 +685,6 @@ class DataCollatorForLanguageModeling(DataCollatorMixin):
     mask_replace_prob: float = 0.8
     random_replace_prob: float = 0.1
     pad_to_multiple_of: Optional[int] = None
-    tf_experimental_compile: bool = False
     return_tensors: str = "pt"
     seed: Optional[int] = None
 
@@ -1293,13 +1292,6 @@ def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> d
         inputs, perm_mask, target_mapping, labels = self.torch_mask_tokens(batch)
         return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
 
-    def tf_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
-        if isinstance(examples[0], Mapping):
-            examples = [e["input_ids"] for e in examples]
-        batch = _tf_collate_batch(examples, self.tokenizer)
-        inputs, perm_mask, target_mapping, labels = self.tf_mask_tokens(batch)
-        return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels}
-
     def numpy_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]:
         if isinstance(examples[0], Mapping):
             examples = [e["input_ids"] for e in examples]
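
The removed `tf_experimental_compile` field and `tf_call` method were the TensorFlow paths of these collators; what remains is the `"pt"`/`"np"` selection via `return_tensors`. A short usage sketch under that assumption (the checkpoint name is illustrative, not taken from the diff):

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="pt")

batch = collator([tokenizer("hello world"), tokenizer("goodbye world")])
print(batch["input_ids"].shape, batch["labels"].shape)  # padded torch tensors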

src/transformers/feature_extraction_sequence_utils.py

Lines changed: 3 additions & 6 deletions
@@ -20,7 +20,7 @@
 import numpy as np
 
 from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
-from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
+from .utils import PaddingStrategy, TensorType, is_torch_tensor, logging, to_numpy
 
 
 logger = logging.get_logger(__name__)
@@ -116,7 +116,6 @@ def pad(
             return_tensors (`str` or [`~utils.TensorType`], *optional*):
                 If set, will return tensors instead of list of python integers. Acceptable values are:
 
-                - `'tf'`: Return TensorFlow `tf.constant` objects.
                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
                 - `'np'`: Return Numpy `np.ndarray` objects.
         """
@@ -145,7 +144,7 @@ def pad(
             processed_features["attention_mask"] = []
             return processed_features
 
-        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
+        # If we have PyTorch tensors or lists as inputs, we cast them as Numpy arrays
         # and rebuild them afterwards if no return_tensors is specified
         # Note that we lose the specific device the tensor may be on for PyTorch
 
@@ -159,9 +158,7 @@ def pad(
         first_element = required_input[index][0]
 
         if return_tensors is None:
-            if is_tf_tensor(first_element):
-                return_tensors = "tf"
-            elif is_torch_tensor(first_element):
+            if is_torch_tensor(first_element):
                 return_tensors = "pt"
             elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                 return_tensors = "np"
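
After the last hunk, pad() infers the return type only from PyTorch tensors and NumPy-compatible inputs. A minimal standalone sketch of that inference (the helper name `infer_return_tensors` is illustrative, not part of the library API):

import numpy as np
import torch

def infer_return_tensors(first_element, return_tensors=None):
    if return_tensors is None:
        if torch.is_tensor(first_element):  # stands in for transformers' is_torch_tensor check
            return_tensors = "pt"
        elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
            return_tensors = "np"
    return return_tensors

print(infer_return_tensors(torch.tensor([0.1, 0.2])))  # "pt"
print(infer_return_tensors([0.1, 0.2]))                # "np"; TensorFlow inputs are no longer auto-detected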

src/transformers/file_utils.py

Lines changed: 0 additions & 4 deletions
@@ -47,9 +47,6 @@
     TORCH_FX_REQUIRED_VERSION,
     TRANSFORMERS_CACHE,
     TRANSFORMERS_DYNAMIC_MODULE_NAME,
-    USE_JAX,
-    USE_TF,
-    USE_TORCH,
     WEIGHTS_INDEX_NAME,
     WEIGHTS_NAME,
     ContextManagers,
@@ -80,7 +77,6 @@
     is_datasets_available,
     is_detectron2_available,
     is_faiss_available,
-    is_flax_available,
     is_ftfy_available,
     is_g2p_en_available,
     is_in_notebook,

src/transformers/generation/__init__.py

Lines changed: 1 addition & 2 deletions
@@ -14,7 +14,7 @@
 
 from typing import TYPE_CHECKING
 
-from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available
+from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
 
 
 _import_structure = {
@@ -124,7 +124,6 @@
 ]
 
 
-
 if TYPE_CHECKING:
     from .configuration_utils import (
         BaseWatermarkingConfig,

src/transformers/integrations/integration_utils.py

Lines changed: 0 additions & 9 deletions
@@ -48,16 +48,13 @@
     flatten_dict,
     is_datasets_available,
     is_pandas_available,
-    is_tf_available,
     is_torch_available,
     logging,
 )
 
 
 logger = logging.get_logger(__name__)
 
-if is_tf_available():
-    from .. import TFPreTrainedModel
 
 if is_torch_available():
     import torch
@@ -760,12 +757,6 @@ def save_model_architecture_to_file(model: Any, output_dir: str):
     with open(f"{output_dir}/model_architecture.txt", "w+") as f:
         if isinstance(model, PreTrainedModel):
             print(model, file=f)
-        elif is_tf_available() and isinstance(model, TFPreTrainedModel):
-
-            def print_to_file(s):
-                print(s, file=f)
-
-            model.summary(print_fn=print_to_file)
         elif is_torch_available() and (
             isinstance(model, (torch.nn.Module, PushToHubMixin)) and hasattr(model, "base_model")
         ):
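
With the TFPreTrainedModel / `model.summary` branch gone, the helper only writes the repr of PyTorch models. A rough sketch of the surviving behaviour, assuming a plain torch module and an illustrative output path:

import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 2))
with open("model_architecture.txt", "w+") as f:
    print(model, file=f)  # same repr-based dump the PreTrainedModel branch uses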

src/transformers/models/conditional_detr/image_processing_conditional_detr.py

Lines changed: 0 additions & 2 deletions
@@ -56,8 +56,6 @@
 from ...utils import (
     TensorType,
     is_scipy_available,
-    is_tf_available,
-    is_tf_tensor,
     is_torch_available,
     is_torch_tensor,
     is_vision_available,

src/transformers/models/data2vec/modeling_data2vec_text.py

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@
 
 
 # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Data2VecText
-class Data2VecTextEmbeddings(nn.Module):
+class Data2VecTextForTextEmbeddings(nn.Module):
     """
     Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
     """

src/transformers/models/decision_transformer/modeling_decision_transformer.py

Lines changed: 0 additions & 1 deletion
@@ -15,7 +15,6 @@
 """PyTorch DecisionTransformer model."""
 
 import math
-import os
 from dataclasses import dataclass
 from typing import Callable, Optional, Union
 
src/transformers/models/luke/tokenization_luke.py

Lines changed: 3 additions & 6 deletions
@@ -37,7 +37,7 @@
     TruncationStrategy,
     to_py_obj,
 )
-from ...utils import add_end_docstrings, is_tf_tensor, is_torch_tensor, logging
+from ...utils import add_end_docstrings, is_torch_tensor, logging
 
 
 logger = logging.get_logger(__name__)
@@ -1441,7 +1441,6 @@ def pad(
             return_tensors (`str` or [`~utils.TensorType`], *optional*):
                 If set, will return tensors instead of list of python integers. Acceptable values are:
 
-                - `'tf'`: Return TensorFlow `tf.constant` objects.
                 - `'pt'`: Return PyTorch `torch.Tensor` objects.
                 - `'np'`: Return Numpy `np.ndarray` objects.
             verbose (`bool`, *optional*, defaults to `True`):
@@ -1466,7 +1465,7 @@ def pad(
             encoded_inputs["attention_mask"] = []
             return encoded_inputs
 
-        # If we have PyTorch/TF/NumPy tensors/arrays as inputs, we cast them as python objects
+        # If we have PyTorch/NumPy tensors/arrays as inputs, we cast them as python objects
         # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
 
@@ -1480,9 +1479,7 @@ def pad(
         first_element = required_input[index][0]
         # At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
         if not isinstance(first_element, (int, list, tuple)):
-            if is_tf_tensor(first_element):
-                return_tensors = "tf" if return_tensors is None else return_tensors
-            elif is_torch_tensor(first_element):
+            if is_torch_tensor(first_element):
                 return_tensors = "pt" if return_tensors is None else return_tensors
             elif isinstance(first_element, np.ndarray):
                 return_tensors = "np" if return_tensors is None else return_tensors
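
After this change, `LukeTokenizer.pad` documents only the `"pt"` and `"np"` backends and no longer auto-detects TensorFlow tensors. A short usage sketch under that assumption (the checkpoint name and sentences are illustrative, not taken from the diff):

from transformers import LukeTokenizer

tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")  # illustrative checkpoint
encoded = [
    tokenizer("Beyoncé lives in Los Angeles.", entity_spans=[(0, 7)]),
    tokenizer("Kevin Love is a basketball player.", entity_spans=[(0, 10)]),
]
batch = tokenizer.pad(encoded, padding=True, return_tensors="pt")  # "tf" is no longer a documented choice
print(batch["input_ids"].shape)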
