
Commit f99c279

cyyever, SunMarc, and MekkCyber authored
Remove deprecated code (#37059)
* Remove deprecated code
* fix get_loading_attributes
* fix error
* skip test

---------

Co-authored-by: Marc Sun <57196510+SunMarc@users.noreply.github.com>
Co-authored-by: Mohamed Mekkouri <93391238+MekkCyber@users.noreply.github.com>
1 parent d1efaf0 commit f99c279

File tree: 8 files changed (+13, -66 lines changed)


examples/pytorch/language-modeling/run_fim.py

Lines changed: 8 additions & 4 deletions

@@ -47,7 +47,7 @@
     Trainer,
     TrainingArguments,
     default_data_collator,
-    is_torch_tpu_available,
+    is_torch_xla_available,
     set_seed,
 )
 from transformers.integrations import is_deepspeed_zero3_enabled
@@ -525,7 +525,7 @@ def main():
     if torch.cuda.is_availble():
         pad_factor = 8

-    elif is_torch_tpu_available():
+    elif is_torch_xla_available(check_is_tpu=True):
         pad_factor = 128

     # Add the new tokens to the tokenizer
@@ -795,9 +795,13 @@ def compute_metrics(eval_preds):
         processing_class=tokenizer,
         # Data collator will default to DataCollatorWithPadding, so we change it.
         data_collator=default_data_collator,
-        compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
+        compute_metrics=compute_metrics
+        if training_args.do_eval and not is_torch_xla_available(check_is_tpu=True)
+        else None,
         preprocess_logits_for_metrics=(
-            preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available() else None
+            preprocess_logits_for_metrics
+            if training_args.do_eval and not is_torch_xla_available(check_is_tpu=True)
+            else None
         ),
     )

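For scripts outside this commit that still call the removed helper, the replacement pattern is the same one applied above. A minimal sketch, assuming only that transformers with the retained `is_torch_xla_available` helper is installed (the pad_factor values come from run_fim.py; the final else branch is an illustrative assumption):

import torch

from transformers import is_torch_xla_available

# is_torch_tpu_available() is gone; ask the XLA helper whether the XLA
# device is a TPU instead, exactly as the example script now does.
if torch.cuda.is_available():
    pad_factor = 8
elif is_torch_xla_available(check_is_tpu=True):
    pad_factor = 128
else:
    pad_factor = 1  # assumption: no special padding multiple on plain CPU

print(f"padding vocab size to a multiple of {pad_factor}")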
examples/pytorch/language-modeling/run_fim_no_trainer.py

Lines changed: 2 additions & 2 deletions

@@ -52,7 +52,7 @@
     SchedulerType,
     default_data_collator,
     get_scheduler,
-    is_torch_tpu_available,
+    is_torch_xla_available,
 )
 from transformers.integrations import is_deepspeed_zero3_enabled
 from transformers.utils import check_min_version, send_example_telemetry
@@ -492,7 +492,7 @@ def main():
     if torch.cuda.is_availble():
         pad_factor = 8

-    elif is_torch_tpu_available():
+    elif is_torch_xla_available(check_is_tpu=True):
         pad_factor = 128

     # Add the new tokens to the tokenizer

src/transformers/__init__.py

Lines changed: 0 additions & 2 deletions

@@ -1037,7 +1037,6 @@
         "is_torch_musa_available",
         "is_torch_neuroncore_available",
         "is_torch_npu_available",
-        "is_torch_tpu_available",
         "is_torchvision_available",
         "is_torch_xla_available",
         "is_torch_xpu_available",
@@ -6341,7 +6340,6 @@
         is_torch_musa_available,
         is_torch_neuroncore_available,
         is_torch_npu_available,
-        is_torch_tpu_available,
         is_torch_xla_available,
         is_torch_xpu_available,
         is_torchvision_available,

src/transformers/image_transforms.py

Lines changed: 0 additions & 16 deletions

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import warnings
 from collections.abc import Collection, Iterable
 from math import ceil
 from typing import Optional, Union
@@ -453,7 +452,6 @@ def center_crop(
     size: tuple[int, int],
     data_format: Optional[Union[str, ChannelDimension]] = None,
     input_data_format: Optional[Union[str, ChannelDimension]] = None,
-    return_numpy: Optional[bool] = None,
 ) -> np.ndarray:
     """
     Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to
@@ -474,22 +472,11 @@ def center_crop(
                 - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                 - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
             If unset, will use the inferred format of the input image.
-        return_numpy (`bool`, *optional*):
-            Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the
-            previous ImageFeatureExtractionMixin method.
-                - Unset: will return the same type as the input image.
-                - `True`: will return a numpy array.
-                - `False`: will return a `PIL.Image.Image` object.
     Returns:
         `np.ndarray`: The cropped image.
     """
     requires_backends(center_crop, ["vision"])

-    if return_numpy is not None:
-        warnings.warn("return_numpy is deprecated and will be removed in v.4.33", FutureWarning)
-
-    return_numpy = True if return_numpy is None else return_numpy
-
     if not isinstance(image, np.ndarray):
         raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}")
@@ -541,9 +528,6 @@ def center_crop(
     new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)]
     new_image = to_channel_dimension_format(new_image, output_data_format, ChannelDimension.FIRST)

-    if not return_numpy:
-        new_image = to_pil_image(new_image)
-
     return new_image

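With return_numpy removed, center_crop always returns a NumPy array; callers that relied on return_numpy=False now convert explicitly. A minimal sketch, assuming a vision backend (Pillow) is installed; the array shape and crop size are illustrative:

import numpy as np

from transformers.image_transforms import center_crop, to_pil_image

# center_crop now always hands back np.ndarray; the implicit PIL conversion
# that return_numpy=False used to trigger is now the caller's job.
image = np.random.randint(0, 256, (3, 300, 400), dtype=np.uint8)  # (channels, height, width)
cropped = center_crop(image, size=(224, 224))

pil_image = to_pil_image(cropped)  # explicit conversion when a PIL.Image is wanted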
src/transformers/utils/__init__.py

Lines changed: 0 additions & 1 deletion

@@ -228,7 +228,6 @@
     is_torch_sdpa_available,
     is_torch_tensorrt_fx_available,
     is_torch_tf32_available,
-    is_torch_tpu_available,
     is_torch_xla_available,
     is_torch_xpu_available,
     is_torchao_available,

src/transformers/utils/import_utils.py

Lines changed: 0 additions & 25 deletions

@@ -675,31 +675,6 @@ def is_g2p_en_available():
     return _g2p_en_available


-@lru_cache()
-def is_torch_tpu_available(check_device=True):
-    "Checks if `torch_xla` is installed and potentially if a TPU is in the environment"
-    warnings.warn(
-        "`is_torch_tpu_available` is deprecated and will be removed in 4.41.0. "
-        "Please use the `is_torch_xla_available` instead.",
-        FutureWarning,
-    )
-
-    if not _torch_available:
-        return False
-    if importlib.util.find_spec("torch_xla") is not None:
-        if check_device:
-            # We need to check if `xla_device` can be found, will raise a RuntimeError if not
-            try:
-                import torch_xla.core.xla_model as xm
-
-                _ = xm.xla_device()
-                return True
-            except RuntimeError:
-                return False
-        return True
-    return False
-
-
 @lru_cache
 def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False):
     """

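Call sites that used to go through is_torch_tpu_available now use the surviving helper. A short sketch of the retained entry points; the two keyword flags are taken from the signature kept above, but what each one inspects is assumed from its name rather than checked against the implementation:

from transformers.utils import is_torch_xla_available

# Broad check: is torch_xla importable and usable at all?
xla_ok = is_torch_xla_available()

# Narrower checks through the flags on the retained helper.
on_tpu = is_torch_xla_available(check_is_tpu=True)      # presumably: the XLA device is a TPU
on_xla_gpu = is_torch_xla_available(check_is_gpu=True)  # presumably: the XLA device is a GPU

print(f"xla={xla_ok} tpu={on_tpu} xla_gpu={on_xla_gpu}")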
src/transformers/utils/quantization_config.py

Lines changed: 2 additions & 15 deletions

@@ -682,15 +682,13 @@ def __init__(
         self.use_exllama = use_exllama
         self.max_input_length = max_input_length
         self.exllama_config = exllama_config
-        self.disable_exllama = kwargs.pop("disable_exllama", None)
         self.cache_block_outputs = cache_block_outputs
         self.modules_in_block_to_quantize = modules_in_block_to_quantize
         self.post_init()

     def get_loading_attributes(self):
         attibutes_dict = copy.deepcopy(self.__dict__)
         loading_attibutes = [
-            "disable_exllama",
             "use_exllama",
             "exllama_config",
             "use_cuda_fp16",
@@ -739,20 +737,9 @@ def post_init(self):
             self.use_exllama = False

         # auto-gptq specific kernel control logic
-        if self.disable_exllama is None and self.use_exllama is None:
+        if self.use_exllama is None:
             # New default behaviour
             self.use_exllama = True
-        elif self.disable_exllama is not None and self.use_exllama is None:
-            # Follow pattern of old config
-            logger.warning(
-                "Using `disable_exllama` is deprecated and will be removed in version 4.37. Use `use_exllama` instead and specify the version with `exllama_config`."
-                "The value of `use_exllama` will be overwritten by `disable_exllama` passed in `GPTQConfig` or stored in your config file."
-            )
-            self.use_exllama = not self.disable_exllama
-            self.disable_exllama = None
-        elif self.disable_exllama is not None and self.use_exllama is not None:
-            # Only happens if user explicitly passes in both arguments
-            raise ValueError("Cannot specify both `disable_exllama` and `use_exllama`. Please use just `use_exllama`")

         if self.exllama_config is None:
             self.exllama_config = {"version": ExllamaVersion.ONE}
@@ -809,7 +796,7 @@ def from_dict_optimum(cls, config_dict):
         if "disable_exllama" in config_dict:
             config_dict["use_exllama"] = not config_dict["disable_exllama"]
             # switch to None to not trigger the warning
-            config_dict["disable_exllama"] = None
+            config_dict.pop("disable_exllama")

         config = cls(**config_dict)
         return config

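Going forward, configs express the kernel choice through use_exllama (optionally with exllama_config) rather than disable_exllama; old serialized optimum configs are still translated in from_dict_optimum, which now pops the key instead of setting it to None. A minimal sketch of the new-style construction, with the 4-bit setting being illustrative rather than part of the commit:

from transformers import GPTQConfig

# `disable_exllama` is no longer read in __init__; state the intent directly.
config = GPTQConfig(bits=4, use_exllama=True)

# Leaving `use_exllama` unset keeps the new default: post_init() turns it on.
default_config = GPTQConfig(bits=4)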
tests/models/seamless_m4t_v2/test_modeling_seamless_m4t_v2.py

Lines changed: 1 addition & 1 deletion

@@ -592,7 +592,7 @@ def test_attention_outputs(self):
     # TODO: @ydshieh: refer to #34968
     @unittest.skip(reason="Failing on multi-gpu runner")
     def test_retain_grad_hidden_states_attentions(self):
-        pass
+        self.skipTest(reason="Failing on multi-gpu runner")


 @require_torch
