diff --git a/src/transformers/data/data_collator.py b/src/transformers/data/data_collator.py
index 1bff72cf338c..1b6a4f402d4b 100644
--- a/src/transformers/data/data_collator.py
+++ b/src/transformers/data/data_collator.py
@@ -716,8 +716,6 @@ def get_generator(self, seed):
 
             return torch.Generator().manual_seed(seed)
         else:
-            import numpy as np
-
             return np.random.default_rng(seed)
 
     def create_rng(self):
diff --git a/src/transformers/integrations/integration_utils.py b/src/transformers/integrations/integration_utils.py
index 6cec1183c5c7..267b1be82fd1 100755
--- a/src/transformers/integrations/integration_utils.py
+++ b/src/transformers/integrations/integration_utils.py
@@ -544,8 +544,6 @@ def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> Be
 
 
 def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
-    from ..integrations import is_wandb_available
-
     if not is_wandb_available():
         raise ImportError("This function needs wandb installed: `pip install wandb`")
     import wandb
diff --git a/src/transformers/integrations/tensor_parallel.py b/src/transformers/integrations/tensor_parallel.py
index 3f9d40f13388..5855acd09a7e 100644
--- a/src/transformers/integrations/tensor_parallel.py
+++ b/src/transformers/integrations/tensor_parallel.py
@@ -1103,8 +1103,6 @@ def distribute_model(model, distributed_config, device_mesh, tp_size):
             raise ValueError(f"Unsupported tensor parallel style {v}. Supported styles are {ALL_PARALLEL_STYLES}")
     for name, module in model.named_modules():
         if not getattr(module, "_is_hooked", False):
-            from transformers.integrations.tensor_parallel import add_tensor_parallel_hooks_to_module
-
             plan = _get_parameter_tp_plan(parameter_name=name, tp_plan=model_plan, is_weight=False)
             add_tensor_parallel_hooks_to_module(
                 model=model,
diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py
index 31783d041fe4..c5778222aa4a 100644
--- a/src/transformers/modeling_utils.py
+++ b/src/transformers/modeling_utils.py
@@ -2242,8 +2242,6 @@ def tp_plan(self, plan: dict[str, str]):
                         flexible_matched = True
                         break
                 if not flexible_matched:
-                    import warnings
-
                     warnings.warn(
                         f"Layer pattern '{layer_pattern}' does not match any parameters in the model. "
                         f"This rule may not be applied during tensor parallelization."
diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py
index 32732560bb37..21209042192a 100644
--- a/src/transformers/testing_utils.py
+++ b/src/transformers/testing_utils.py
@@ -15,6 +15,7 @@
 import ast
 import collections
 import contextlib
+import copy
 import doctest
 import functools
 import gc
@@ -2752,8 +2753,6 @@ def wrapper(*args, **kwargs):
         else:
             test = " ".join(os.environ.get("PYTEST_CURRENT_TEST").split(" ")[:-1])
         try:
-            import copy
-
             env = copy.deepcopy(os.environ)
             env["_INSIDE_SUB_PROCESS"] = "1"
             # This prevents the entries in `short test summary info` given by the subprocess being truncated. so the
diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py
index f873175a5d49..d056fc0e8778 100644
--- a/src/transformers/utils/hub.py
+++ b/src/transformers/utils/hub.py
@@ -1084,7 +1084,6 @@ def get_checkpoint_shard_files(
     For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
     index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
     """
-    import json
 
     use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
diff --git a/src/transformers/utils/metrics.py b/src/transformers/utils/metrics.py
index 33623b385ce3..3703ddaca1fb 100644
--- a/src/transformers/utils/metrics.py
+++ b/src/transformers/utils/metrics.py
@@ -105,8 +105,6 @@ def decorator(func):
         if not _has_opentelemetry:
             return func
 
-        import functools
-
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
             instance = args[0] if args and (hasattr(func, "__self__") and func.__self__ is not None) else None