src/transformers/data/data_collator.py (2 changes: 0 additions & 2 deletions)

@@ -716,8 +716,6 @@ def get_generator(self, seed):
 
             return torch.Generator().manual_seed(seed)
         else:
-            import numpy as np
-
             return np.random.default_rng(seed)
 
     def create_rng(self):
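The pattern here recurs throughout the PR: a function-local import is deleted on the assumption that the same module is already imported at file scope. A minimal sketch of the resulting shape (the class name and branch condition are illustrative, not the actual transformers source; it assumes numpy and torch are imported at the top of data_collator.py):

import numpy as np
import torch


class SketchCollator:
    """Illustrative only: seeded generator creation after the local import is removed."""

    def __init__(self, return_tensors="pt"):
        self.return_tensors = return_tensors

    def get_generator(self, seed):
        # The branch condition is assumed for illustration; the real method
        # lives in src/transformers/data/data_collator.py.
        if self.return_tensors == "pt":
            return torch.Generator().manual_seed(seed)
        else:
            # `np` now refers to the module-level import above.
            return np.random.default_rng(seed)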
src/transformers/integrations/integration_utils.py (2 changes: 0 additions & 2 deletions)

@@ -544,8 +544,6 @@ def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> Be
 
 
 def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
-    from ..integrations import is_wandb_available
-
     if not is_wandb_available():
         raise ImportError("This function needs wandb installed: `pip install wandb`")
     import wandb
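The deleted line pulled in an internal helper, is_wandb_available, which the function still calls, so the helper must now be importable at module level; only the optional third-party import wandb stays deferred until after the availability check. A hedged sketch of that split (the helper body below is a stand-in, not the transformers implementation):

import importlib.util


def is_wandb_available():
    # Stand-in for transformers' own availability helper.
    return importlib.util.find_spec("wandb") is not None


def run_hp_search_wandb_sketch(trainer, n_trials, direction, **kwargs):
    if not is_wandb_available():
        raise ImportError("This function needs wandb installed: `pip install wandb`")
    import wandb  # deferred on purpose: wandb is an optional dependency

    ...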
src/transformers/integrations/tensor_parallel.py (2 changes: 0 additions & 2 deletions)

@@ -1103,8 +1103,6 @@ def distribute_model(model, distributed_config, device_mesh, tp_size):
             raise ValueError(f"Unsupported tensor parallel style {v}. Supported styles are {ALL_PARALLEL_STYLES}")
     for name, module in model.named_modules():
         if not getattr(module, "_is_hooked", False):
-            from transformers.integrations.tensor_parallel import add_tensor_parallel_hooks_to_module
-
             plan = _get_parameter_tp_plan(parameter_name=name, tp_plan=model_plan, is_weight=False)
             add_tensor_parallel_hooks_to_module(
                 model=model,
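Here the deleted line re-imported add_tensor_parallel_hooks_to_module from the very module that defines it, which is redundant: a top-level function is already in scope for every other function in its own file. A small illustrative sketch (names are hypothetical, not the real tensor_parallel API):

def add_hooks_sketch(module_name):
    # Placeholder for attaching tensor-parallel hooks to one module.
    print(f"hooking {module_name}")


def distribute_sketch(module_names):
    for name in module_names:
        # No `from this_module import add_hooks_sketch` needed here;
        # the name is resolved from the enclosing module's globals.
        add_hooks_sketch(name)


distribute_sketch(["layer.0", "layer.1"])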
src/transformers/modeling_utils.py (2 changes: 0 additions & 2 deletions)

@@ -2242,8 +2242,6 @@ def tp_plan(self, plan: dict[str, str]):
                     flexible_matched = True
                     break
             if not flexible_matched:
-                import warnings
-
                 warnings.warn(
                     f"Layer pattern '{layer_pattern}' does not match any parameters in the model. "
                     f"This rule may not be applied during tensor parallelization."
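warnings is standard library and is presumably already imported at the top of modeling_utils.py, so the local import adds nothing. A hedged sketch of the warning path, with a deliberately simplified pattern-matching step (the real matching logic inside the tp_plan setter is not shown here):

import re
import warnings


def warn_on_unmatched_pattern(layer_pattern, parameter_names):
    # Simplified stand-in: treat "*" as a numeric layer index wildcard.
    regex = re.compile(layer_pattern.replace("*", r"\d+"))
    if not any(regex.search(name) for name in parameter_names):
        warnings.warn(
            f"Layer pattern '{layer_pattern}' does not match any parameters in the model. "
            f"This rule may not be applied during tensor parallelization."
        )


warn_on_unmatched_pattern("model.layers.*.mlp", ["model.embed_tokens.weight"])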
src/transformers/testing_utils.py (3 changes: 1 addition & 2 deletions)

@@ -15,6 +15,7 @@
 import ast
 import collections
 import contextlib
+import copy
 import doctest
 import functools
 import gc

@@ -2752,8 +2753,6 @@ def wrapper(*args, **kwargs):
         else:
             test = " ".join(os.environ.get("PYTEST_CURRENT_TEST").split(" ")[:-1])
         try:
-            import copy
-
             env = copy.deepcopy(os.environ)
             env["_INSIDE_SUB_PROCESS"] = "1"
             # This prevents the entries in `short test summary info` given by the subprocess being truncated. so the
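This is the one file where the import actually moves: the first hunk adds import copy to the module-level block and the second drops the local copy inside the wrapper. The surrounding code deep-copies os.environ so the _INSIDE_SUB_PROCESS flag is only handed to the spawned test process, as in this small sketch:

import copy
import os

# A deep copy yields an independent environment mapping; adding the flag to
# `env` does not show up in `os.environ` itself.
env = copy.deepcopy(os.environ)
env["_INSIDE_SUB_PROCESS"] = "1"

print("_INSIDE_SUB_PROCESS" in os.environ)  # False
print(env["_INSIDE_SUB_PROCESS"])  # 1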
src/transformers/utils/hub.py (1 change: 0 additions & 1 deletion)

@@ -1084,7 +1084,6 @@ def get_checkpoint_shard_files(
     For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
     index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
     """
-    import json
 
     use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
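json is standard library and is assumed to already be imported at the top of hub.py. The function itself reads the checkpoint index named by index_filename; a hedged sketch of that kind of read (the key layout follows the usual sharded-index convention but is an assumption here, and the helper name is made up):

import json


def load_shard_names(index_filename):
    with open(index_filename, "r", encoding="utf-8") as f:
        index = json.load(f)
    # The weight_map points each parameter name to the shard file holding it.
    return sorted(set(index["weight_map"].values()))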
src/transformers/utils/metrics.py (2 changes: 0 additions & 2 deletions)

@@ -105,8 +105,6 @@ def decorator(func):
         if not _has_opentelemetry:
             return func
 
-        import functools
-
         @functools.wraps(func)
         def wrapper(*args, **kwargs):
             instance = args[0] if args and (hasattr(func, "__self__") and func.__self__ is not None) else None
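functools is likewise assumed to be imported at module scope in metrics.py, where the decorator either returns func untouched (no opentelemetry) or wraps it. An illustrative skeleton of that decorator shape (names are stand-ins, not the real transformers.utils.metrics API):

import functools
import importlib.util

_has_opentelemetry = importlib.util.find_spec("opentelemetry") is not None


def traced_sketch(func):
    if not _has_opentelemetry:
        # No tracing dependency: hand the function back unchanged.
        return func

    @functools.wraps(func)  # keeps func.__name__ / __doc__ on the wrapper
    def wrapper(*args, **kwargs):
        # A real implementation would open a span here before delegating.
        return func(*args, **kwargs)

    return wrapper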