
Commit 18434a3

Fix unused arguments

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

1 parent cccef4b

8 files changed (+16, -26 lines)

src/transformers/generation/candidate_generator.py

Lines changed: 2 additions & 2 deletions
@@ -524,7 +524,7 @@ def get_candidates(self, input_ids: torch.LongTensor) -> tuple[torch.LongTensor,
         self.assistant_kwargs.pop("attention_mask", None)
 
         assistant_output = self.assistant_model.generate(**generation_args, **self.assistant_kwargs)
-        new_target_ids = self._process_assistant_outputs(input_ids, assistant_output.sequences, assistant_input_ids)
+        new_target_ids = self._process_assistant_outputs(input_ids, assistant_output.sequences)
 
         # Update state
         self.prev_target_ids_len = input_ids.shape[1]
@@ -583,7 +583,7 @@ def _prepare_assistant_input_ids(self, input_ids: torch.LongTensor) -> tuple[tor
         return assistant_input_ids, remove_from_pkv
 
     def _process_assistant_outputs(
-        self, input_ids: torch.LongTensor, assistant_sequences: torch.LongTensor, assistant_input_ids: torch.LongTensor
+        self, input_ids: torch.LongTensor, assistant_sequences: torch.LongTensor
     ) -> torch.LongTensor:
         """Processes assistant outputs to obtain target input IDs."""
         num_prev_assistant = self.prev_assistant_ids.shape[1]
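
The removed `assistant_input_ids` argument was unused; `_process_assistant_outputs` already works from `self.prev_assistant_ids`. For context, a minimal sketch of the assisted-generation call that reaches this candidate-generator code (checkpoint names are placeholders, not taken from this commit):

# Minimal assisted-generation sketch; checkpoint names are placeholders.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("target-model")        # placeholder
model = AutoModelForCausalLM.from_pretrained("target-model")     # placeholder
assistant = AutoModelForCausalLM.from_pretrained("draft-model")  # placeholder

inputs = tokenizer("The quick brown fox", return_tensors="pt")
# Passing assistant_model switches generate() into assisted decoding, the code
# path where get_candidates() above runs.
outputs = model.generate(**inputs, assistant_model=assistant, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))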

src/transformers/modeling_utils.py

Lines changed: 1 addition & 3 deletions
@@ -1487,7 +1487,6 @@ def _find_missing_and_unexpected_keys(
     checkpoint_keys: list[str],
     loading_base_model_from_task_state_dict: bool,
     hf_quantizer: Optional[HfQuantizer],
-    device_map: dict,
 ) -> tuple[list[str], list[str]]:
     """Find missing keys (keys that are part of the model parameters but were NOT found in the loaded state dict keys) and unexpected keys
     (keys found in the loaded state dict keys, but that are NOT part of the model parameters)
@@ -2805,7 +2804,7 @@ def _check_and_adjust_attn_implementation(
         try:
             self._sdpa_can_dispatch(is_init_check)
             applicable_attn_implementation = "sdpa"
-        except (ValueError, ImportError) as e:
+        except (ValueError, ImportError):
            applicable_attn_implementation = "eager"
        else:
            applicable_attn_implementation = self.get_correct_attn_implementation(
@@ -5468,7 +5467,6 @@ def _load_pretrained_model(
        checkpoint_keys,
        loading_base_model_from_task_state_dict,
        hf_quantizer,
-       device_map,
     )
     # Find all the keys with shape mismatch (if we ignore the mismatch, the weights need to be newly initialized the
     # same way as missing keys)
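
`device_map` was never read inside `_find_missing_and_unexpected_keys`, and the bound exception `e` was unused, so both are dropped. As a hedged reading of the second hunk, user-visible behavior is unchanged: with no explicit `attn_implementation`, loading still tries SDPA first and falls back to eager. A sketch of that public path (the checkpoint name is a placeholder):

# Sketch of the public path behind _check_and_adjust_attn_implementation;
# "some-checkpoint" is a placeholder, not part of this commit.
from transformers import AutoModel

model = AutoModel.from_pretrained("some-checkpoint")
# With no attn_implementation requested, SDPA is tried first; if it cannot
# dispatch (ValueError/ImportError above), the model falls back to "eager".
print(model.config._attn_implementation)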

src/transformers/models/sew_d/modeling_sew_d.py

Lines changed: 1 addition & 1 deletion
@@ -510,7 +510,7 @@ def forward(ctx, input, mask, dim):
     @staticmethod
     def backward(ctx, grad_output):
         (output,) = ctx.saved_tensors
-        inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)
+        inputGrad = softmax_backward_data(ctx, grad_output, output)
         return inputGrad, None, None
 
     @staticmethod

src/transformers/pytorch_utils.py

Lines changed: 2 additions & 2 deletions
@@ -50,15 +50,15 @@
 _torch_distributed_available = torch.distributed.is_available()
 
 
-def softmax_backward_data(parent, grad_output, output, dim, self):
+def softmax_backward_data(parent, grad_output, output):
     """
     A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according
     to the torch version detected.
     """
 
     from torch import _softmax_backward_data
 
-    return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
+    return _softmax_backward_data(grad_output, output, parent.dim, output.dtype)
 
 
 def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
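
`softmax_backward_data` now takes only `(parent, grad_output, output)`: the softmax dimension comes from `parent.dim` and the dtype from `output.dtype`, which made the old `dim` and `self` parameters redundant (the sew_d change above is the matching call-site update). A minimal sketch of a custom autograd function using the new three-argument form; this class is illustrative, not part of the commit:

# Illustrative torch.autograd.Function using the new 3-argument
# softmax_backward_data(parent, grad_output, output).
import torch
from transformers.pytorch_utils import softmax_backward_data


class _Softmax(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, dim):
        ctx.dim = dim  # read back as parent.dim inside softmax_backward_data
        output = x.softmax(dim)
        ctx.save_for_backward(output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        (output,) = ctx.saved_tensors
        return softmax_backward_data(ctx, grad_output, output), None


y = _Softmax.apply(torch.randn(2, 5, requires_grad=True), 1)
y.sum().backward()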

src/transformers/trainer_pt_utils.py

Lines changed: 2 additions & 2 deletions
@@ -929,7 +929,7 @@ def _secs2timedelta(secs):
     return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"
 
 
-def metrics_format(self, metrics: dict[str, float]) -> dict[str, float]:
+def metrics_format(metrics: dict[str, float]) -> dict[str, float]:
     """
     Reformat Trainer metrics values to a human-readable format.
 
@@ -1038,7 +1038,7 @@ def log_metrics(self, split, metrics):
         return
 
     print(f"***** {split} metrics *****")
-    metrics_formatted = self.metrics_format(metrics)
+    metrics_formatted = metrics_format(metrics)
     k_width = max(len(str(x)) for x in metrics_formatted)
     v_width = max(len(str(x)) for x in metrics_formatted.values())
     for key in sorted(metrics_formatted.keys()):
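
Since `metrics_format` never used `self`, it is now a plain module-level helper, and `log_metrics` calls it directly. A small usage sketch; the metric values are made up for illustration:

# metrics_format is now a free function; the values below are made-up examples.
from transformers.trainer_pt_utils import metrics_format

metrics = {"eval_loss": 0.123456, "eval_runtime": 75.2, "eval_samples_per_second": 42.0}
print(metrics_format(metrics))  # e.g. floats rounded, runtime rendered as a timedelta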

src/transformers/utils/auto_docstring.py

Lines changed: 5 additions & 10 deletions
@@ -1210,7 +1210,7 @@ def get_checkpoint_from_config_class(config_class):
     return checkpoint
 
 
-def add_intro_docstring(func, class_name, parent_class=None, indent_level=0):
+def add_intro_docstring(func, class_name, indent_level=0):
     intro_docstring = ""
     if func.__name__ == "forward":
         intro_docstring = rf"""The [`{class_name}`] forward method, overrides the `__call__` special method.
@@ -1452,17 +1452,14 @@ def find_sig_line(lines, line_end):
     return sig_line_end
 
 
-def _process_kwargs_parameters(
-    sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters
-):
+def _process_kwargs_parameters(sig, func, parent_class, documented_kwargs, indent_level, undocumented_parameters):
     """
     Process **kwargs parameters if needed.
 
     Args:
         sig (`inspect.Signature`): Function signature
         func (`function`): Function the parameters belong to
         parent_class (`class`): Parent class of the function
-        model_name_lowercase (`str`): Lowercase model name
         documented_kwargs (`dict`): Dictionary of kwargs that are already documented
         indent_level (`int`): Indentation level
         undocumented_parameters (`list`): List to append undocumented parameters to
@@ -1493,7 +1490,7 @@ def _process_kwargs_parameters(
     # Extract documentation for kwargs
     kwargs_documentation = kwarg_param.annotation.__args__[0].__doc__
     if kwargs_documentation is not None:
-        documented_kwargs, _ = parse_docstring(kwargs_documentation)
+        documented_kwargs = parse_docstring(kwargs_documentation)[0]
 
     # Process each kwarg parameter
     for param_name, param_type_annotation in kwarg_param.annotation.__args__[0].__annotations__.items():
@@ -1580,7 +1577,7 @@ def _process_parameters_section(
 
     # Process **kwargs parameters if needed
     kwargs_docstring = _process_kwargs_parameters(
-        sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters
+        sig, func, parent_class, documented_kwargs, indent_level, undocumented_parameters
     )
     docstring += kwargs_docstring
 
@@ -1740,9 +1737,7 @@ def auto_method_docstring(
         if not docstring.strip().endswith("\n"):
             docstring += "\n"
     else:
-        docstring = add_intro_docstring(
-            func, class_name=class_name, parent_class=parent_class, indent_level=indent_level
-        )
+        docstring = add_intro_docstring(func, class_name=class_name, indent_level=indent_level)
 
     # Process Parameters section
     docstring += _process_parameters_section(

src/transformers/utils/import_utils.py

Lines changed: 2 additions & 2 deletions
@@ -818,7 +818,7 @@ def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False) -> bool:
 
 
 @lru_cache
-def is_torch_neuroncore_available(check_device=True) -> bool:
+def is_torch_neuroncore_available() -> bool:
     if importlib.util.find_spec("torch_neuronx") is not None:
         return is_torch_xla_available()
     return False
@@ -844,7 +844,7 @@ def is_torch_npu_available(check_device=False) -> bool:
 
 
 @lru_cache
-def is_torch_mlu_available(check_device=False) -> bool:
+def is_torch_mlu_available() -> bool:
     """
     Checks if `mlu` is available via an `cndev-based` check which won't trigger the drivers and leave mlu
     uninitialized.
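
The `check_device` flags were never consulted, so both helpers are now zero-argument checks (still memoized by `@lru_cache`); any caller still passing `check_device` would now raise a `TypeError`. Typical usage stays as simple as:

# Both availability checks are now zero-argument; results are cached by @lru_cache.
from transformers.utils.import_utils import is_torch_mlu_available, is_torch_neuroncore_available

if is_torch_neuroncore_available():
    print("torch_neuronx / XLA NeuronCore backend detected")
if is_torch_mlu_available():
    print("MLU backend detected")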

src/transformers/video_utils.py

Lines changed: 1 addition & 4 deletions
@@ -714,7 +714,6 @@ def sample_indices_fn_func(metadata, **fn_kwargs):
 
 def convert_to_rgb(
     video: np.ndarray,
-    data_format: Optional[ChannelDimension] = None,
     input_data_format: Optional[Union[str, ChannelDimension]] = None,
 ) -> np.ndarray:
     """
@@ -723,15 +722,13 @@ def convert_to_rgb(
     Args:
         video (`np.array`):
             The video to convert.
-        data_format (`ChannelDimension`, *optional*):
-            The channel dimension format of the output video. If unset, will use the inferred format from the input.
         input_data_format (`ChannelDimension`, *optional*):
             The channel dimension format of the input video. If unset, will use the inferred format from the input.
     """
     if not isinstance(video, np.ndarray):
         raise TypeError(f"Video has to be a numpy array to convert to RGB format, but found {type(video)}")
 
-    # np.array usually comes with ChannelDimension.LAST so leet's convert it
+    # np.array usually comes with ChannelDimension.LAST so let's convert it
     if input_data_format is None:
         input_data_format = infer_channel_dimension_format(video)
     video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)
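
`convert_to_rgb` loses its unused `data_format` parameter; only the video and an optional `input_data_format` remain. A hedged usage sketch with a synthetic channels-last video (shapes and values are illustrative):

# Hedged sketch: convert_to_rgb now takes only the video and an optional
# input_data_format; the synthetic video below is illustrative.
import numpy as np
from transformers.video_utils import convert_to_rgb

video = np.random.randint(0, 256, size=(8, 224, 224, 3), dtype=np.uint8)  # (frames, H, W, C)
rgb_video = convert_to_rgb(video)  # channel layout inferred as channels-last
print(rgb_video.shape)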
