
Commit ae60692

Remove unused arguments (#40916)

Authored by Yuanyuan Chen <cyyever@outlook.com>

* Fix unused arguments
* More fixes

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>

Parent: f682797 · Commit: ae60692

10 files changed: +18 additions, -28 deletions

src/transformers/generation/candidate_generator.py

Lines changed: 2 additions & 2 deletions
@@ -524,7 +524,7 @@ def get_candidates(self, input_ids: torch.LongTensor) -> tuple[torch.LongTensor,
         self.assistant_kwargs.pop("attention_mask", None)
 
         assistant_output = self.assistant_model.generate(**generation_args, **self.assistant_kwargs)
-        new_target_ids = self._process_assistant_outputs(input_ids, assistant_output.sequences, assistant_input_ids)
+        new_target_ids = self._process_assistant_outputs(input_ids, assistant_output.sequences)
 
         # Update state
         self.prev_target_ids_len = input_ids.shape[1]
@@ -583,7 +583,7 @@ def _prepare_assistant_input_ids(self, input_ids: torch.LongTensor) -> tuple[tor
         return assistant_input_ids, remove_from_pkv
 
     def _process_assistant_outputs(
-        self, input_ids: torch.LongTensor, assistant_sequences: torch.LongTensor, assistant_input_ids: torch.LongTensor
+        self, input_ids: torch.LongTensor, assistant_sequences: torch.LongTensor
     ) -> torch.LongTensor:
         """Processes assistant outputs to obtain target input IDs."""
         num_prev_assistant = self.prev_assistant_ids.shape[1]
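These helpers sit in the assisted-generation (speculative decoding) machinery; `_process_assistant_outputs` never used the `assistant_input_ids` it was handed. As a usage anchor, here is a hedged sketch of an assisted `generate` call with an assistant that uses a different tokenizer. The checkpoint names are illustrative, the `tokenizer`/`assistant_tokenizer` keywords are assumptions based on the universal assisted-generation API, and whether a given call routes through this exact candidate generator depends on the model/assistant pairing.

    # Hedged sketch: assisted generation with a differently tokenized assistant.
    # Checkpoints are illustrative and not part of this commit.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

    assistant_tok = AutoTokenizer.from_pretrained("gpt2")
    assistant = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tok("The capital of France is", return_tensors="pt")
    out = model.generate(
        **inputs,
        assistant_model=assistant,
        tokenizer=tok,                      # both tokenizers are needed when they differ
        assistant_tokenizer=assistant_tok,
        max_new_tokens=20,
    )
    print(tok.decode(out[0], skip_special_tokens=True))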

src/transformers/modeling_utils.py

Lines changed: 1 addition & 3 deletions
@@ -1412,7 +1412,6 @@ def _find_missing_and_unexpected_keys(
     checkpoint_keys: list[str],
     loading_base_model_from_task_state_dict: bool,
     hf_quantizer: Optional[HfQuantizer],
-    device_map: dict,
 ) -> tuple[list[str], list[str]]:
     """Find missing keys (keys that are part of the model parameters but were NOT found in the loaded state dict keys) and unexpected keys
     (keys found in the loaded state dict keys, but that are NOT part of the model parameters)
@@ -2713,7 +2712,7 @@ def _check_and_adjust_attn_implementation(
             try:
                 self._sdpa_can_dispatch(is_init_check)
                 applicable_attn_implementation = "sdpa"
-            except (ValueError, ImportError) as e:
+            except (ValueError, ImportError):
                 applicable_attn_implementation = "eager"
         else:
             applicable_attn_implementation = self.get_correct_attn_implementation(
@@ -5318,7 +5317,6 @@ def _load_pretrained_model(
             checkpoint_keys,
             loading_base_model_from_task_state_dict,
             hf_quantizer,
-            device_map,
         )
         # Find all the keys with shape mismatch (if we ignore the mismatch, the weights need to be newly initialized the
        # same way as missing keys)
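The `except` change above is in attention-implementation selection: `_sdpa_can_dispatch` is tried and, on failure, the model falls back to eager attention; the exception object itself was never used, so the `as e` binding goes away. A small, hedged sketch of the observable behaviour (the checkpoint name is illustrative, and `config._attn_implementation` is only inspected here for demonstration):

    # Hedged sketch: loading without forcing an attention backend; the loader
    # tries SDPA and ends up on "eager" if the except branch above is hit.
    from transformers import AutoModel

    model = AutoModel.from_pretrained("bert-base-uncased")
    print(model.config._attn_implementation)  # e.g. "sdpa", or "eager" after the fallback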

src/transformers/models/sew_d/modeling_sew_d.py

Lines changed: 1 addition & 1 deletion
@@ -509,7 +509,7 @@ def forward(ctx, input, mask, dim):
     @staticmethod
     def backward(ctx, grad_output):
         (output,) = ctx.saved_tensors
-        inputGrad = softmax_backward_data(ctx, grad_output, output, ctx.dim, output)
+        inputGrad = softmax_backward_data(ctx, grad_output, output)
         return inputGrad, None, None
 
     @staticmethod

src/transformers/pipelines/fill_mask.py

Lines changed: 2 additions & 2 deletions
@@ -163,7 +163,7 @@ def postprocess(self, model_outputs, top_k=5, target_ids=None):
             return result[0]
         return result
 
-    def get_target_ids(self, targets, top_k=None):
+    def get_target_ids(self, targets):
         if isinstance(targets, str):
             targets = [targets]
         try:
@@ -213,7 +213,7 @@ def _sanitize_parameters(self, top_k=None, targets=None, tokenizer_kwargs=None):
         postprocess_params = {}
 
         if targets is not None:
-            target_ids = self.get_target_ids(targets, top_k)
+            target_ids = self.get_target_ids(targets)
             postprocess_params["target_ids"] = target_ids
 
         if top_k is not None:
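`get_target_ids` only ever needed the raw `targets`, so the unused `top_k` argument is dropped; `top_k` is still applied separately in postprocessing, as the surrounding context shows. A hedged sketch of the public pipeline call that supplies both parameters (the model name is illustrative; the pipeline's default checkpoint behaves the same way):

    # Hedged sketch: targets restrict the candidate fill tokens, top_k limits
    # how many predictions are returned.
    from transformers import pipeline

    fill = pipeline("fill-mask", model="distilbert/distilroberta-base")
    print(fill("The capital of France is <mask>.", targets=["Paris", "London"], top_k=2))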

src/transformers/pipelines/table_question_answering.py

Lines changed: 1 addition & 1 deletion
@@ -306,7 +306,7 @@ def _sanitize_parameters(self, sequential=None, padding=None, truncation=None, *
 
         return preprocess_params, forward_params, {}
 
-    def preprocess(self, pipeline_input, sequential=None, padding=True, truncation=None):
+    def preprocess(self, pipeline_input, padding=True, truncation=None):
         if truncation is None:
             if self.type == "tapas":
                 truncation = "drop_rows_to_fit"

src/transformers/pytorch_utils.py

Lines changed: 2 additions & 2 deletions
@@ -50,15 +50,15 @@
 _torch_distributed_available = torch.distributed.is_available()
 
 
-def softmax_backward_data(parent, grad_output, output, dim, self):
+def softmax_backward_data(parent, grad_output, output):
     """
     A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according
     to the torch version detected.
     """
 
     from torch import _softmax_backward_data
 
-    return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
+    return _softmax_backward_data(grad_output, output, parent.dim, output.dtype)
 
 
 def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
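`softmax_backward_data` now reads the softmax dimension from its first argument and the dtype from `output` itself, so callers drop the trailing `dim` and tensor arguments; the SEW-D change above updates its call site accordingly. A minimal sketch of a caller using the new three-argument form. The `MaskedSoftmax` Function here is hypothetical, modelled on the masked-softmax pattern, and is not part of the library:

    # Hedged sketch of a custom autograd.Function calling the new signature.
    import torch
    from transformers.pytorch_utils import softmax_backward_data

    class MaskedSoftmax(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, mask, dim):
            ctx.dim = dim  # the helper reads .dim from its first argument
            output = input.masked_fill(mask, torch.finfo(input.dtype).min).softmax(dim)
            ctx.save_for_backward(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            (output,) = ctx.saved_tensors
            # dim and dtype now come from ctx.dim and output.dtype inside the helper
            return softmax_backward_data(ctx, grad_output, output), None, None

    x = torch.randn(2, 4, requires_grad=True)
    mask = torch.tensor([[False, False, True, False], [False, True, False, False]])
    MaskedSoftmax.apply(x, mask, -1).sum().backward()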

src/transformers/trainer_pt_utils.py

Lines changed: 2 additions & 2 deletions
@@ -929,7 +929,7 @@ def _secs2timedelta(secs):
     return f"{datetime.timedelta(seconds=int(secs))}.{msec:02d}"
 
 
-def metrics_format(self, metrics: dict[str, float]) -> dict[str, float]:
+def metrics_format(metrics: dict[str, float]) -> dict[str, float]:
     """
     Reformat Trainer metrics values to a human-readable format.
 
@@ -1038,7 +1038,7 @@ def log_metrics(self, split, metrics):
         return
 
     print(f"***** {split} metrics *****")
-    metrics_formatted = self.metrics_format(metrics)
+    metrics_formatted = metrics_format(metrics)
     k_width = max(len(str(x)) for x in metrics_formatted)
     v_width = max(len(str(x)) for x in metrics_formatted.values())
     for key in sorted(metrics_formatted.keys()):
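With the unused `self` gone, `metrics_format` is a plain module-level helper and `log_metrics` calls it directly. A hedged sketch of calling it standalone; the metrics dict below is invented:

    # Hedged sketch: formatting a Trainer-style metrics dict without a Trainer.
    from transformers.trainer_pt_utils import metrics_format

    metrics = {"eval_loss": 0.34567, "eval_runtime": 75.3, "eval_samples_per_second": 26.5}
    print(metrics_format(metrics))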

src/transformers/utils/auto_docstring.py

Lines changed: 5 additions & 10 deletions
@@ -1227,7 +1227,7 @@ def get_checkpoint_from_config_class(config_class):
     return checkpoint
 
 
-def add_intro_docstring(func, class_name, parent_class=None, indent_level=0):
+def add_intro_docstring(func, class_name, indent_level=0):
     intro_docstring = ""
     if func.__name__ == "forward":
         intro_docstring = rf"""The [`{class_name}`] forward method, overrides the `__call__` special method.
@@ -1469,17 +1469,14 @@ def find_sig_line(lines, line_end):
     return sig_line_end
 
 
-def _process_kwargs_parameters(
-    sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters
-):
+def _process_kwargs_parameters(sig, func, parent_class, documented_kwargs, indent_level, undocumented_parameters):
     """
     Process **kwargs parameters if needed.
 
     Args:
         sig (`inspect.Signature`): Function signature
         func (`function`): Function the parameters belong to
         parent_class (`class`): Parent class of the function
-        model_name_lowercase (`str`): Lowercase model name
         documented_kwargs (`dict`): Dictionary of kwargs that are already documented
         indent_level (`int`): Indentation level
         undocumented_parameters (`list`): List to append undocumented parameters to
@@ -1510,7 +1507,7 @@ def _process_kwargs_parameters(
         # Extract documentation for kwargs
         kwargs_documentation = kwarg_param.annotation.__args__[0].__doc__
         if kwargs_documentation is not None:
-            documented_kwargs, _ = parse_docstring(kwargs_documentation)
+            documented_kwargs = parse_docstring(kwargs_documentation)[0]
 
         # Process each kwarg parameter
         for param_name, param_type_annotation in kwarg_param.annotation.__args__[0].__annotations__.items():
@@ -1597,7 +1594,7 @@ def _process_parameters_section(
 
     # Process **kwargs parameters if needed
     kwargs_docstring = _process_kwargs_parameters(
-        sig, func, parent_class, model_name_lowercase, documented_kwargs, indent_level, undocumented_parameters
+        sig, func, parent_class, documented_kwargs, indent_level, undocumented_parameters
     )
     docstring += kwargs_docstring
 
@@ -1757,9 +1754,7 @@ def auto_method_docstring(
         if not docstring.strip().endswith("\n"):
             docstring += "\n"
     else:
-        docstring = add_intro_docstring(
-            func, class_name=class_name, parent_class=parent_class, indent_level=indent_level
-        )
+        docstring = add_intro_docstring(func, class_name=class_name, indent_level=indent_level)
 
     # Process Parameters section
     docstring += _process_parameters_section(

src/transformers/utils/import_utils.py

Lines changed: 1 addition & 1 deletion
@@ -762,7 +762,7 @@ def is_torch_npu_available(check_device=False) -> bool:
 
 
 @lru_cache
-def is_torch_mlu_available(check_device=False) -> bool:
+def is_torch_mlu_available() -> bool:
     """
     Checks if `mlu` is available via an `cndev-based` check which won't trigger the drivers and leave mlu
     uninitialized.
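`check_device` was never used inside the MLU check, so the function now takes no arguments and existing call sites simply drop the flag. A one-line sketch of the new call:

    # Hedged sketch: the availability check is now argument-free.
    from transformers.utils.import_utils import is_torch_mlu_available

    if is_torch_mlu_available():
        print("MLU backend detected")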

src/transformers/video_utils.py

Lines changed: 1 addition & 4 deletions
@@ -714,7 +714,6 @@ def sample_indices_fn_func(metadata, **fn_kwargs):
 
 def convert_to_rgb(
     video: np.ndarray,
-    data_format: Optional[ChannelDimension] = None,
     input_data_format: Optional[Union[str, ChannelDimension]] = None,
 ) -> np.ndarray:
     """
@@ -723,15 +722,13 @@ def convert_to_rgb(
     Args:
         video (`np.ndarray`):
             The video to convert.
-        data_format (`ChannelDimension`, *optional*):
-            The channel dimension format of the output video. If unset, will use the inferred format from the input.
         input_data_format (`ChannelDimension`, *optional*):
             The channel dimension format of the input video. If unset, will use the inferred format from the input.
     """
     if not isinstance(video, np.ndarray):
         raise TypeError(f"Video has to be a numpy array to convert to RGB format, but found {type(video)}")
 
-    # np.array usually comes with ChannelDimension.LAST so leet's convert it
+    # np.array usually comes with ChannelDimension.LAST so let's convert it
     if input_data_format is None:
         input_data_format = infer_channel_dimension_format(video)
     video = to_channel_dimension_format(video, ChannelDimension.FIRST, input_channel_dim=input_data_format)
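`convert_to_rgb` never used its `data_format` argument, so it is removed along with its docstring entry, and a comment typo (`leet's`) is fixed in passing. A hedged sketch of a call with the remaining arguments; the array shape (frames, height, width, channels) and RGBA content are illustrative:

    # Hedged sketch: converting illustrative RGBA frames to RGB with the
    # channel layout given explicitly.
    import numpy as np
    from transformers.image_utils import ChannelDimension
    from transformers.video_utils import convert_to_rgb

    video = np.random.randint(0, 256, size=(8, 64, 64, 4), dtype=np.uint8)  # RGBA
    rgb = convert_to_rgb(video, input_data_format=ChannelDimension.LAST)
    print(rgb.shape)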
