
Commit c6af1ca

cyyever authored and ArthurZucker committed
Fix pylint warnings (#41222)
* Remove unused variables
* Remove reimported packages
* Fix code
* Fix pylint warnings
* Simplify

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
1 parent 7aca328 commit c6af1ca

File tree

10 files changed, +7 -19 lines changed


src/transformers/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -928,7 +928,6 @@
 from .utils import is_torch_npu_available as is_torch_npu_available
 from .utils import is_torch_xla_available as is_torch_xla_available
 from .utils import is_torch_xpu_available as is_torch_xpu_available
-from .utils import logging as logging

 # bitsandbytes config
 from .utils.quantization_config import AqlmConfig as AqlmConfig

src/transformers/commands/chat.py

Lines changed: 1 addition & 3 deletions
@@ -59,9 +59,7 @@

 from transformers import (
     AutoModelForCausalLM,
-    AutoTokenizer,
     BitsAndBytesConfig,
-    GenerationConfig,
 )

 ALLOWED_KEY_CHARS = set(string.ascii_letters + string.whitespace)
@@ -533,7 +531,7 @@ def parse_eos_tokens(
     # -----------------------------------------------------------------------------------------------------------------
     # Model loading and performance automation methods
     @staticmethod
-    def get_quantization_config(model_args: ChatArguments) -> Optional["BitsAndBytesConfig"]:
+    def get_quantization_config(model_args: ChatArguments) -> Optional[BitsAndBytesConfig]:
         if model_args.load_in_4bit:
             quantization_config = BitsAndBytesConfig(
                 load_in_4bit=True,
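The two removed names were imported but never used in this file, which is what pylint's unused-import check flags, and the return annotation can drop its quotes because BitsAndBytesConfig is a regular module-level import rather than a forward reference. A minimal standalone sketch of the annotation half of the change (QuantConfig is a hypothetical stand-in, not the transformers class):

    from typing import Optional


    class QuantConfig:
        # Stand-in for an imported class such as BitsAndBytesConfig.
        def __init__(self, load_in_4bit: bool = False):
            self.load_in_4bit = load_in_4bit


    def get_quantization_config(load_in_4bit: bool) -> Optional[QuantConfig]:
        # QuantConfig is already bound at this point, so the annotation does not
        # need to be the string "QuantConfig"; quotes are only required for
        # forward references to names that are not yet defined.
        return QuantConfig(load_in_4bit=True) if load_in_4bit else None


    print(get_quantization_config(True).load_in_4bit)  # True
    print(get_quantization_config(False))               # None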

src/transformers/data/metrics/squad_metrics.py

Lines changed: 1 addition & 1 deletion
@@ -148,7 +148,7 @@ def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
     best_score = cur_score
     best_thresh = 0.0
     qid_list = sorted(na_probs, key=lambda k: na_probs[k])
-    for i, qid in enumerate(qid_list):
+    for qid in qid_list:
         if qid not in scores:
             continue
         if qid_to_has_ans[qid]:
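Dropping enumerate() removes a loop index that the body never reads, which pylint reports as an unused variable. A small self-contained illustration of the same pattern (made-up data, not the SQuAD evaluation code):

    scores = {"q1": 1.0, "q3": 0.5}
    qid_list = ["q1", "q2", "q3"]

    # Before: `for i, qid in enumerate(qid_list):` binds an index `i` that is never used.
    # After: iterate over the items directly.
    total = 0.0
    for qid in qid_list:
        if qid not in scores:
            continue
        total += scores[qid]

    print(total)  # 1.5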

src/transformers/generation/logits_process.py

Lines changed: 0 additions & 1 deletion
@@ -369,7 +369,6 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:

         if scores.dim() == 3:
             if self.logits_indices is not None and self.cu_seq_lens_q is not None:
-                batch_size, seq_len, vocab_size = scores.shape
                 last_positions = self.logits_indices
                 last_scores = scores[0, last_positions, :]

src/transformers/generation/watermarking.py

Lines changed: 2 additions & 7 deletions
@@ -24,14 +24,9 @@
 from torch.nn import BCELoss

 from ..modeling_utils import PreTrainedModel
-from ..utils import ModelOutput, is_torch_available, logging
+from ..utils import ModelOutput, logging
 from .configuration_utils import PretrainedConfig, WatermarkingConfig
-
-
-if is_torch_available():
-    import torch
-
-from .logits_process import SynthIDTextWatermarkLogitsProcessor, WatermarkLogitsProcessor
+from .logits_process import SynthIDTextWatermarkLogitsProcessor, WatermarkLogitsProcessor


 logger = logging.get_logger(__name__)
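The guarded import torch duplicated what the module already assumes: the visible context imports from torch.nn unconditionally, and the commit message mentions re-imported packages, so the is_torch_available() check added nothing here. A generic standalone sketch of the simplified shape (not the transformers module itself):

    # A module that imports from torch.nn at the top already requires torch,
    # so guarding a second `import torch` behind an availability check is redundant.
    import torch
    from torch.nn import BCELoss

    loss_fn = BCELoss()
    probs = torch.tensor([0.9, 0.1])
    targets = torch.tensor([1.0, 0.0])
    print(loss_fn(probs, targets))  # a small scalar BCE loss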

src/transformers/integrations/tensor_parallel.py

Lines changed: 1 addition & 1 deletion
@@ -1009,7 +1009,7 @@ def add_tensor_parallel_hooks_to_module(


 def shard_and_distribute_module(
-    model, param, empty_param, parameter_name, param_casting_dtype, is_contiguous, rank, device_mesh, set_param=True
+    model, param, empty_param, parameter_name, param_casting_dtype, is_contiguous, rank, device_mesh
 ):  # TODO: rename to shard_and_distribute_param
     r"""
     This function is called in `from_pretrained` when loading a model's checkpoints.
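Removing set_param=True drops a keyword argument that the function apparently no longer uses (pylint's unused-argument warning). It is also a small signature change: a caller that still passed set_param explicitly would now raise a TypeError. A tiny hypothetical illustration of that effect (not the real function):

    def shard(param, rank, device_mesh):  # previously also accepted set_param=True
        return f"{param} -> rank {rank} on {device_mesh}"

    print(shard("weight", 0, "2x4 mesh"))
    # shard("weight", 0, "2x4 mesh", set_param=True) would now raise:
    # TypeError: shard() got an unexpected keyword argument 'set_param'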

src/transformers/modeling_utils.py

Lines changed: 0 additions & 2 deletions
@@ -2195,8 +2195,6 @@ def tp_plan(self, plan: dict[str, str]):
         if hasattr(self, "named_parameters"):
             model_param_names = [name for name, _ in self.named_parameters()]
             if model_param_names:  # Only validate if model has parameters
-                import re
-
                 for layer_pattern in plan.keys():
                     # Convert pattern to regex (replace * with .*)
                     regex_pattern = layer_pattern.replace("*", r"\d+")
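The nested import re duplicated what is presumably already a module-level import in modeling_utils.py, which pylint reports as a re-imported package (and as an import outside the top level). A minimal standalone sketch of the wildcard-to-regex validation the surrounding code performs, with the import where it belongs (hypothetical helper, not the tp_plan setter itself):

    import re  # one module-level import is enough; no re-import inside the function


    def pattern_to_regex(layer_pattern: str) -> str:
        # Replace the "*" wildcard with a digits-only group, mirroring the diff context.
        return layer_pattern.replace("*", r"\d+")


    print(bool(re.fullmatch(pattern_to_regex("model.layers.*.mlp"), "model.layers.3.mlp")))  # True
    print(bool(re.fullmatch(pattern_to_regex("model.layers.*.mlp"), "model.layers.x.mlp")))  # False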

src/transformers/pipelines/__init__.py

Lines changed: 0 additions & 1 deletion
@@ -946,7 +946,6 @@ def pipeline(

     # Retrieve the task
     if task in custom_tasks:
-        normalized_task = task
         targeted_task, task_options = clean_custom_task(custom_tasks[task])
         if pipeline_class is None:
             if not trust_remote_code:

src/transformers/pipelines/question_answering.py

Lines changed: 1 addition & 1 deletion
@@ -678,7 +678,7 @@ def span_to_answer(self, text: str, start: int, end: int) -> dict[str, Union[str, int]]:
         words = []
         token_idx = char_start_idx = char_end_idx = chars_idx = 0

-        for i, word in enumerate(text.split(" ")):
+        for word in text.split(" "):
             token = self.tokenizer.tokenize(word)

             # Append words if they are in the span

src/transformers/tokenization_utils_base.py

Lines changed: 1 addition & 1 deletion
@@ -2176,7 +2176,7 @@ def _from_pretrained(
             if template_file is None:
                 continue  # I think this should never happen, but just in case
             template_name = extra_chat_template.removeprefix("chat_template_")
-            with open(template_file) as chat_template_handle:
+            with open(template_file, encoding="utf8") as chat_template_handle:
                 chat_templates[template_name] = chat_template_handle.read()
         if len(chat_templates) == 1 and "default" in chat_templates:
             init_kwargs["chat_template"] = chat_templates["default"]
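Passing an explicit encoding addresses pylint's unspecified-encoding warning (W1514): without it, open() falls back to the platform's locale encoding, so a UTF-8 chat template file could decode differently on Windows than on Linux. A small self-contained sketch of the pattern using a temporary file (not the real template-loading path):

    import os
    import tempfile

    template = "{% for message in messages %}{{ message['content'] }}{% endfor %}"
    path = os.path.join(tempfile.mkdtemp(), "chat_template.jinja")

    with open(path, "w", encoding="utf8") as handle:
        handle.write(template)

    # Reading with an explicit encoding makes the result deterministic across platforms.
    with open(path, encoding="utf8") as handle:
        print(handle.read() == template)  # True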
