Commit 894a2bd

Fix pylint generator warnings (#41258)

Signed-off-by: cyy <cyyever@outlook.com>

1 parent 1cc9069 commit 894a2bd

31 files changed (+44, −48 lines)
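
Every hunk below applies the same fix: a list comprehension passed to a reduction builtin (max or sum) becomes a generator expression, which is what pylint's consider-using-generator check (R1728) asks for. A minimal before/after sketch of the pattern, on a hypothetical word list:

# Before: pylint flags R1728 (consider-using-generator); the list
# comprehension materializes every length before max() runs.
words = ["pylint", "generator", "warning"]
longest = max([len(w) for w in words])

# After: same result, but max() consumes the lengths one at a time
# and no intermediate list is allocated.
longest = max(len(w) for w in words)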

examples/legacy/run_chinese_ref.py

Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@ def get_chinese_word(tokens: list[str]):
 def add_sub_symbol(bert_tokens: list[str], chinese_word_set: set()):
     if not chinese_word_set:
         return bert_tokens
-    max_word_len = max([len(w) for w in chinese_word_set])
+    max_word_len = max(len(w) for w in chinese_word_set)
 
     bert_word = bert_tokens
     start, end = 0, len(bert_word)

examples/pytorch/question-answering/run_qa_no_trainer.py

Lines changed: 2 additions & 2 deletions

@@ -950,7 +950,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
             all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
             all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())
 
-    max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor
+    max_len = max(x.shape[1] for x in all_start_logits)  # Get the max_length of the tensor
 
     # concatenate the numpy array
     start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len)
@@ -989,7 +989,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
             all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
             all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())
 
-    max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor
+    max_len = max(x.shape[1] for x in all_start_logits)  # Get the max_length of the tensor
     # concatenate the numpy array
     start_logits_concat = create_and_fill_np_array(all_start_logits, predict_dataset, max_len)
     end_logits_concat = create_and_fill_np_array(all_end_logits, predict_dataset, max_len)

src/transformers/generation/stopping_criteria.py

Lines changed: 1 addition & 1 deletion

@@ -249,7 +249,7 @@ def __init__(self, tokenizer: PreTrainedTokenizerBase, stop_strings: Union[str,
             token_list, token_indices, tokenizer
         )
 
-        self.maximum_token_len = max([len(stop_string) for stop_string in self.stop_strings])
+        self.maximum_token_len = max(len(stop_string) for stop_string in self.stop_strings)
         self.num_stop_strings = len(self.stop_strings)
         self.target_lens = torch.tensor([len(stop_string) for stop_string in stop_strings], dtype=torch.int32)
 
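
Note that the adjacent target_lens line keeps its list comprehension: torch.tensor consumes a concrete sequence (it rejects a bare generator), and pylint's generator checks only target reduction builtins such as any, all, sum, max, and min. A standalone sketch of the distinction, with hypothetical stop strings:

import torch

stop_strings = ["stop", "###"]  # hypothetical examples

# Reduction builtins accept a bare generator expression:
maximum_len = max(len(s) for s in stop_strings)

# torch.tensor needs a materialized sequence, so this list stays:
target_lens = torch.tensor([len(s) for s in stop_strings], dtype=torch.int32)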

src/transformers/modeling_utils.py

Lines changed: 2 additions & 2 deletions

@@ -4103,9 +4103,9 @@ def get_memory_footprint(self, return_buffers=True):
         are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch
         norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
         """
-        mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
+        mem = sum(param.nelement() * param.element_size() for param in self.parameters())
         if return_buffers:
-            mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
+            mem_bufs = sum(buf.nelement() * buf.element_size() for buf in self.buffers())
             mem = mem + mem_bufs
         return mem
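
The generator form pays off most in reductions like this one: on a large checkpoint, the old code allocated a throwaway list with one entry per parameter tensor before summing. A rough standalone illustration of the same idiom, assuming any plain nn.Module as a stand-in:

import torch.nn as nn

model = nn.Linear(1024, 1024)  # stand-in for a PreTrainedModel

# Same idiom as the patched get_memory_footprint: a running total,
# with no intermediate list of per-tensor byte counts.
mem = sum(p.nelement() * p.element_size() for p in model.parameters())
mem += sum(b.nelement() * b.element_size() for b in model.buffers())
print(mem)  # float32 Linear: weight 1024*1024*4 + bias 1024*4 bytes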

src/transformers/models/deprecated/mctct/modeling_mctct.py

Lines changed: 1 addition & 1 deletion

@@ -96,7 +96,7 @@ def __init__(self, config):
     def forward(self, input_features):
         # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if
         # there will be just one conv layer.
-        padding = sum([size // 2 for size in self.kernel_size])  # (7, 7) -> (3, 3)
+        padding = sum(size // 2 for size in self.kernel_size)  # (7, 7) -> (3, 3)
 
         input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0)
         hidden_states = input_features.transpose(1, 2).contiguous()  # -> Batch x Frame x Time

src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py

Lines changed: 2 additions & 2 deletions

@@ -425,8 +425,8 @@ def _get_new_num_tokens_layer(self, new_num_tokens, layer):
 
         new_num_tokens_layer = (
             new_num_tokens
-            - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
-            - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
+            - sum(emb.weight.shape[0] for emb in embeddings.emb_layers[:layer])
+            - sum(emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :])
         )
         return new_num_tokens_layer, layer
 

src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py

Lines changed: 1 addition & 1 deletion

@@ -202,7 +202,7 @@ def __call__(
 
         # Create audio attention mask
         max_patch_len = max(
-            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
+            ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features
         )  # The maximum number of audio patches in a batch
         if return_attention_mask:
             audio_mask = [
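
This hunk shows the one syntactic subtlety in the cleanup: because the comprehension is the sole argument to max(), the square brackets can simply be dropped; a generator expression that is the only argument of a call needs no parentheses of its own, while any additional argument forces them. A small sketch with a hypothetical lengths list:

from math import ceil

lengths = [3, 7, 5]

# Sole argument: the generator expression needs no extra parentheses.
max_patches = max(ceil(n / 2) for n in lengths)

# A second argument (here a default for empty input) requires them:
max_patches = max((ceil(n / 2) for n in lengths), default=0)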

src/transformers/models/deprecated/tvlt/image_processing_tvlt.py

Lines changed: 1 addition & 1 deletion

@@ -392,7 +392,7 @@ def preprocess(
                 f"number of frames must not be greater than the maximum frames of the model {self.num_frames}."
             )
 
-        max_num_frames = max([len(video) for video in videos])
+        max_num_frames = max(len(video) for video in videos)
         num_patches_per_image = (size["shortest_edge"] // patch_size[0]) ** 2
         video_masks = np.array(
             [

src/transformers/models/emu3/image_processing_emu3.py

Lines changed: 2 additions & 2 deletions

@@ -266,8 +266,8 @@ def _pad_for_batching(
         """
 
         max_shape = (
-            max([size[0] for size in image_sizes]),
-            max([size[1] for size in image_sizes]),
+            max(size[0] for size in image_sizes),
+            max(size[1] for size in image_sizes),
         )
         pixel_values = [
             pad(

src/transformers/models/eomt/modeling_eomt.py

Lines changed: 1 addition & 1 deletion

@@ -628,7 +628,7 @@ def get_num_masks(self, class_labels: torch.Tensor, device: torch.device) -> torch.Tensor:
         """
         Computes the average number of target masks across the batch, for normalization purposes.
         """
-        num_masks = sum([len(classes) for classes in class_labels])
+        num_masks = sum(len(classes) for classes in class_labels)
         num_masks = torch.as_tensor(num_masks, dtype=torch.float, device=device)
         world_size = 1
         if is_accelerate_available():
