
Commit ae1b53d

Fix pylint generator warnings

Signed-off-by: cyy <cyyever@outlook.com>
Parent: 01c9e1b

32 files changed: 47 additions, 51 deletions
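All of the changes follow one pattern: a list comprehension passed to max() or sum() is replaced by a generator expression, so the reduction no longer builds a throwaway intermediate list. This is the pattern pylint reports as consider-using-generator (and as use-a-generator for any()/all()). A minimal before/after sketch, illustrative only and not taken from the commit:

    words = ["alpha", "beta", "gamma"]

    # Flagged by pylint: the comprehension materializes a list just to reduce it.
    longest = max([len(w) for w in words])

    # Fixed form: the generator expression feeds max() lazily, no intermediate list.
    longest = max(len(w) for w in words)
    assert longest == 5

To reproduce only these warnings locally, something along the lines of `pylint --disable=all --enable=consider-using-generator,use-a-generator src/` should work; the exact message symbols are assumed from pylint's standard checker names rather than stated in the commit.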

examples/legacy/run_chinese_ref.py

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ def get_chinese_word(tokens: list[str]):
 def add_sub_symbol(bert_tokens: list[str], chinese_word_set: set()):
     if not chinese_word_set:
         return bert_tokens
-    max_word_len = max([len(w) for w in chinese_word_set])
+    max_word_len = max(len(w) for w in chinese_word_set)
 
     bert_word = bert_tokens
     start, end = 0, len(bert_word)

examples/pytorch/question-answering/run_qa_no_trainer.py

Lines changed: 2 additions & 2 deletions
@@ -950,7 +950,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
         all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
         all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())
 
-    max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor
+    max_len = max(x.shape[1] for x in all_start_logits) # Get the max_length of the tensor
 
     # concatenate the numpy array
     start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len)

@@ -989,7 +989,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
         all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
         all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())
 
-    max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor
+    max_len = max(x.shape[1] for x in all_start_logits) # Get the max_length of the tensor
     # concatenate the numpy array
     start_logits_concat = create_and_fill_np_array(all_start_logits, predict_dataset, max_len)
     end_logits_concat = create_and_fill_np_array(all_end_logits, predict_dataset, max_len)

src/transformers/generation/beam_constraints.py

Lines changed: 3 additions & 3 deletions
@@ -218,7 +218,7 @@ def __init__(self, nested_token_ids: list[list[int]], no_subsets=True):
         r"""
         A helper class that builds a trie with the words represented in `nested_token_ids`.
         """
-        self.max_height = max([len(one) for one in nested_token_ids])
+        self.max_height = max(len(one) for one in nested_token_ids)
 
         root = {}
         for token_ids in nested_token_ids:

@@ -260,7 +260,7 @@ def count_leaves(self, root):
         if len(next_nodes) == 0:
             return 1
         else:
-            return sum([self.count_leaves(nn) for nn in next_nodes])
+            return sum(self.count_leaves(nn) for nn in next_nodes)
 
     def has_subsets(self, trie, nested_token_ids):
         """

@@ -373,7 +373,7 @@ def __init__(self, constraints: list[Constraint]):
         self.constraints = constraints
 
         # max # of steps required to fulfill a given constraint
-        self.max_seqlen = max([c.seqlen for c in constraints])
+        self.max_seqlen = max(c.seqlen for c in constraints)
         self.n_constraints = len(constraints)
         self.completed = False
 
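For context on the count_leaves change: the trie above stores children as nested dicts (see the root = {} context line), so a node with no children is a leaf and the generator expression sums leaf counts over the child subtries. A standalone sketch of the same idea, not the transformers implementation:

    def count_leaves(node: dict) -> int:
        children = list(node.values())
        if not children:
            return 1  # an empty dict marks a leaf
        return sum(count_leaves(child) for child in children)

    trie = {1: {2: {}, 3: {4: {}}}}  # two leaf paths: 1->2 and 1->3->4
    assert count_leaves(trie) == 2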

src/transformers/generation/stopping_criteria.py

Lines changed: 1 addition & 1 deletion
@@ -249,7 +249,7 @@ def __init__(self, tokenizer: PreTrainedTokenizerBase, stop_strings: Union[str,
             token_list, token_indices, tokenizer
         )
 
-        self.maximum_token_len = max([len(stop_string) for stop_string in self.stop_strings])
+        self.maximum_token_len = max(len(stop_string) for stop_string in self.stop_strings)
         self.num_stop_strings = len(self.stop_strings)
         self.target_lens = torch.tensor([len(stop_string) for stop_string in stop_strings], dtype=torch.int32)
 

src/transformers/modeling_utils.py

Lines changed: 2 additions & 2 deletions
@@ -4098,9 +4098,9 @@ def get_memory_footprint(self, return_buffers=True):
         are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch
         norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
         """
-        mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
+        mem = sum(param.nelement() * param.element_size() for param in self.parameters())
         if return_buffers:
-            mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
+            mem_bufs = sum(buf.nelement() * buf.element_size() for buf in self.buffers())
             mem = mem + mem_bufs
         return mem
 
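The get_memory_footprint hunk applies the same generator pattern to byte counts of parameters and buffers. A minimal sketch of that computation on a toy module (an assumed example, not part of the commit; BatchNorm1d is chosen only because it registers running-statistics buffers):

    from torch import nn

    model = nn.Sequential(nn.Linear(4, 8), nn.BatchNorm1d(8))
    param_bytes = sum(p.nelement() * p.element_size() for p in model.parameters())
    buffer_bytes = sum(b.nelement() * b.element_size() for b in model.buffers())
    # Roughly what model.get_memory_footprint(return_buffers=True) returns, in bytes.
    print(param_bytes + buffer_bytes)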

src/transformers/models/deprecated/mctct/modeling_mctct.py

Lines changed: 1 addition & 1 deletion
@@ -96,7 +96,7 @@ def __init__(self, config):
     def forward(self, input_features):
         # NOTE: in reference to the NOTE in __init__, right now it just calculates padding as if
         # there will be just one conv layer.
-        padding = sum([size // 2 for size in self.kernel_size]) # (7, 7) -> (3, 3)
+        padding = sum(size // 2 for size in self.kernel_size) # (7, 7) -> (3, 3)
 
         input_features = torch.nn.functional.pad(input_features, (0, 0, padding, padding), "constant", 0)
         hidden_states = input_features.transpose(1, 2).contiguous() # -> Batch x Frame x Time

src/transformers/models/deprecated/transfo_xl/modeling_transfo_xl.py

Lines changed: 2 additions & 2 deletions
@@ -425,8 +425,8 @@ def _get_new_num_tokens_layer(self, new_num_tokens, layer):
 
         new_num_tokens_layer = (
             new_num_tokens
-            - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
-            - sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
+            - sum(emb.weight.shape[0] for emb in embeddings.emb_layers[:layer])
+            - sum(emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :])
         )
         return new_num_tokens_layer, layer
 

src/transformers/models/deprecated/tvlt/feature_extraction_tvlt.py

Lines changed: 1 addition & 1 deletion
@@ -202,7 +202,7 @@ def __call__(
 
         # Create audio attention mask
         max_patch_len = max(
-            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
+            ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features
         ) # The maximum number of audio patches in a batch
         if return_attention_mask:
             audio_mask = [

src/transformers/models/deprecated/tvlt/image_processing_tvlt.py

Lines changed: 1 addition & 1 deletion
@@ -392,7 +392,7 @@ def preprocess(
                 f"number of frames must not be greater than the maximum frames of the model {self.num_frames}."
             )
 
-        max_num_frames = max([len(video) for video in videos])
+        max_num_frames = max(len(video) for video in videos)
         num_patches_per_image = (size["shortest_edge"] // patch_size[0]) ** 2
         video_masks = np.array(
             [

src/transformers/models/emu3/image_processing_emu3.py

Lines changed: 2 additions & 2 deletions
@@ -266,8 +266,8 @@ def _pad_for_batching(
         """
 
         max_shape = (
-            max([size[0] for size in image_sizes]),
-            max([size[1] for size in image_sizes]),
+            max(size[0] for size in image_sizes),
+            max(size[1] for size in image_sizes),
         )
         pixel_values = [
             pad(
