Skip to content

Commit 2bb805b

Browse files
committed
more ruff
Summary: Test Plan: Reviewers: Subscribers: Tasks: Tags:
1 parent 5109a26 commit 2bb805b

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

torchao/prototype/moe_quant/quantizable_moe_modules.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,8 @@ def _group_tokens_by_expert(ordered_token_indices, cum_tokens_per_expert):
216216
ordered_token_indices[
217217
cum_tokens_per_expert[expert] : cum_tokens_per_expert[expert + 1]
218218
]
219-
for expert in range(num_experts) if cum_tokens_per_expert[expert] < cum_tokens_per_expert[expert + 1]
219+
for expert in range(num_experts)
220+
if cum_tokens_per_expert[expert] < cum_tokens_per_expert[expert + 1]
220221
] # [T'(e1)], [T'(e2)] ...
221222
return token_indices_per_expert
222223

@@ -257,7 +258,6 @@ def _group_tokens_by_expert(ordered_token_indices, cum_tokens_per_expert):
257258
cur_out = F.linear(y1, cur_down_proj)
258259
outs.append(cur_out)
259260

260-
261261
# weigh outputs
262262
ordered_outs = torch.cat(outs, dim=0) # [T*A, D]
263263
ordered_scores = scores.view(-1, 1)[ordered_token_activations] # [T*A, 1]

0 commit comments

Comments (0)