
Commit 63e4fee

angazenn committed
fix yapf && ruff check
Signed-off-by: angazenn <zengyanjia@huawei.com>
1 parent f5a0aba

File tree

1 file changed: +1 −4

vllm_ascend/ops/fused_moe.py

Lines changed: 1 addition & 4 deletions
@@ -15,7 +15,7 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from vllm/tests/kernels/test_moe.py

-from typing import Callable, List, Optional
+from typing import Callable, Optional

 import torch
 import torch.distributed as dist
@@ -217,7 +217,6 @@ def fused_experts_with_all2all(
             sorted_local_expert_idx, local_num_experts).to(torch.int64)

         hidden_states = hidden_states[sorted_idx]
-        group_list_type = 0
     else:
         row_idx_len = num_tokens * top_k
         row_idx = torch.arange(0,
@@ -234,7 +233,6 @@ def fused_experts_with_all2all(
         expert_tokens = torch_npu.npu_moe_compute_expert_tokens(
             expanded_expert_idx, num_experts)
         expert_tokens = expert_tokens.to(torch.int64)
-        group_list_type = 0

     w1 = w1.transpose(1, 2)
     gate_up_out_list = torch_npu.npu_grouped_matmul(
@@ -687,7 +685,6 @@ def apply(
             top_k=top_k,
             expert_map=expert_map,
             ep_group=self.ep_group)
-


 class AscendFusedMoE(FusedMoE):
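
The deletions line up with what a ruff pass typically flags: List is no longer referenced after the import change, and the two removed group_list_type = 0 assignments were presumably reported as assigned-but-unused. A minimal sketch of how the checks named in the commit message could be reproduced locally follows; the exact flags and file lists used in vllm-ascend's CI are an assumption here:

    # Hypothetical local invocation; CI may pass different flags or paths.
    yapf --in-place vllm_ascend/ops/fused_moe.py   # auto-format the file in place
    ruff check vllm_ascend/ops/fused_moe.py        # lint, e.g. unused imports and variables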

0 commit comments