Commit a9480d5
[Fix] Adjust use_aclgraph logic (#2156)
### What this PR does / why we need it?

Updates the FusedMoE method to determine whether to use ACL Graph based on the `torchair_graph_config`.

This is equivalent to #2154 on v0.9.1-dev.

### Does this PR introduce _any_ user-facing change?

None.

### How was this patch tested?

None needed.

- vLLM version: v0.10.0
- vLLM main: vllm-project/vllm@ad57f23

Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com>
Parent: 688350a

File tree: 2 files changed (+11 −2 lines)


vllm_ascend/ops/common_fused_moe.py

Lines changed: 10 additions & 1 deletion
```diff
@@ -22,6 +22,7 @@
 from vllm.model_executor.layers.fused_moe.layer import \
     UnquantizedFusedMoEMethod
 
+from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.ops.fused_moe import (fused_experts, fused_experts_moge,
                                        select_experts)
 from vllm_ascend.utils import is_310p
@@ -33,7 +34,15 @@ def unquantized_fused_moe_init_func(self, *args, **kwargs):
     original_unquantized_fused_moe_init_func(self, *args, **kwargs)
     vllm_config = get_current_vllm_config()
     self.max_num_batched_tokens = vllm_config.scheduler_config.max_num_batched_tokens
-    self.use_aclgraph = vllm_config.compilation_config.level == CompilationLevel.PIECEWISE and not vllm_config.model_config.enforce_eager
+
+    ascend_config = get_ascend_config()
+
+    if ascend_config.torchair_graph_config.enabled:
+        self.use_aclgraph = False
+    else:
+        self.use_aclgraph = (vllm_config.compilation_config.level
+                             == CompilationLevel.PIECEWISE
+                             and not vllm_config.model_config.enforce_eager)
 
 
 def forward_oot(
```
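In isolation, the new gating reduces to a small pure function: TorchAir graph mode takes precedence and forces ACL Graph off, otherwise the original condition still applies. The following is a minimal standalone sketch for illustration only; `PIECEWISE_LEVEL` is a stand-in constant, not the real `CompilationLevel` enum from vLLM:

```python
# Minimal sketch of the use_aclgraph decision added above.
# PIECEWISE_LEVEL stands in for vllm.config.CompilationLevel.PIECEWISE.
PIECEWISE_LEVEL = 3  # assumed value; use the real enum in practice


def should_use_aclgraph(torchair_graph_enabled: bool,
                        compilation_level: int,
                        enforce_eager: bool) -> bool:
    # TorchAir graph mode takes precedence: it disables ACL Graph outright.
    if torchair_graph_enabled:
        return False
    # Otherwise the original condition is unchanged.
    return compilation_level == PIECEWISE_LEVEL and not enforce_eager


# TorchAir graph mode forces ACL Graph off even under piecewise compilation:
assert not should_use_aclgraph(True, PIECEWISE_LEVEL, False)
# Without TorchAir, the previous behavior is preserved:
assert should_use_aclgraph(False, PIECEWISE_LEVEL, False)
assert not should_use_aclgraph(False, PIECEWISE_LEVEL, True)
```

In other words, the two graph modes are treated as mutually exclusive, with the TorchAir setting winning whenever it is enabled.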

vllm_ascend/ops/fused_moe.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -1105,7 +1105,7 @@ def apply(
         # this is a naive implementation for experts load balance so as
         # to avoid accumulating too much tokens on a single rank.
         # currently it is only activated when doing profile runs.
-        if enable_force_load_balance:
+        if enable_force_load_balance and not self.use_aclgraph:
             topk_ids = torch.randint_like(topk_ids, 0, global_num_experts)
 
         fused_moe_state = get_forward_context().fused_moe_state
```
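To see what the guarded branch does, here is a small runnable sketch. The shapes and values are hypothetical, and the reading that the routing randomization should be skipped when ACL Graph is in use is an inference from the change rather than something stated in the commit:

```python
import torch

# Hypothetical routing state: 4 tokens, top-2 routing over 8 experts.
global_num_experts = 8
topk_ids = torch.zeros((4, 2), dtype=torch.int64)  # all tokens routed to expert 0

enable_force_load_balance = True  # set during profile runs
use_aclgraph = False              # the new guard added by this commit

# Mirrors the guarded branch in apply(): during profile runs the real routing
# is overwritten with uniform random expert ids so that no single rank
# accumulates too many tokens.
if enable_force_load_balance and not use_aclgraph:
    topk_ids = torch.randint_like(topk_ids, 0, global_num_experts)

print(topk_ids)  # random ids in [0, 8); left untouched when use_aclgraph is True
```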
