
Commit 64ea69c

[Fix] Fix update_aclgraph_sizes when running MoE models
Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com>
1 parent 7aa4f85 commit 64ea69c

File tree

5 files changed: 48 additions and 35 deletions


vllm_ascend/distributed/parallel_state.py

Lines changed: 5 additions & 6 deletions
@@ -22,18 +22,17 @@ def get_etp_group() -> GroupCoordinator:
 
 
 def init_ascend_model_parallel(
-    tensor_model_parallel_size: int = 1,
-    pipeline_model_parallel_size: int = 1,
+    expert_parallel_size: int = 1,
     expert_tensor_parallel_size: int = 1,
+    world_size: Optional[int] = None,
     backend: Optional[str] = None,
 ):
     assert torch.distributed.is_initialized()
-    world_size: int = torch.distributed.get_world_size()
+    world_size = world_size or torch.distributed.get_world_size()
     backend = backend or torch.distributed.get_backend(
         get_world_group().device_group)
-    num_expert_parallel_groups: int = expert_tensor_parallel_size
-    num_expert_tensor_parallel_groups: int = (world_size //
-                                              expert_tensor_parallel_size)
+    num_expert_parallel_groups = expert_tensor_parallel_size
+    num_expert_tensor_parallel_groups = expert_parallel_size
 
     global _EP
     group_ranks = []
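The new arithmetic ties the two expert group counts directly to the sizes passed in. A minimal standalone sketch with assumed sizes (not taken from this commit), mirroring the updated lines above:

# Sketch only: assumed sizes for illustration.
world_size = 8
expert_parallel_size = 4             # ranks per expert-parallel group
expert_tensor_parallel_size = 2      # ranks splitting one expert's weights

# Mirrors the updated arithmetic in init_ascend_model_parallel:
num_expert_parallel_groups = expert_tensor_parallel_size      # 2
num_expert_tensor_parallel_groups = expert_parallel_size      # 4

# Under the platform-side derivation ep = world_size // etp, the two
# group layouts tile the full set of ranks.
assert num_expert_parallel_groups * num_expert_tensor_parallel_groups == world_size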

vllm_ascend/platform.py

Lines changed: 26 additions & 8 deletions
@@ -130,6 +130,27 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
         enforce_eager = getattr(vllm_config.model_config, "enforce_eager",
                                 False)
 
+        additional_config = vllm_config.additional_config
+        parallel_config = vllm_config.parallel_config
+        cache_config = vllm_config.cache_config
+
+        if parallel_config:
+            # Default value for expert tensor parallel size
+            parallel_config.expert_tensor_parallel_size = 1
+
+            # NOTE: When enable_expert_parallel is True, we follow vLLM convention:
+            # ep_size = world_size, which means expert_tensor_parallel_size must be 1
+            if (additional_config
+                    and "expert_tensor_parallel_size" in additional_config
+                    and not parallel_config.enable_expert_parallel):
+                parallel_config.expert_tensor_parallel_size = int(
+                    additional_config["expert_tensor_parallel_size"])
+
+            # Calculate expert parallel size based on world size
+            parallel_config.expert_parallel_size = (
+                parallel_config.world_size //
+                parallel_config.expert_tensor_parallel_size)
+
         # TODO(Yizhou): Override the value of enforce_eager to True before
         # the CANN and torch_npu support NPU compilation.
         enforce_eager = True
@@ -157,21 +178,20 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
                 ["vllm.unified_ascend_attention_with_output"])
             update_aclgraph_sizes(vllm_config)
 
-        if vllm_config.additional_config is not None:
-            enable_graph_mode = vllm_config.additional_config.get(
-                "enable_graph_mode", False)
+        if additional_config is not None:
+            enable_graph_mode = additional_config.get("enable_graph_mode",
+                                                      False)
             if enable_graph_mode and not supports_dynamo():
                 logger.warning(
                     "enable_graph_mode is not supported because the version of torch is too low, forcing close enable_graph_mode"
                 )
-                vllm_config.additional_config["enable_graph_mode"] = False
+                additional_config["enable_graph_mode"] = False
             if enable_graph_mode and envs.VLLM_USE_V1 and envs.VLLM_MLA_DISABLE:
                 logger.warning(
                     "NPU graph mode is still experimental and not supported for V1 without mla currently, "
                     "it has been disabled automatically.")
-                vllm_config.additional_config["enable_graph_mode"] = False
+                additional_config["enable_graph_mode"] = False
 
-        parallel_config = vllm_config.parallel_config
         if parallel_config and parallel_config.worker_cls == "auto":
             if envs.VLLM_USE_V1:
                 parallel_config.worker_cls = "vllm_ascend.worker.worker_v1.NPUWorker"
@@ -183,7 +203,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
             else:
                 parallel_config.worker_cls = "vllm_ascend.worker.worker.NPUWorker"
 
-        cache_config = vllm_config.cache_config
         if cache_config:
             if cache_config.block_size is None:
                 cache_config.block_size = 128
@@ -199,7 +218,6 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
         # If ascend_scheduler_config exists in additional_config,
         # extents original scheduler_config to use AscendScheduler.
 
-        additional_config = vllm_config.additional_config
         if additional_config and additional_config.get(
                 "ascend_scheduler_config", None) is not None:
             additional_scheduler_config = additional_config.get(
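Taken together, the platform hook now derives expert_parallel_size from world_size and the (optionally user-supplied) expert_tensor_parallel_size. A standalone sketch of that resolution with assumed values (an 8-rank deployment requesting etp via additional_config; not taken from the commit):

# Sketch only: assumed values, following the resolution order added above.
world_size = 8
additional_config = {"expert_tensor_parallel_size": "2"}
enable_expert_parallel = False

expert_tensor_parallel_size = 1  # default set by the platform hook
if ("expert_tensor_parallel_size" in additional_config
        and not enable_expert_parallel):
    expert_tensor_parallel_size = int(
        additional_config["expert_tensor_parallel_size"])

expert_parallel_size = world_size // expert_tensor_parallel_size
print(expert_tensor_parallel_size, expert_parallel_size)  # 2 4
# With enable_expert_parallel=True, etp stays 1 and ep == world_size,
# matching the vLLM convention noted in the diff.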

vllm_ascend/utils.py

Lines changed: 7 additions & 5 deletions
@@ -126,14 +126,16 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
     original_sizes, compilation_config.cudagraph_capture_sizes = \
         compilation_config.cudagraph_capture_sizes, None
 
-    # Calculate parallel configuration factor (increases with DP or TP)
-    # TODO(Yizhou): This is a temporary solution, need to be improved
-    # in the future, taking into account the other parallel configurations.
+    # Calculate parallel configuration factor
    num_hidden_layers = vllm_config.model_config.hf_config.num_hidden_layers
     parallel_config = vllm_config.parallel_config
+
+    # TODO: Find out whether we need to take into account the pp_size
     parallel_factor = 1 + sum(size > 1 for size in [
-        parallel_config.data_parallel_size,
-        parallel_config.tensor_parallel_size
+        parallel_config.data_parallel_size_local,
+        parallel_config.tensor_parallel_size,
+        parallel_config.expert_parallel_size,
+        parallel_config.expert_tensor_parallel_size,
     ])
 
     # Calculate maximum supported batch sizes considering model architecture
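The factor now counts every parallel dimension that is actually larger than one, and it feeds the maximum-batch-size calculation that follows in update_aclgraph_sizes. A quick worked example with assumed sizes (not from the commit):

# Sketch only: assumed parallel sizes for a hypothetical MoE deployment.
parallel_factor = 1 + sum(size > 1 for size in [
    1,  # data_parallel_size_local
    4,  # tensor_parallel_size
    8,  # expert_parallel_size
    1,  # expert_tensor_parallel_size
])
print(parallel_factor)  # 3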

vllm_ascend/worker/worker.py

Lines changed: 5 additions & 8 deletions
@@ -534,21 +534,18 @@ def _init_worker_distributed_environment(
             backend: str = "hccl") -> None:
         """Initialize the distributed environment."""
         parallel_config = self.parallel_config
-        additional_config = self.vllm_config.additional_config
         set_custom_all_reduce(not parallel_config.disable_custom_all_reduce)
         init_distributed_environment(parallel_config.world_size, rank,
                                      distributed_init_method, local_rank,
                                      backend)
         ensure_model_parallel_initialized(
             parallel_config.tensor_parallel_size,
             parallel_config.pipeline_parallel_size)
-        expert_tensor_parallel_size = 1
-        if additional_config:
-            expert_tensor_parallel_size = additional_config.get(
-                "expert_tensor_parallel_size", 1)
-        init_ascend_model_parallel(parallel_config.tensor_parallel_size,
-                                   parallel_config.pipeline_parallel_size,
-                                   expert_tensor_parallel_size)
+        init_ascend_model_parallel(
+            parallel_config.expert_parallel_size,
+            parallel_config.expert_tensor_parallel_size,
+            parallel_config.world_size,
+        )
         ensure_kv_transfer_initialized(vllm_config)
 
 
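The worker now calls init_ascend_model_parallel positionally with values read from parallel_config (worker_v1.py below makes the same change), so the argument order has to match the updated signature. A self-contained stub (not the real implementation) just to illustrate which value lands where, under assumed sizes:

# Stub mirroring the new signature, for illustration only.
from typing import Optional

def init_ascend_model_parallel(expert_parallel_size: int = 1,
                               expert_tensor_parallel_size: int = 1,
                               world_size: Optional[int] = None,
                               backend: Optional[str] = None):
    return expert_parallel_size, expert_tensor_parallel_size, world_size

# e.g. an 8-rank setup with ep=4, etp=2 (assumed values):
print(init_ascend_model_parallel(4, 2, 8))  # (4, 2, 8)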

vllm_ascend/worker/worker_v1.py

Lines changed: 5 additions & 8 deletions
@@ -221,7 +221,6 @@ def execute_dummy_batch(self) -> None:
 
     def _init_worker_distributed_environment(self) -> None:
         """Initialize the distributed environment."""
-        additional_config = self.vllm_config.additional_config
         parallel_config = self.vllm_config.parallel_config
         set_custom_all_reduce(
             not self.parallel_config.disable_custom_all_reduce)
@@ -231,13 +230,11 @@ def _init_worker_distributed_environment(self) -> None:
         ensure_model_parallel_initialized(
             self.parallel_config.tensor_parallel_size,
             self.parallel_config.pipeline_parallel_size)
-        expert_tensor_parallel_size = 1
-        if additional_config is not None and "expert_tensor_parallel_size" in additional_config:
-            expert_tensor_parallel_size = int(
-                additional_config["expert_tensor_parallel_size"])
-        init_ascend_model_parallel(parallel_config.tensor_parallel_size,
-                                   parallel_config.pipeline_parallel_size,
-                                   expert_tensor_parallel_size)
+        init_ascend_model_parallel(
+            parallel_config.expert_parallel_size,
+            parallel_config.expert_tensor_parallel_size,
+            parallel_config.world_size,
+        )
         ensure_kv_transfer_initialized(self.vllm_config)
 
     def _init_profiler(self):
