38 changes: 38 additions & 0 deletions tests/e2e/multicard/test_ep_etp.py
@@ -0,0 +1,38 @@
import os

import pytest

from tests.conftest import VllmRunner
from tests.model_utils import check_outputs_equal


@pytest.mark.skipif(os.getenv("VLLM_USE_V1") == "0",
reason="ep is not supported on v0")
@pytest.mark.parametrize("model_name", ["deepseek-ai/DeepSeek-V2-Lite-Chat"])
def test_e2e_ep_etp_correctness(model_name):
example_prompts = [
"Hello, my name is",
"The president of the United States is",
"The capital of France is",
"The future of AI is",
]
max_tokens = 5

with VllmRunner(model_name,
tensor_parallel_size=2,
additional_config={
"expert_tensor_parallel_size": 2,
}) as vllm_model:
etp_output = vllm_model.generate_greedy(example_prompts, max_tokens)

with VllmRunner(model_name,
tensor_parallel_size=2,
enable_expert_parallel=True) as vllm_model:
ep_output = vllm_model.generate_greedy(example_prompts, max_tokens)

check_outputs_equal(
outputs_0_lst=ep_output,
outputs_1_lst=etp_output,
name_0="ep_output",
name_1="etp_output",
)
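For readers following along, here is a minimal sketch of what the two configurations exercised by this test would look like when constructing an engine directly. This is illustrative only and assumes VllmRunner forwards these keyword arguments unchanged to vllm's LLM / EngineArgs; it is not part of this PR.

from vllm import LLM

# ETP path: keep the experts tensor-parallel via vllm-ascend's additional_config.
etp_llm = LLM(model="deepseek-ai/DeepSeek-V2-Lite-Chat",
              tensor_parallel_size=2,
              additional_config={"expert_tensor_parallel_size": 2})

# EP path: shard the experts with expert parallelism instead.
ep_llm = LLM(model="deepseek-ai/DeepSeek-V2-Lite-Chat",
             tensor_parallel_size=2,
             enable_expert_parallel=True)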
29 changes: 13 additions & 16 deletions vllm_ascend/distributed/parallel_state.py
@@ -28,37 +28,34 @@
def init_ascend_model_parallel(
expert_parallel_size: int = 1,
expert_tensor_parallel_size: int = 1,
world_size: Optional[int] = None,
backend: Optional[str] = None,
):
if model_parallel_initialized():
return
assert torch.distributed.is_initialized()
world_size = world_size or torch.distributed.get_world_size()
    world_size = torch.distributed.get_world_size()
backend = backend or torch.distributed.get_backend(
get_world_group().device_group)
num_expert_parallel_groups = expert_tensor_parallel_size
num_expert_tensor_parallel_groups = expert_parallel_size

global _EP
group_ranks = []
for i in range(num_expert_parallel_groups):
ranks = list(range(i, world_size, num_expert_parallel_groups))
group_ranks.append(ranks)
    # The layout of all ranks: ExternalDP * EP * ETP
    # ExternalDP is the data parallel group that is not part of the model,
    # every dp rank can generate independently (in verl integration).
    all_ranks = torch.arange(world_size).reshape(-1, expert_parallel_size,
                                                 expert_tensor_parallel_size)

    global _EP
    group_ranks = all_ranks.transpose(1,
                                      2).view(-1,
                                              expert_parallel_size).unbind(0)
    group_ranks = [x.tolist() for x in group_ranks]

    _EP = init_model_parallel_group(group_ranks,
                                    get_world_group().local_rank,
                                    backend,
                                    group_name="ep")

group_ranks = []
global _ETP
for i in range(num_expert_tensor_parallel_groups):
ranks = list(
range(i * expert_tensor_parallel_size,
(i + 1) * expert_tensor_parallel_size))
group_ranks.append(ranks)

    group_ranks = all_ranks.view(-1, expert_tensor_parallel_size).unbind(0)
    group_ranks = [x.tolist() for x in group_ranks]

    _ETP = init_model_parallel_group(group_ranks,
                                     get_world_group().local_rank,
                                     backend,
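For context, here is a minimal standalone sketch of the rank layout the new code builds, using a hypothetical 4-rank setup with expert_parallel_size=2, expert_tensor_parallel_size=2 and no external DP (the sizes are illustrative and not taken from this PR):

import torch

world_size = 4
expert_parallel_size = 2         # EP
expert_tensor_parallel_size = 2  # ETP

# Layout of all ranks: ExternalDP * EP * ETP (ExternalDP collapses to 1 here).
all_ranks = torch.arange(world_size).reshape(-1, expert_parallel_size,
                                             expert_tensor_parallel_size)

# EP groups: fix the ETP coordinate and vary the EP coordinate.
ep_groups = [x.tolist() for x in all_ranks.transpose(
    1, 2).view(-1, expert_parallel_size).unbind(0)]
print(ep_groups)   # [[0, 2], [1, 3]]

# ETP groups: contiguous blocks of expert_tensor_parallel_size ranks.
etp_groups = [x.tolist() for x in all_ranks.view(
    -1, expert_tensor_parallel_size).unbind(0)]
print(etp_groups)  # [[0, 1], [2, 3]]

Each EP group spans one rank from every ETP group, matching the group_ranks lists handed to init_model_parallel_group for the "ep" and "etp" groups above.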
1 change: 0 additions & 1 deletion vllm_ascend/worker/worker.py
@@ -554,7 +554,6 @@ def _init_worker_distributed_environment(
init_ascend_model_parallel(
parallel_config.expert_parallel_size,
parallel_config.expert_tensor_parallel_size,
parallel_config.world_size_across_dp,
)
ensure_kv_transfer_initialized(vllm_config)

1 change: 0 additions & 1 deletion vllm_ascend/worker/worker_v1.py
@@ -272,7 +272,6 @@ def _init_worker_distributed_environment(self) -> None:
init_ascend_model_parallel(
parallel_config.expert_parallel_size,
parallel_config.expert_tensor_parallel_size,
parallel_config.world_size_across_dp,
)
ensure_kv_transfer_initialized(self.vllm_config)
