Commit 30513d1

[Bugfix] fix xpu communicator (#13368)
Signed-off-by: yan ma <yan.ma@intel.com>
1 parent 1f69c4a commit 30513d1

File tree

vllm/distributed/device_communicators/xpu_communicator.py
vllm/platforms/xpu.py

2 files changed: +58, -0 lines
vllm/distributed/device_communicators/xpu_communicator.py (new file)

Lines changed: 54 additions & 0 deletions
@@ -0,0 +1,54 @@
# SPDX-License-Identifier: Apache-2.0

from typing import Optional

import torch
import torch.distributed as dist
from torch.distributed import ProcessGroup

from .base_device_communicator import DeviceCommunicatorBase


class XpuCommunicator(DeviceCommunicatorBase):

    def __init__(self,
                 cpu_group: ProcessGroup,
                 device: Optional[torch.device] = None,
                 device_group: Optional[ProcessGroup] = None,
                 unique_name: str = ""):
        super().__init__(cpu_group, device, device_group, unique_name)

    def all_reduce(self, input_) -> torch.Tensor:
        dist.all_reduce(input_, group=self.device_group)
        return input_

    def gather(self,
               input_: torch.Tensor,
               dst: int = 0,
               dim: int = -1) -> Optional[torch.Tensor]:
        assert -input_.dim() <= dim < input_.dim(), (
            f"Invalid dim ({dim}) for input tensor with shape {input_.size()}")
        if dim < 0:
            # Convert negative dim to positive.
            dim += input_.dim()
        # For xpu path, gather doesn't work properly together with ray
        # cluster so we use all_gather instead for now.
        input_size = input_.size()
        # Allocate output tensor.
        output_tensor = torch.empty((self.world_size, ) + input_size,
                                    dtype=input_.dtype,
                                    device=input_.device)
        # All-gather.
        dist.all_gather_into_tensor(output_tensor,
                                    input_,
                                    group=self.device_group)
        if self.rank_in_group == dst:
            # Reshape
            output_tensor = output_tensor.movedim(0, dim)
            output_tensor = output_tensor.reshape(input_size[:dim] +
                                                  (self.world_size *
                                                   input_size[dim], ) +
                                                  input_size[dim + 1:])
        else:
            output_tensor = None
        return output_tensor
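
As the inline comment explains, gather is emulated here with an all-gather plus a reshape on the destination rank: all_gather_into_tensor stacks the per-rank shards along a new leading axis, and moving that axis to dim before flattening it is equivalent to concatenating the shards along dim. A minimal single-process sketch of that reshape identity (illustrative only, not part of the commit; plain tensors stand in for the world_size ranks):

import torch

world_size, dim = 4, 1
# Simulate the per-rank shards that all_gather_into_tensor would collect.
shards = [torch.randn(2, 3, 5) for _ in range(world_size)]
input_size = shards[0].size()

# What all_gather_into_tensor produces: shards stacked on a new leading axis.
stacked = torch.stack(shards)  # shape: (world_size, 2, 3, 5)

# The same movedim + reshape that XpuCommunicator.gather applies on rank dst.
gathered = stacked.movedim(0, dim).reshape(
    input_size[:dim] + (world_size * input_size[dim], ) + input_size[dim + 1:])

# Equivalent to a true gather: concatenation of the shards along dim.
assert torch.equal(gathered, torch.cat(shards, dim=dim))
print(gathered.shape)  # torch.Size([2, 12, 5])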

vllm/platforms/xpu.py

Lines changed: 4 additions & 0 deletions
@@ -135,3 +135,7 @@ def device_support_bf16(cls) -> bool:
             logger.warning("Unknown device name %s, always use float16",
                            device_name)
             return False
+
+    @classmethod
+    def get_device_communicator_cls(cls) -> str:
+        return "vllm.distributed.device_communicators.xpu_communicator.XpuCommunicator"  # noqa
