3 files changed: +5 −5 lines changed

distributed/device_communicators
@@ -72,7 +72,7 @@ def __init__(self,
             # currently be an MI300 series.
             self.qr_comm = QuickAllReduce(group=self.cpu_group,
                                           device=self.device)
-        if envs.VLLM_USE_SYMM_MEM and current_platform.is_cuda():
+        if envs.VLLM_ALLREDUCE_USE_SYMM_MEM and current_platform.is_cuda():
             self.symm_mem_comm = SymmMemCommunicator(
                 group=self.cpu_group,
                 device=self.device,
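
For context, this flag (together with the platform check) decides whether the symmetric-memory communicator is constructed at all. The sketch below shows that gating pattern in isolation; it is an illustration under stated assumptions, not vLLM's code: torch.cuda.is_available() stands in for vLLM's current_platform.is_cuda(), and the helper name and commented call site are hypothetical.

# Sketch of the env-gated check from the hunk above (assumed helper,
# not part of vLLM): the symmetric-memory path is only taken when the
# flag is set and the platform is CUDA.
import os

import torch


def should_use_symm_mem_allreduce() -> bool:
    """Mirrors the condition in the diff: env flag AND CUDA available."""
    enabled = bool(int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "0")))
    return enabled and torch.cuda.is_available()


# Hypothetical call site, mirroring the diff:
# if should_use_symm_mem_allreduce():
#     self.symm_mem_comm = SymmMemCommunicator(group=self.cpu_group,
#                                              device=self.device)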

@@ -117,7 +117,7 @@ def __init__(self,
         # now `device` is a `torch.device` object
         assert isinstance(device, torch.device)
         self.device = device
-        if current_platform.is_cuda() and envs.VLLM_USE_SYMM_MEM:
+        if current_platform.is_cuda() and envs.VLLM_ALLREDUCE_USE_SYMM_MEM:
             max_size = CustomAllreduce._MAX_SIZES[world_size]

         cuda_visible_devices = envs.CUDA_VISIBLE_DEVICES
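
The same flag also gates reading a per-world-size buffer cap from CustomAllreduce._MAX_SIZES. A hedged sketch of that lookup pattern follows; the table values and the zero fallback are illustrative assumptions, not numbers taken from vLLM.

# Illustrative world-size -> max symmetric-memory buffer size table.
# The real values live in CustomAllreduce._MAX_SIZES and are not
# reproduced here; these numbers are placeholders.
_EXAMPLE_MAX_SIZES = {
    2: 8 * 1024 * 1024,  # assumed 8 MiB cap for 2 ranks
    4: 8 * 1024 * 1024,  # assumed
    8: 4 * 1024 * 1024,  # assumed
}


def example_max_symm_mem_size(world_size: int) -> int:
    """Return the cap for this world size, or 0 to disable the path."""
    return _EXAMPLE_MAX_SIZES.get(world_size, 0)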

@@ -141,7 +141,7 @@
     VLLM_NIXL_ABORT_REQUEST_TIMEOUT: int = 120
     VLLM_USE_CUDNN_PREFILL: bool = False
     VLLM_LOOPBACK_IP: str = ""
-    VLLM_USE_SYMM_MEM: bool = False
+    VLLM_ALLREDUCE_USE_SYMM_MEM: bool = False


 def get_default_cache_root():
@@ -975,8 +975,8 @@ def get_vllm_port() -> Optional[int]:
     lambda: int(os.getenv("VLLM_NIXL_ABORT_REQUEST_TIMEOUT", "120")),

     # Whether to use pytorch symmetric memory for allreduce
-    "VLLM_USE_SYMM_MEM":
-    lambda: bool(int(os.getenv("VLLM_USE_SYMM_MEM", "0"))),
+    "VLLM_ALLREDUCE_USE_SYMM_MEM":
+    lambda: bool(int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "0"))),
 }


 # --8<-- [end:env-vars-definition]
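
Since the value is parsed with bool(int(os.getenv(..., "0"))), only integer strings are accepted: VLLM_ALLREDUCE_USE_SYMM_MEM=1 enables the path, 0 (or leaving it unset) disables it, and a non-integer value such as "true" would raise a ValueError. The small self-check below demonstrates that parsing behavior, assuming the lambda above is used unchanged.

# Demonstrates how the env-var entry above interprets values.
import os


def parse_flag() -> bool:
    # Same expression as the dict entry above.
    return bool(int(os.getenv("VLLM_ALLREDUCE_USE_SYMM_MEM", "0")))


os.environ.pop("VLLM_ALLREDUCE_USE_SYMM_MEM", None)
assert parse_flag() is False   # unset -> default "0" -> disabled

os.environ["VLLM_ALLREDUCE_USE_SYMM_MEM"] = "1"
assert parse_flag() is True    # "1" -> enabled

os.environ["VLLM_ALLREDUCE_USE_SYMM_MEM"] = "0"
assert parse_flag() is False   # "0" -> disabled
# Values like "true" or "yes" would raise ValueError in int().

To turn the feature on at launch, export VLLM_ALLREDUCE_USE_SYMM_MEM=1 in the environment before starting vLLM.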