@@ -324,21 +324,26 @@ def run(self) -> None:
324324
325325
def _is_hpu() -> bool:
    """Return True if the build should target Intel Gaudi (HPU).

    Detection order:
      1. If the ``VLLM_TARGET_DEVICE`` env var was set explicitly, trust it
         and skip autodetection entirely.
      2. Otherwise, probe for hardware: first try running ``hl-smi``; if that
         fails (tool missing, no permission, or non-zero exit), fall back to
         checking whether the ``habanalabs`` kernel driver is loaded (Linux
         only).
    """
    # If VLLM_TARGET_DEVICE env var was set explicitly, skip HPU autodetection.
    if os.getenv("VLLM_TARGET_DEVICE", None) == VLLM_TARGET_DEVICE:
        return VLLM_TARGET_DEVICE == "hpu"

    # VLLM_TARGET_DEVICE was not set explicitly: check if hl-smi succeeds,
    # and if it doesn't, check if the habanalabs driver is loaded.
    is_hpu_available = False
    try:
        out = subprocess.run(["hl-smi"], capture_output=True, check=True)
        # check=True already guarantees returncode == 0 here; kept explicit
        # for clarity.
        is_hpu_available = out.returncode == 0
    except (FileNotFoundError, PermissionError, subprocess.CalledProcessError):
        # hl-smi unavailable — fall back to inspecting loaded kernel modules,
        # which only makes sense on Linux.
        if sys.platform.startswith("linux"):
            try:
                output = subprocess.check_output(
                    'lsmod | grep habanalabs | wc -l', shell=True)
                is_hpu_available = int(output) > 0
            except (ValueError, FileNotFoundError, PermissionError,
                    subprocess.CalledProcessError):
                # Best-effort probe: any failure means "no HPU detected".
                pass
    return is_hpu_available
342347
343348
344349def _no_device () -> bool :
0 commit comments