
Commit edda199

jeejeelee authored and xuebwang-amd committed
[Bugfix] Fix GPUModelRunner has no attribute lora_manager (vllm-project#24762)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Signed-off-by: xuebwang-amd <xuebwang@amd.com>
1 parent cb96492 commit edda199

File tree

1 file changed: +10, -10 lines changed


vllm/v1/worker/lora_model_runner_mixin.py

Lines changed: 10 additions & 10 deletions
@@ -63,8 +63,7 @@ def load_lora_model(self, model: nn.Module, model_config: ModelConfig,
     def _set_active_loras(self, prompt_lora_mapping: tuple[int, ...],
                           token_lora_mapping: tuple[int, ...],
                           lora_requests: set[LoRARequest]) -> None:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()

         # Set is_prefill to True, so we always use the SGMV kernels on
         # non-cuda platforms.
@@ -75,6 +74,11 @@ def _set_active_loras(self, prompt_lora_mapping: tuple[int, ...],
                                    is_prefill=True)
         self.lora_manager.set_active_adapters(lora_requests, lora_mapping)

+    def _ensure_lora_enabled(self) -> None:
+        if not hasattr(self, "lora_manager"):
+            raise RuntimeError(
+                "LoRA is not enabled. Use --enable-lora to enable LoRA.")
+
     def set_active_loras(self, input_batch: InputBatch,
                          num_scheduled_tokens: np.ndarray) -> None:

@@ -172,21 +176,17 @@ def maybe_remove_all_loras(self, lora_config: Optional[LoRAConfig]):
         self.lora_manager.remove_all_adapters()

     def add_lora(self, lora_request: LoRARequest) -> bool:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.add_adapter(lora_request)

     def remove_lora(self, lora_id: int) -> bool:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.remove_adapter(lora_id)

     def pin_lora(self, lora_id: int) -> bool:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.pin_adapter(lora_id)

     def list_loras(self) -> set[int]:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.list_adapters()
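
For context, a minimal self-contained sketch of the failure mode this commit fixes, using simplified stand-in classes rather than vLLM's actual runner, and assuming only that lora_manager is set exclusively when LoRA is loaded: if LoRA is disabled, load_lora_model() is never called and the attribute never exists, so the old truthiness guard raises AttributeError ("GPUModelRunner has no attribute lora_manager") instead of the intended RuntimeError, while the hasattr()-based guard reports the friendly error.

# Minimal sketch (simplified stand-in classes, not vLLM's actual implementation).

class LoRAModelRunnerMixin:
    def load_lora_model(self):
        # Only called when LoRA is enabled (--enable-lora); otherwise the
        # attribute never exists on the runner instance.
        self.lora_manager = object()

    # Pre-fix guard: the truthiness test touches the attribute before it exists.
    def add_lora_old(self) -> None:
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")

    # Post-fix guard: existence check, as introduced by this commit.
    def _ensure_lora_enabled(self) -> None:
        if not hasattr(self, "lora_manager"):
            raise RuntimeError(
                "LoRA is not enabled. Use --enable-lora to enable LoRA.")

    def add_lora_new(self) -> None:
        self._ensure_lora_enabled()


class GPUModelRunner(LoRAModelRunnerMixin):
    pass


runner = GPUModelRunner()  # LoRA disabled: load_lora_model() was never called

try:
    runner.add_lora_old()
except AttributeError as exc:
    print("old guard:", exc)  # 'GPUModelRunner' object has no attribute 'lora_manager'

try:
    runner.add_lora_new()
except RuntimeError as exc:
    print("new guard:", exc)  # LoRA is not enabled. Use --enable-lora to enable LoRA.

Centralizing the check in _ensure_lora_enabled() also keeps every LoRA entry point (_set_active_loras, add_lora, remove_lora, pin_lora, list_loras) raising the same actionable RuntimeError instead of leaking an AttributeError.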
