Skip to content

Commit a944f8e

Browse files
authored
[Misc] Delete LoRA-related redundancy code (#17841)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
1 parent 015815f commit a944f8e

File tree

4 files changed

+3
-17
lines changed

vllm/lora/models.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ def __init__(
344344

345345
self.supported_lora_modules = get_supported_lora_modules(self.model)
346346
assert self.supported_lora_modules, "No supported LoRA modules found in"
347-
f"{self.model.__class__.__name__}."
347+
f" {self.model.__class__.__name__}."
348348
if lora_config.long_lora_scaling_factors:
349349
# We need to replace rotary emb layer to do batch computation
350350
# for long lora.

vllm/model_executor/models/grok1.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -504,9 +504,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
504504
self.unpadded_vocab_size,
505505
config.hidden_size,
506506
org_num_embeddings=config.vocab_size,
507-
padding_size=DEFAULT_VOCAB_PADDING_SIZE
508-
# We need bigger padding if using lora for kernel compatibility
509-
if not lora_config else lora_config.lora_vocab_padding_size,
507+
padding_size=DEFAULT_VOCAB_PADDING_SIZE,
510508
quant_config=quant_config,
511509
prefix=maybe_prefix(prefix, "lm_head"),
512510
)

vllm/model_executor/models/nemotron_nas.py

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -334,14 +334,6 @@ class DeciLMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, HasNoOps):
334334
}
335335

336336
# LoRA specific attributes
337-
supported_lora_modules = [
338-
"qkv_proj",
339-
"o_proj",
340-
"gate_up_proj",
341-
"down_proj",
342-
"embed_tokens",
343-
"lm_head",
344-
]
345337
embedding_modules = {
346338
"embed_tokens": "input_embeddings",
347339
"lm_head": "output_embeddings",

vllm/model_executor/models/phi4mm.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -955,11 +955,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
955955
self.unpadded_vocab_size,
956956
config.hidden_size,
957957
org_num_embeddings=config.vocab_size,
958-
padding_size=(
959-
DEFAULT_VOCAB_PADDING_SIZE
960-
# We need bigger padding if using lora for kernel
961-
# compatibility
962-
if not lora_config else lora_config.lora_vocab_padding_size),
958+
padding_size=DEFAULT_VOCAB_PADDING_SIZE,
963959
quant_config=quant_config,
964960
)
965961
if config.tie_word_embeddings:

0 commit comments

Comments (0)