
Commit c8851a4

Add deprecation warning for lora_extra_vocab_size (#23635)
Signed-off-by: Jinheng Li <ahengljh@gmail.com>
1 parent: f48a9af

1 file changed: +8, -2 lines

vllm/config/__init__.py

Lines changed: 8 additions & 2 deletions
@@ -2439,8 +2439,8 @@ class LoRAConfig:
     lora_dtype: Union[torch.dtype, LoRADType] = "auto"
     """Data type for LoRA. If auto, will default to base model dtype."""
     lora_extra_vocab_size: int = 256
-    """Maximum size of extra vocabulary that can be present in a LoRA adapter
-    (added to the base model vocabulary)."""
+    """(Deprecated) Maximum size of extra vocabulary that can be present in a
+    LoRA adapter. Will be removed in v0.12.0."""
     lora_vocab_padding_size: ClassVar[int] = current_platform\
         .get_lora_vocab_padding_size()
 

@@ -2482,6 +2482,12 @@ def compute_hash(self) -> str:
         return hash_str
 
     def __post_init__(self):
+        # Deprecation warning for lora_extra_vocab_size
+        logger.warning(
+            "`lora_extra_vocab_size` is deprecated and will be removed "
+            "in v0.12.0. Additional vocabulary support for "
+            "LoRA adapters is being phased out.")
+
         # Setting the maximum rank to 512 should be able to satisfy the vast
         # majority of applications.
         possible_max_ranks = (8, 16, 32, 64, 128, 256, 320, 512)
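
For context, a minimal sketch of how this change surfaces at runtime, assuming LoRAConfig can be constructed with its default field values and that the module-level logger shown in the diff is configured to emit warnings (neither detail is part of this commit):

    # Hypothetical usage sketch, not part of this commit: constructing a
    # LoRAConfig runs __post_init__, which now logs the deprecation warning
    # regardless of whether lora_extra_vocab_size was explicitly set.
    from vllm.config import LoRAConfig

    cfg = LoRAConfig()  # assumes the default field values pass validation
    # Expected log output, approximately:
    #   WARNING ... `lora_extra_vocab_size` is deprecated and will be removed
    #   in v0.12.0. Additional vocabulary support for LoRA adapters is being
    #   phased out.

Note that the warning is emitted from __post_init__ without checking the field's value, so it appears even for callers that never set lora_extra_vocab_size.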
