We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 8e9ffd3 commit 8a4a2ef (Copy full SHA for 8a4a2ef)
vllm/v1/structured_output/__init__.py
@@ -27,7 +27,6 @@
27
class StructuredOutputManager:
28
29
def __init__(self, vllm_config: VllmConfig):
30
- self.vocab_size = vllm_config.model_config.get_vocab_size()
31
self.vllm_config = vllm_config
32
self.init_complete = False
33
@@ -41,6 +40,7 @@ def _delayed_init(self):
41
40
tokenizer_group.ping()
42
43
tokenizer = tokenizer_group.get_lora_tokenizer(None)
+ self.vocab_size = tokenizer.max_token_id
44
if isinstance(tokenizer, MistralTokenizer):
45
# NOTE: ideally, xgrammar should handle this accordingly.
46
# refer to https://github.com/mlc-ai/xgrammar/blob/d77c0a0173ef14779c918e3be7966ba852f7910f/python/xgrammar/tokenizer_info.py#L98
0 commit comments