Skip to content

Commit a8039cf

Browse files
committed
also fix apply_hf_chat_template
Signed-off-by: Linkun <github@lkchen.net>
1 parent 5de2f7f commit a8039cf

File tree

5 files changed

+9
-9
lines changed

5 files changed

+9
-9
lines changed

tests/entrypoints/openai/test_chat_template.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -122,10 +122,10 @@ def test_get_gen_prompt(model, template, add_generation_prompt,
122122

123123
# Call the function and get the result
124124
result = apply_hf_chat_template(
125-
model_config,
126-
tokenizer,
125+
tokenizer=tokenizer,
127126
conversation=mock_request.messages,
128127
chat_template=mock_request.chat_template or template_content,
128+
model_config=model_config,
129129
tools=None,
130130
add_generation_prompt=mock_request.add_generation_prompt,
131131
continue_final_message=mock_request.continue_final_message,

tests/entrypoints/test_chat_utils.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -793,10 +793,10 @@ def get_conversation(is_hf: bool):
793793
)
794794

795795
vllm_result = apply_hf_chat_template(
796-
model_config,
797-
tokenizer,
796+
tokenizer=tokenizer,
798797
conversation=conversation,
799798
chat_template=None,
799+
model_config=model_config,
800800
tools=None,
801801
add_generation_prompt=True,
802802
)

vllm/entrypoints/chat_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1201,12 +1201,12 @@ def parse_chat_messages_futures(
12011201

12021202

12031203
def apply_hf_chat_template(
1204-
model_config: ModelConfig,
12051204
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
12061205
conversation: list[ConversationMessage],
12071206
chat_template: Optional[str],
12081207
tools: Optional[list[dict[str, Any]]],
12091208
*,
1209+
model_config: ModelConfig,
12101210
tokenize: bool = False, # Different from HF's default
12111211
**kwargs: Any,
12121212
) -> str:

vllm/entrypoints/llm.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -767,9 +767,9 @@ def chat(
767767
)
768768
else:
769769
prompt_str = apply_hf_chat_template(
770-
model_config,
771-
tokenizer,
770+
tokenizer=tokenizer,
772771
conversation=conversation,
772+
model_config=model_config,
773773
**_chat_template_kwargs,
774774
)
775775
# Special tokens are already included in chat templates so

vllm/entrypoints/openai/serving_engine.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -701,9 +701,9 @@ async def _preprocess_chat(
701701
)
702702
else:
703703
request_prompt = apply_hf_chat_template(
704-
model_config,
705-
tokenizer,
704+
tokenizer=tokenizer,
706705
conversation=conversation,
706+
model_config=model_config,
707707
**_chat_template_kwargs,
708708
)
709709

0 commit comments

Comments (0)