Skip to content

Commit

Permalink
ENH: InternLM2 chat template (#919)
Browse files Browse the repository at this point in the history
  • Loading branch information
aresnow1 authored Jan 25, 2024
1 parent 055d3b7 commit 2086f0e
Show file tree
Hide file tree
Showing 4 changed files with 12 additions and 12 deletions.
2 changes: 1 addition & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ install_requires =
openai>1 # For typing
python-jose[cryptography]
passlib[bcrypt]
-    aioprometheus[starlette]
+    aioprometheus[starlette]>=23.12.0

[options.packages.find]
exclude =
Expand Down
12 changes: 6 additions & 6 deletions xinference/model/llm/llm_family.json
Original file line number Diff line number Diff line change
Expand Up @@ -3184,7 +3184,7 @@
"none"
],
"model_id": "internlm/internlm2-chat-7b",
-    "model_revision": "5797f79825bab7013932d57e2babaac1b8de6b4f"
+    "model_revision": "2292b86b21cb856642782cebed0a453997453b1f"
},
{
"model_format": "pytorch",
Expand All @@ -3193,22 +3193,22 @@
"none"
],
"model_id": "internlm/internlm2-chat-20b",
-    "model_revision": "3ccaf3ae82d5d01c0a95eecf40ee550f9c543635"
+    "model_revision": "b666125047cd98c5a7c85ca28720b44a06aed124"
}
],
"prompt_style": {
"style_name": "INTERNLM2",
"system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
"roles": [
-    "[UNUSED_TOKEN_146]user",
-    "[UNUSED_TOKEN_146]assistant"
+    "<|im_start|>user",
+    "<|im_start|>assistant"
],
-    "intra_message_sep": "[UNUSED_TOKEN_145]",
+    "intra_message_sep": "<|im_end|>",
"stop_token_ids": [
92542
],
    "stop": [
-      "[UNUSED_TOKEN_145]"
+      "<|im_end|>"
]
}
},
Expand Down
8 changes: 4 additions & 4 deletions xinference/model/llm/llm_family_modelscope.json
Original file line number Diff line number Diff line change
Expand Up @@ -1817,15 +1817,15 @@
"style_name": "INTERNLM2",
"system_prompt": "You are InternLM (书生·浦语), a helpful, honest, and harmless AI assistant developed by Shanghai AI Laboratory (上海人工智能实验室).",
"roles": [
-    "[UNUSED_TOKEN_146]user",
-    "[UNUSED_TOKEN_146]assistant"
+    "<|im_start|>user",
+    "<|im_start|>assistant"
],
-    "intra_message_sep": "[UNUSED_TOKEN_145]",
+    "intra_message_sep": "<|im_end|>",
"stop_token_ids": [
92542
],
    "stop": [
-      "[UNUSED_TOKEN_145]"
+      "<|im_end|>"
]
}
},
Expand Down
2 changes: 1 addition & 1 deletion xinference/model/llm/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -310,7 +310,7 @@ def get_prompt(
ret = (
"<s>"
if prompt_style.system_prompt == ""
-            else "<s>[UNUSED_TOKEN_146]system\n"
+            else "<s><|im_start|>system\n"
+ prompt_style.system_prompt
+ prompt_style.intra_message_sep
+ "\n"
Expand Down

0 comments on commit 2086f0e

Please sign in to comment.