log user name during chat #2032

Open · wants to merge 1 commit into base: master
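Summary of the change: every `log_chat` call site in the `request_llms` bridges now forwards the active user's name via `chatbot.get_user()`, and `log_chat` in `toolbox.py` gains a `user_name` parameter defaulting to `default_user_name`. A minimal sketch of the intended call pattern follows; the simplified `ChatBotWithCookies` and `log_chat` bodies are illustrative assumptions, not the project's actual implementations:

```python
# Minimal sketch of the new logging path -- simplified stand-ins, not the
# project's real classes. Assumption: the chatbot object carries per-session
# cookies that include the signed-in user's name.
from loguru import logger

default_user_name = "default_user"  # assumption: stands in for toolbox.default_user_name

class ChatBotWithCookies(list):
    """Stand-in for the project's chatbot container."""
    def __init__(self, cookies: dict):
        super().__init__()
        self._cookies = cookies

    def get_user(self) -> str:
        # Assumed behavior: fall back to the default when no user is signed in.
        return self._cookies.get("user_name", default_user_name)

def log_chat(llm_model: str, input_str: str, output_str: str,
             user_name: str = default_user_name):
    # After this PR, every chat record carries the user name alongside the UID.
    logger.bind(chat_msg=True).info(
        f"[USER] {user_name} [Model] {llm_model} "
        f"[Query] {input_str} [Response] {output_str}"
    )

# Call pattern repeated at every bridge call site in this diff:
chatbot = ChatBotWithCookies({"user_name": "alice"})
log_chat(llm_model="gpt-3.5-turbo", input_str="hi", output_str="hello",
         user_name=chatbot.get_user())
```

Because `user_name` has a default, call sites not touched by this diff keep working unchanged.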
4 changes: 3 additions & 1 deletion .gitignore
@@ -160,4 +160,6 @@ test.*
 temp.*
 objdump*
 *.min.*.js
-TODO
+TODO
+experimental_mods
+search_results
4 changes: 2 additions & 2 deletions request_llms/bridge_chatgpt.py
@@ -341,7 +341,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 # The former is API2D's stop condition, the latter is OpenAI's
 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
     # Judged to be the end of the data stream; gpt_replying_buffer is fully written
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     break
 # Process the body of the data stream
 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -375,7 +375,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
 try:
     chunkjson = json.loads(response.content.decode())
     gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     history[-1] = gpt_replying_buffer
     chatbot[-1] = (history[-2], history[-1])
     yield from update_ui(chatbot=chatbot, history=history)  # Refresh the UI
2 changes: 1 addition & 1 deletion request_llms/bridge_chatgpt_vision.py
@@ -184,7 +184,7 @@ def make_media_input(inputs, image_paths):
     # Judged to be the end of the data stream; gpt_replying_buffer is fully written
     lastmsg = chatbot[-1][-1] + f"\n\n\n\n「{llm_kwargs['llm_model']}调用结束,该模型不具备上下文对话能力,如需追问,请及时切换模型。」"
     yield from update_ui_lastest_msg(lastmsg, chatbot, history, delay=1)
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     break
 # Process the body of the data stream
 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
2 changes: 1 addition & 1 deletion request_llms/bridge_claude.py
@@ -216,7 +216,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 if need_to_pass:
     pass
 elif is_last_chunk:
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     # logger.info(f'[response] {gpt_replying_buffer}')
     break
 else:
2 changes: 1 addition & 1 deletion request_llms/bridge_cohere.py
@@ -223,7 +223,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 chatbot[-1] = (history[-2], history[-1])
 yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # Refresh the UI
 if chunkjson['event_type'] == 'stream-end':
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     history[-1] = gpt_replying_buffer
     chatbot[-1] = (history[-2], history[-1])
     yield from update_ui(chatbot=chatbot, history=history, msg="正常")  # Refresh the UI
2 changes: 1 addition & 1 deletion request_llms/bridge_google_gemini.py
@@ -109,7 +109,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 gpt_replying_buffer += paraphrase['text']  # processed with the json parsing library
 chatbot[-1] = (inputs, gpt_replying_buffer)
 history[-1] = gpt_replying_buffer
-log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
 yield from update_ui(chatbot=chatbot, history=history)
 if error_match:
     history = history[-2]  # errors are not added to the conversation
2 changes: 1 addition & 1 deletion request_llms/bridge_moonshot.py
@@ -166,7 +166,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
         history = history[:-2]
         yield from update_ui(chatbot=chatbot, history=history)  # Refresh the UI
         break
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_bro_result, user_name=chatbot.get_user())

 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
                                   console_slience=False):
4 changes: 2 additions & 2 deletions request_llms/bridge_openrouter.py
@@ -337,7 +337,7 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
 # The former is API2D's stop condition, the latter is OpenAI's
 if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
     # Judged to be the end of the data stream; gpt_replying_buffer is fully written
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     break
 # Process the body of the data stream
 status_text = f"finish_reason: {chunkjson['choices'][0].get('finish_reason', 'null')}"
@@ -371,7 +371,7 @@ def handle_o1_model_special(response, inputs, llm_kwargs, chatbot, history):
 try:
     chunkjson = json.loads(response.content.decode())
     gpt_replying_buffer = chunkjson['choices'][0]["message"]["content"]
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=gpt_replying_buffer, user_name=chatbot.get_user())
     history[-1] = gpt_replying_buffer
     chatbot[-1] = (history[-2], history[-1])
     yield from update_ui(chatbot=chatbot, history=history)  # Refresh the UI
2 changes: 1 addition & 1 deletion request_llms/bridge_qwen.py
@@ -59,7 +59,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)

-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     # Summarize the output
     if response == f"[Local Message] 等待{model_name}响应中 ...":
         response = f"[Local Message] {model_name}响应异常 ..."
2 changes: 1 addition & 1 deletion request_llms/bridge_taichu.py
@@ -68,5 +68,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
2 changes: 1 addition & 1 deletion request_llms/bridge_zhipu.py
@@ -97,5 +97,5 @@ def predict(inputs:str, llm_kwargs:dict, plugin_kwargs:dict, chatbot:ChatBotWith
     chatbot[-1] = [inputs, response]
     yield from update_ui(chatbot=chatbot, history=history)
     history.extend([inputs, response])
-    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response)
+    log_chat(llm_model=llm_kwargs["llm_model"], input_str=inputs, output_str=response, user_name=chatbot.get_user())
     yield from update_ui(chatbot=chatbot, history=history)
8 changes: 4 additions & 4 deletions toolbox.py
@@ -1029,7 +1029,7 @@ def check_repeat_upload(new_pdf_path, pdf_hash):
     # If the content of every page is identical, return True
     return False, None

-def log_chat(llm_model: str, input_str: str, output_str: str):
+def log_chat(llm_model: str, input_str: str, output_str: str, user_name: str=default_user_name):
     try:
         if output_str and input_str and llm_model:
             uid = str(uuid.uuid4().hex)
@@ -1038,15 +1038,15 @@ def log_chat(llm_model: str, input_str: str, output_str: str):
             logger.bind(chat_msg=True).info(dedent(
                 """
                 ╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
-                [UID]
-                {uid}
+                [UID/USER]
+                {uid}/{user_name}
                 [Model]
                 {llm_model}
                 [Query]
                 {input_str}
                 [Response]
                 {output_str}
                 ╰──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╯
-                """).format(uid=uid, llm_model=llm_model, input_str=input_str, output_str=output_str))
+                """).format(uid=uid, user_name=user_name, llm_model=llm_model, input_str=input_str, output_str=output_str))
     except:
         logger.error(trimmed_format_exc())
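For reference, a runnable sketch of the record the updated `log_chat` emits (the box border is shortened here, and the sink configuration behind `logger.bind(chat_msg=True)` is outside this diff):

```python
# Reproduces the new log body from the diff above; formatting only, no sinks.
import uuid
from textwrap import dedent

uid = uuid.uuid4().hex
record = dedent("""
    ╭──────────────────────────────╮
    [UID/USER]
    {uid}/{user_name}
    [Model]
    {llm_model}
    [Query]
    {input_str}
    [Response]
    {output_str}
    ╰──────────────────────────────╯
    """).format(uid=uid, user_name="alice", llm_model="gpt-4",
                input_str="What is 2+2?", output_str="4")
print(record)
```

Only the `[UID]` header lines and the `.format(...)` call change; the rest of the record layout is untouched.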