From 60cb13d76c61b8953dd5651f9b64178f81cba288 Mon Sep 17 00:00:00 2001
From: wxg0103 <727495428@qq.com>
Date: Tue, 22 Oct 2024 14:42:33 +0800
Subject: [PATCH] fix: resolve the error raised during Doubao agent (bot) conversations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

--bug=1047570 --user=Wang Xiaogang (王孝刚)

[github#1374][LLM] With the plain Doubao model connected to WeCom (Enterprise
WeChat), questions are answered normally. With the Doubao agent (bot) model
connected, asking a question in WeCom produces an error in the backend log.
https://www.tapd.cn/57709429/s/1595490
---
 .../models_provider/impl/base_chat_open_ai.py | 21 +++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/apps/setting/models_provider/impl/base_chat_open_ai.py b/apps/setting/models_provider/impl/base_chat_open_ai.py
index 76337eb26c6..c0594d913c2 100644
--- a/apps/setting/models_provider/impl/base_chat_open_ai.py
+++ b/apps/setting/models_provider/impl/base_chat_open_ai.py
@@ -1,13 +1,13 @@
 # coding=utf-8
 from typing import List, Dict, Optional, Any, Iterator, Type, cast
 
-from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import BaseMessage, AIMessageChunk, BaseMessageChunk
+from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
 from langchain_core.runnables import RunnableConfig, ensure_config
 from langchain_openai import ChatOpenAI
-from langchain_openai.chat_models.base import _convert_delta_to_message_chunk
+
+from common.config.tokenizer_manage_config import TokenizerManage
 
 
 class BaseChatOpenAI(ChatOpenAI):
@@ -17,9 +17,21 @@ def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
         return self.usage_metadata
 
     def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        if self.usage_metadata is None or self.usage_metadata == {}:
+            try:
+                return super().get_num_tokens_from_messages(messages)
+            except Exception as e:
+                tokenizer = TokenizerManage.get_tokenizer()
+                return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
         return self.usage_metadata.get('input_tokens', 0)
 
     def get_num_tokens(self, text: str) -> int:
+        if self.usage_metadata is None or self.usage_metadata == {}:
+            try:
+                return super().get_num_tokens(text)
+            except Exception as e:
+                tokenizer = TokenizerManage.get_tokenizer()
+                return len(tokenizer.encode(text))
         return self.get_last_generation_info().get('output_tokens', 0)
 
     def _stream(
@@ -54,5 +66,6 @@ def invoke(
                 **kwargs,
             ).generations[0][0],
         ).message
-        self.usage_metadata = chat_result.response_metadata['token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
+        self.usage_metadata = chat_result.response_metadata[
+            'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
         return chat_result
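
Note on the change: the heart of the patch is a two-stage fallback for token
counting when the provider (here, a Doubao bot endpoint) reports no usage
metadata: first try the inherited tiktoken-based counter, and if that raises
(as langchain's ChatOpenAI counter does for model names it does not
recognize), estimate with the project's local tokenizer instead of letting
the error surface in the conversation flow. Below is a minimal,
self-contained sketch of that chain in plain Python; local_estimate and
unknown_model_counter are hypothetical stand-ins for
TokenizerManage.get_tokenizer() and super().get_num_tokens_from_messages(),
not project code.

    # Sketch (not project code) of the fallback chain added by this patch.
    from typing import Any, Callable, Dict, List, Optional


    def local_estimate(messages: List[str]) -> int:
        # Stand-in for TokenizerManage.get_tokenizer(); the real tokenizer is
        # subword-based, so whitespace splitting is only a rough approximation.
        return sum(len(m.split()) for m in messages)


    def count_input_tokens(usage_metadata: Optional[Dict[str, Any]],
                           precise_counter: Callable[[List[str]], int],
                           messages: List[str]) -> int:
        if usage_metadata:
            # The provider reported usage (OpenAI-style responses): trust it.
            return usage_metadata.get('input_tokens', 0)
        try:
            # Stand-in for super().get_num_tokens_from_messages(), which needs
            # a model name the tiktoken mapping knows about.
            return precise_counter(messages)
        except Exception:
            # Unknown model (the Doubao-bot case): fall back to a local
            # estimate instead of propagating the error.
            return local_estimate(messages)


    def unknown_model_counter(messages: List[str]) -> int:
        raise NotImplementedError("model not supported by tiktoken mapping")


    print(count_input_tokens({'input_tokens': 42}, unknown_model_counter, []))     # 42
    print(count_input_tokens(None, unknown_model_counter, ["hello there", "hi"]))  # 3

Trusting the provider-reported count when present keeps the exact numbers;
the local estimate only kicks in when nothing better is available, which is
good enough for the logging and accounting paths that were previously failing.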