Skip to content

Commit

Permalink

Browse files Browse the repository at this point in the history
  • Loading branch information
DoroWolf committed Dec 19, 2024
1 parent df2818d commit 4962425
Show file tree
Hide file tree
Showing 4 changed files with 12 additions and 12 deletions.
6 changes: 3 additions & 3 deletions modules/ask/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
from core.logger import Logger
from core.utils.cooldown import CoolDown
from .formatting import generate_latex, generate_code_snippet
from .petal import count_petal
from .petal import count_token_petal

if Config("openai_api_key", secret=True, cfg_type=str):
client = AsyncOpenAI(
Expand Down Expand Up @@ -104,8 +104,8 @@ async def _(msg: Bot.MessageSession):
res = messages.data[0].content[0].text.value
tokens = count_token(res)

petal = await count_petal(msg, tokens)
# petal = await count_petal(msg, tokens, gpt4)
petal = await count_token_petal(msg, tokens)
# petal = await count_token_petal(msg, tokens, gpt4)

res = await check(res, msg=msg)
resm = ""
Expand Down
5 changes: 2 additions & 3 deletions modules/ask/__old_init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from core.constants.exceptions import ConfigValueError
from core.dirty_check import check_bool, rickroll
from core.utils.cooldown import CoolDown
from .petal import count_petal
from .petal import count_token_petal

os.environ["LANGCHAIN_TRACING_V2"] = str(Config("enable_langsmith"))
if Config("enable_langsmith"):
Expand Down Expand Up @@ -48,8 +48,7 @@ async def _(msg: Bot.MessageSession):
res = await agent_executor.arun(question)
tokens = cb.total_tokens
if not is_superuser:
petal = await count_petal(msg, tokens)
msg.data.modify_petal(-petal)
petal = await count_token_petal(msg, tokens)
else:
petal = 0

Expand Down
9 changes: 5 additions & 4 deletions modules/ask/petal.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,15 +54,15 @@ async def load_or_refresh_cache():
return exchanged_petal_data["exchanged_petal"]


async def count_petal(msg: Bot.MessageSession, tokens: int, gpt4: bool = False):
async def count_token_petal(msg: Bot.MessageSession, tokens: int, gpt4: bool = False) -> int:
"""计算并减少使用功能时消耗的花瓣数量。
:param msg: 消息会话。
:param tokens: 使用功能时花费的token数量。
:param gpt4: 是否以GPT-4的开销计算。
:returns: 消耗的花瓣数量,保留两位小数。
"""
Logger.info(f"{tokens} tokens have been consumed while calling AI.")
Logger.info(f"{tokens} tokens have been consumed while calling GPT.")
if Config("enable_petal", False) and not msg.check_super_user():
petal_exchange_rate = await load_or_refresh_cache()
if gpt4:
Expand All @@ -77,6 +77,7 @@ async def count_petal(msg: Bot.MessageSession, tokens: int, gpt4: bool = False):
)
petal = price * USD_TO_CNY * CNY_TO_PETAL

petal = round(int(petal))
msg.info.modify_petal(-petal)
return round(petal, 2)
return 0.00
return petal
return 0
4 changes: 2 additions & 2 deletions modules/summary/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from core.dirty_check import check, check_bool, rickroll
from core.logger import Logger
from core.utils.cooldown import CoolDown
from modules.ask.petal import count_petal
from modules.ask.petal import count_token_petal

client = (
AsyncOpenAI(
Expand Down Expand Up @@ -98,7 +98,7 @@ async def _(msg: Bot.MessageSession):
output = completion.choices[0].message.content
tokens = completion.usage.total_tokens

petal = await count_petal(msg, tokens)
petal = await count_token_petal(msg, tokens)
if petal != 0:
output = f"{output}\n{msg.locale.t('petal.message.cost', amount=petal)}"
await wait_msg.delete()
Expand Down

0 comments on commit 4962425

Please sign in to comment.