
Merge pull request OpenBMB#52 from kierangilliam/report-usage
Report dollars spent on OpenAI
chenweize1998 authored Oct 9, 2023
2 parents e7acdd4 + f6d60c0 commit abd444e
Showing 7 changed files with 95 additions and 3 deletions.
9 changes: 9 additions & 0 deletions agentverse/agents/base.py
@@ -48,6 +48,15 @@ def add_message_to_memory(self, messages: List[Message]) -> None:
"""Add a message to the memory"""
pass

def get_spend(self) -> float:
return self.llm.get_spend()

def get_spend_formatted(self) -> str:
two_trailing = f"${self.get_spend():.2f}"
if two_trailing == "$0.00":
return f"${self.get_spend():.6f}"
return two_trailing

def get_all_prompts(self, **kwargs):
prepend_prompt = Template(self.prepend_prompt_template).safe_substitute(
**kwargs
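
The helper prints two decimals in the common case but switches to six whenever rounding would display a non-zero spend as $0.00. A standalone sketch of that rule (illustrative function name, not part of the commit):

def format_spend(spend: float) -> str:
    # Two decimals normally; six when the amount would round down to zero.
    two_trailing = f"${spend:.2f}"
    if two_trailing == "$0.00":
        return f"${spend:.6f}"
    return two_trailing

print(format_spend(1.2345))   # $1.23
print(format_spend(0.00042))  # $0.000420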
7 changes: 7 additions & 0 deletions agentverse/environments/base.py
@@ -1,4 +1,5 @@
from __future__ import annotations
from agentverse.logging import logger

from abc import abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List
@@ -46,6 +47,12 @@ def reset(self) -> None:
"""Reset the environment"""
pass

def report_metrics(self) -> None:
"""Report useful metrics"""
        total_spent = sum(agent.get_spend() for agent in self.agents)
        logger.info(f"Total spent: ${total_spent:.6f}")

def is_done(self) -> bool:
"""Check if the environment is done"""
return self.cnt_turn >= self.max_turns
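
A minimal sketch of the aggregation, using hypothetical stub agents in place of real ones:

class StubAgent:
    def __init__(self, spend: float):
        self._spend = spend

    def get_spend(self) -> float:
        return self._spend

agents = [StubAgent(0.0012), StubAgent(0.0034)]
print(f"Total spent: ${sum(a.get_spend() for a in agents):.6f}")  # Total spent: $0.004600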
23 changes: 23 additions & 0 deletions agentverse/environments/tasksolving_env/basic.py
@@ -108,6 +108,29 @@ async def step(
self.cnt_turn += 1
return flatten_result, advice, flatten_plan, logs, self.success

def iter_agents(self):
for role, agent_or_agents in self.agents.items():
if isinstance(agent_or_agents, list):
for agent in agent_or_agents:
yield role, agent
else:
yield role, agent_or_agents

def get_spend(self):
total_spent = sum([agent.get_spend() for (_, agent) in self.iter_agents()])
return total_spent

def report_metrics(self) -> None:
logger.info("", "Agent spend:", Fore.GREEN)
for role, agent in self.iter_agents():
name = agent.name.split(":")[0]
logger.info(
"",
f"Agent (Role: {role}) {name}: {agent.get_spend_formatted()}",
Fore.GREEN,
)
logger.info("", f"Total spent: ${self.get_spend():.6f}", Fore.GREEN)

def is_done(self):
"""Check if the environment is done"""
return self.cnt_turn >= self.max_turn or self.success
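
In the task-solving environment, self.agents maps each role to either a single agent or a list of them; iter_agents flattens both shapes into (role, agent) pairs. A hypothetical illustration with stand-in objects:

from types import SimpleNamespace

agents = {
    "solver": SimpleNamespace(name="Alice:planner"),
    "critic": [SimpleNamespace(name="Bob:critic"), SimpleNamespace(name="Carol:critic")],
}

for role, agent_or_agents in agents.items():
    if isinstance(agent_or_agents, list):
        for agent in agent_or_agents:
            print(role, agent.name.split(":")[0])
    else:
        print(role, agent_or_agents.name.split(":")[0])
# solver Alice
# critic Bob
# critic Carol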
7 changes: 7 additions & 0 deletions agentverse/llms/base.py
@@ -21,6 +21,13 @@ class BaseLLM(BaseModel):
args: BaseModelArgs = Field(default_factory=BaseModelArgs)
max_retry: int = Field(default=3)

@abstractmethod
def get_spend(self) -> float:
"""
Number of USD spent
"""
return -1.0

@abstractmethod
def generate_response(self, **kwargs) -> LLMResult:
pass
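
Concrete backends are expected to override the hook with real accounting. A minimal sketch of the contract, assuming a hypothetical flat rate and ignoring any other abstract methods a real subclass must also implement:

class FlatRateLLM(BaseLLM):
    total_tokens: int = 0  # a real backend would update this per response

    def get_spend(self) -> float:
        return self.total_tokens * 0.002 / 1000.0  # hypothetical $0.002 per 1K tokens

    def generate_response(self, **kwargs) -> LLMResult:
        raise NotImplementedError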
43 changes: 43 additions & 0 deletions agentverse/llms/openai.py
@@ -99,6 +99,9 @@ class OpenAIChatArgs(BaseModelArgs):
class OpenAIChat(BaseChatModel):
args: OpenAIChatArgs = Field(default_factory=OpenAIChatArgs)

total_prompt_tokens: int = 0
total_completion_tokens: int = 0

def __init__(self, max_retry: int = 3, **kwargs):
args = OpenAIChatArgs()
args = args.dict()
@@ -133,6 +136,7 @@ def generate_response(
**self.args.dict(),
)
if response["choices"][0]["message"].get("function_call") is not None:
self.collect_metrics(response)
return LLMResult(
content=response["choices"][0]["message"].get("content", ""),
function_name=response["choices"][0]["message"][
@@ -148,6 +152,7 @@
total_tokens=response["usage"]["total_tokens"],
)
else:
self.collect_metrics(response)
return LLMResult(
content=response["choices"][0]["message"]["content"],
send_tokens=response["usage"]["prompt_tokens"],
@@ -160,6 +165,7 @@
messages=messages,
**self.args.dict(),
)
self.collect_metrics(response)
return LLMResult(
content=response["choices"][0]["message"]["content"],
send_tokens=response["usage"]["prompt_tokens"],
@@ -235,6 +241,7 @@ async def agenerate_response(
raise ValueError(
"The returned argument in function call is not valid json."
)
self.collect_metrics(response)
return LLMResult(
function_name=function_name,
function_arguments=arguments,
@@ -244,6 +251,7 @@
)

else:
self.collect_metrics(response)
return LLMResult(
content=response["choices"][0]["message"]["content"],
send_tokens=response["usage"]["prompt_tokens"],
@@ -258,6 +266,7 @@
messages=messages,
**self.args.dict(),
)
self.collect_metrics(response)
return LLMResult(
content=response["choices"][0]["message"]["content"],
send_tokens=response["usage"]["prompt_tokens"],
@@ -279,6 +288,40 @@ def construct_messages(
messages.append({"role": "user", "content": append_prompt})
return messages

def collect_metrics(self, response):
self.total_prompt_tokens += response["usage"]["prompt_tokens"]
self.total_completion_tokens += response["usage"]["completion_tokens"]

    def get_spend(self) -> float:
input_cost_map = {
"gpt-3.5-turbo": 0.0015,
"gpt-3.5-turbo-16k": 0.003,
"gpt-3.5-turbo-0613": 0.0015,
"gpt-3.5-turbo-16k-0613": 0.003,
"gpt-4": 0.03,
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
}

output_cost_map = {
"gpt-3.5-turbo": 0.002,
"gpt-3.5-turbo-16k": 0.004,
"gpt-3.5-turbo-0613": 0.002,
"gpt-3.5-turbo-16k-0613": 0.004,
"gpt-4": 0.06,
"gpt-4-0613": 0.06,
"gpt-4-32k": 0.12,
}

model = self.args.model
if model not in input_cost_map or model not in output_cost_map:
raise ValueError(f"Model type {model} not supported")

return (
self.total_prompt_tokens * input_cost_map[model] / 1000.0
+ self.total_completion_tokens * output_cost_map[model] / 1000.0
)


@retry(
stop=stop_after_attempt(3),
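
Both maps are USD per 1K tokens, so spend is tokens divided by 1,000 times the per-model rate, with prompt and completion tokens priced separately. A worked example using the gpt-4 rates above, assuming 1,000 prompt and 500 completion tokens accumulated:

total_prompt_tokens, total_completion_tokens = 1_000, 500
input_rate, output_rate = 0.03, 0.06  # gpt-4, USD per 1K tokens

spend = (total_prompt_tokens * input_rate / 1000.0
         + total_completion_tokens * output_rate / 1000.0)
print(f"${spend:.6f}")  # $0.060000 = $0.03 prompt + $0.03 completion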
1 change: 1 addition & 0 deletions agentverse/simulation.py
@@ -43,6 +43,7 @@ def run(self):
self.environment.reset()
while not self.environment.is_done():
asyncio.run(self.environment.step())
self.environment.report_metrics()

def reset(self):
self.environment.reset()
8 changes: 5 additions & 3 deletions agentverse/tasksolving.py
@@ -67,7 +67,8 @@ def run(self):
self.environment.step(advice, previous_plan)
)
self.logs += logs
-        self.save_result(previous_plan, result)
+        self.environment.report_metrics()
+        self.save_result(previous_plan, result, self.environment.get_spend())
return previous_plan, result, self.logs

def singleagent_thinking(self, preliminary_solution, advice) -> str:
@@ -80,10 +81,11 @@ def singleagent_thinking(self, preliminary_solution, advice) -> str:
def reset(self):
self.environment.reset()

-    def save_result(self, plan: str, result: str):
+    def save_result(self, plan: str, result: str, spend: float):
"""Save the result to the result file"""
result_file_path = "../results/" + self.task + ".txt"
result_file_path = "./results/" + self.task + ".txt"
os.makedirs(os.path.dirname(result_file_path), exist_ok=True)
with open(result_file_path, "w") as f:
f.write("[Final Plan]\n" + plan + "\n\n")
f.write("[Result]\n" + result)
f.write(f"[Spent]\n${spend}")
