Skip to content

Commit

Permalink
(fix) SpendLogs Table
Browse files — browse the repository at this point in the history
  • Branch information unavailable (page captured while still loading)
ishaan-jaff committed Jan 26, 2024
1 parent 0fc8876 commit 2a1104d
Show file tree
Hide file tree
Showing 5 changed files with 25 additions and 13 deletions.
6 changes: 3 additions & 3 deletions litellm/proxy/_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -346,9 +346,9 @@ class LiteLLM_SpendLogs(LiteLLMBase):
model: Optional[str] = ""
call_type: str
spend: Optional[float] = 0.0
total_tokens: Optional[float] = 0.0
prompt_tokens: Optional[float] = 0.0
completion_tokens: Optional[float] = 0.0
total_tokens: Optional[int] = 0
prompt_tokens: Optional[int] = 0
completion_tokens: Optional[int] = 0
startTime: Union[str, datetime, None]
endTime: Union[str, datetime, None]
user: Optional[str] = ""
Expand Down
8 changes: 7 additions & 1 deletion litellm/proxy/proxy_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,12 @@ model_list:
output_cost_per_token: 0.00003
max_tokens: 4096
base_model: gpt-3.5-turbo
- model_name: gpt-4
litellm_params:
model: azure/chatgpt-v-2
api_base: https://openai-gpt-4-test-v-1.openai.azure.com/
api_version: "2023-05-15"
api_key: os.environ/AZURE_API_KEY # The `os.environ/` prefix tells litellm to read this from the env. See https://docs.litellm.ai/docs/simple_proxy#load-api-keys-from-vault
- model_name: gpt-vision
litellm_params:
model: azure/gpt-4-vision
Expand Down Expand Up @@ -61,7 +67,7 @@ model_list:
litellm_settings:
fallbacks: [{"openai-gpt-3.5": ["azure-gpt-3.5"]}]
success_callback: ['langfuse']
max_budget: 0.025 # global budget for proxy
max_budget: 10 # global budget for proxy
budget_duration: 30d # global budget duration, will reset after 30d
# cache: True
# setting callback class
Expand Down
6 changes: 3 additions & 3 deletions litellm/proxy/schema.prisma
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,9 @@ model LiteLLM_SpendLogs {
call_type String
api_key String @default ("")
spend Float @default(0.0)
total_tokens Float @default(0.0)
prompt_tokens Float @default(0.0)
completion_tokens Float @default(0.0)
total_tokens Int @default(0)
prompt_tokens Int @default(0)
completion_tokens Int @default(0)
startTime DateTime // Assuming start_time is a DateTime field
endTime DateTime // Assuming end_time is a DateTime field
model String @default("")
Expand Down
6 changes: 3 additions & 3 deletions schema.prisma
Original file line number Diff line number Diff line change
Expand Up @@ -53,9 +53,9 @@ model LiteLLM_SpendLogs {
call_type String
api_key String @default ("")
spend Float @default(0.0)
total_tokens Float @default(0.0)
prompt_tokens Float @default(0.0)
completion_tokens Float @default(0.0)
total_tokens Int @default(0)
prompt_tokens Int @default(0)
completion_tokens Int @default(0)
startTime DateTime // Assuming start_time is a DateTime field
endTime DateTime // Assuming end_time is a DateTime field
model String @default("")
Expand Down
12 changes: 9 additions & 3 deletions tests/test_keys.py
Original file line number Diff line number Diff line change
Expand Up @@ -281,14 +281,20 @@ async def test_key_info_spend_values():
await asyncio.sleep(5)
spend_logs = await get_spend_logs(session=session, request_id=response["id"])
print(f"spend_logs: {spend_logs}")
usage = spend_logs[0]["usage"]
completion_tokens = spend_logs[0]["completion_tokens"]
prompt_tokens = spend_logs[0]["prompt_tokens"]
print(f"prompt_tokens: {prompt_tokens}; completion_tokens: {completion_tokens}")

litellm.set_verbose = True
prompt_cost, completion_cost = litellm.cost_per_token(
model="gpt-35-turbo",
prompt_tokens=usage["prompt_tokens"],
completion_tokens=usage["completion_tokens"],
prompt_tokens=prompt_tokens,
completion_tokens=completion_tokens,
custom_llm_provider="azure",
)
print("prompt_cost: ", prompt_cost, "completion_cost: ", completion_cost)
response_cost = prompt_cost + completion_cost
print(f"response_cost: {response_cost}")
await asyncio.sleep(5) # allow db log to be updated
key_info = await get_key_info(session=session, get_key=key, call_key=key)
print(
Expand Down

0 comments on commit 2a1104d

Please sign in to comment.