Add and document ability to use LiteLLM Logging Observability tools #1145

Merged · 3 commits · Aug 22, 2024

Changes from all commits
21 changes: 21 additions & 0 deletions docs/docs/usage-guide/additional_configurations.md
@@ -91,3 +91,24 @@ user="""
"""
```
Note that the new prompt will need to generate an output compatible with the relevant [post-process function](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/tools/pr_description.py#L137).

## Integrating with Logging Observability Platforms

Various logging and observability tools work out of the box when using the default LiteLLM AI handler. To enable one, configure the LiteLLM callback settings in `configuration.toml` and set the environment variables the callback requires, as described in the LiteLLM [documentation](https://docs.litellm.ai/docs/).

For example, to use [LangSmith](https://www.langchain.com/langsmith), add the following to your `configuration.toml` file:
```
[litellm]
...
success_callback = ["langsmith"]
failure_callback = ["langsmith"]
service_callback = []
```

Then set the following environment variables:

```
LANGSMITH_API_KEY=<api_key>
LANGSMITH_PROJECT=<project>
LANGSMITH_BASE_URL=<url>
```
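
The same pattern applies to other LiteLLM-supported platforms. For example, [Langfuse](https://langfuse.com/), which the handler also recognizes (see the diff below), could be wired up as follows (a sketch, assuming Langfuse's standard environment variable names as documented by LiteLLM):
```
[litellm]
...
success_callback = ["langfuse"]
failure_callback = ["langfuse"]
service_callback = []
```

with the corresponding environment variables:

```
LANGFUSE_PUBLIC_KEY=<public_key>
LANGFUSE_SECRET_KEY=<secret_key>
LANGFUSE_HOST=<host>
```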
2 changes: 1 addition & 1 deletion pr_agent/agent/pr_agent.py
@@ -79,7 +79,7 @@ async def handle_request(self, pr_url, request, notify=None) -> bool:
         if action not in command2class:
             get_logger().debug(f"Unknown command: {action}")
             return False
-        with get_logger().contextualize(command=action):
+        with get_logger().contextualize(command=action, pr_url=pr_url):
             get_logger().info("PR-Agent request handler started", analytics=True)
             if action == "reflect_and_review":
                 get_settings().pr_reviewer.ask_and_reflect = True
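For context on the change above: `get_logger()` returns a Loguru logger, and `contextualize` binds extra fields onto every record emitted inside the `with` block; the callback code added below reads them back out of `record["extra"]`. A minimal sketch of the mechanism, using plain Loguru rather than PR-Agent's wrapper:

```
from loguru import logger

def sink(message):
    # Fields bound by contextualize() arrive in record["extra"]
    extra = message.record["extra"]
    print(extra.get("command"), extra.get("pr_url"))

handler_id = logger.add(sink)
with logger.contextualize(command="review", pr_url="<pr_url>"):
    logger.info("PR-Agent request handler started")  # sink prints: review <pr_url>
logger.remove(handler_id)
```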
66 changes: 65 additions & 1 deletion pr_agent/algo/ai_handlers/litellm_ai_handler.py
@@ -1,10 +1,10 @@
import os
import requests
import boto3
import litellm
import openai
from litellm import acompletion
from tenacity import retry, retry_if_exception_type, stop_after_attempt

from pr_agent.algo.ai_handlers.base_ai_handler import BaseAiHandler
from pr_agent.config_loader import get_settings
from pr_agent.log import get_logger
@@ -44,6 +44,12 @@ def __init__(self):
            litellm.use_client = True
        if get_settings().get("LITELLM.DROP_PARAMS", None):
            litellm.drop_params = get_settings().litellm.drop_params
        if get_settings().get("LITELLM.SUCCESS_CALLBACK", None):
            litellm.success_callback = get_settings().litellm.success_callback
        if get_settings().get("LITELLM.FAILURE_CALLBACK", None):
            litellm.failure_callback = get_settings().litellm.failure_callback
        if get_settings().get("LITELLM.SERVICE_CALLBACK", None):
            litellm.service_callback = get_settings().litellm.service_callback
        if get_settings().get("OPENAI.ORG", None):
            litellm.organization = get_settings().openai.org
        if get_settings().get("OPENAI.API_TYPE", None):
@@ -89,6 +95,60 @@ def prepare_logs(self, response, system, user, resp, finish_reason):
            response_log['main_pr_language'] = 'unknown'
        return response_log

    def add_litellm_callbacks(self, kwargs) -> dict:
        captured_extra = []

        def capture_logs(message):
            # Parse the log record and pull out the context fields bound by contextualize()
            record = message.record
            log_entry = {}
            if record.get('extra', {}).get('command', None) is not None:
                log_entry.update({"command": record['extra']["command"]})
            if record.get('extra', {}).get('pr_url', None) is not None:
                log_entry.update({"pr_url": record['extra']["pr_url"]})

            # Append the log entry to the captured_extra list
            captured_extra.append(log_entry)

        # Temporarily add the custom sink to Loguru, emit one record to capture
        # the current context, then remove the sink
        handler_id = get_logger().add(capture_logs)
        get_logger().debug("Capturing logs for litellm callbacks")
        get_logger().remove(handler_id)

        context = captured_extra[0] if captured_extra else {}

        command = context.get("command", "unknown")
        pr_url = context.get("pr_url", "unknown")
        git_provider = get_settings().config.git_provider

        metadata = {}
        callbacks = litellm.success_callback + litellm.failure_callback + litellm.service_callback
        if "langfuse" in callbacks:
            metadata.update({
                "trace_name": command,
                "tags": [git_provider, command],
                "trace_metadata": {
                    "command": command,
                    "pr_url": pr_url,
                },
            })
        if "langsmith" in callbacks:
            metadata.update({
                "run_name": command,
                "tags": [git_provider, command],
                "extra": {
                    "metadata": {
                        "command": command,
                        "pr_url": pr_url,
                    }
                },
            })

        # Attach the collected metadata to the completion kwargs
        kwargs["metadata"] = metadata

        return kwargs

    @property
    def deployment_id(self):
        """
@@ -133,6 +193,10 @@ async def chat_completion(self, model: str, system: str, user: str, temperature:
"force_timeout": get_settings().config.ai_timeout,
"api_base": self.api_base,
}

if get_settings().litellm.get("enable_callbacks", False):
kwargs = self.add_litellm_callbacks(kwargs)

seed = get_settings().config.get("seed", -1)
if temperature > 0 and seed >= 0:
raise ValueError(f"Seed ({seed}) is not supported with temperature ({temperature}) > 0")
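To see how the injected metadata reaches a platform, consider this standalone sketch. It is not PR-Agent code; it assumes LiteLLM's documented behavior of forwarding the `metadata` kwarg to registered callbacks, and reuses the keys the handler sets for Langfuse:

```
import litellm
from litellm import acompletion

litellm.success_callback = ["langfuse"]  # requires the LANGFUSE_* variables above

async def traced_completion():
    # LiteLLM forwards `metadata` to the Langfuse logger, which maps
    # trace_name, tags, and trace_metadata onto the created trace
    return await acompletion(
        model="gpt-4o",  # placeholder model name
        messages=[{"role": "user", "content": "Summarize this PR"}],
        metadata={
            "trace_name": "review",
            "tags": ["github", "review"],
            "trace_metadata": {"command": "review", "pr_url": "<pr_url>"},
        },
    )
```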
4 changes: 4 additions & 0 deletions pr_agent/settings/configuration.toml
@@ -264,6 +264,10 @@ pr_commands = [
[litellm]
# use_client = false
# drop_params = false
enable_callbacks = false
success_callback = []
failure_callback = []
service_callback = []

[pr_similar_issue]
skip_comments = false
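Taken together with the documentation change above, a fully enabled configuration would look something like the following (a sketch, assuming LangSmith). The callback lists select the platform, while `enable_callbacks = true` additionally enriches each completion call with the command, PR URL, and tags via `add_litellm_callbacks`:

```
[litellm]
enable_callbacks = true
success_callback = ["langsmith"]
failure_callback = ["langsmith"]
service_callback = []
```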