Skip to content

Commit

Permalink
fix(langsmith.py): support 'run_id' for langsmith
Browse files Browse the repository at this point in the history
Fixes #6862
  • Loading branch information
krrishdholakia committed Dec 1, 2024
1 parent 927f9fa commit 147dfa6
Show file tree
Hide file tree
Showing 6 changed files with 49 additions and 37 deletions.
6 changes: 5 additions & 1 deletion litellm/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,11 @@
_turn_on_json,
log_level,
)
from litellm.constants import ROUTER_MAX_FALLBACKS
from litellm.constants import (
DEFAULT_BATCH_SIZE,
DEFAULT_FLUSH_INTERVAL_SECONDS,
ROUTER_MAX_FALLBACKS,
)
from litellm.types.guardrails import GuardrailItem
from litellm.proxy._types import (
KeyManagementSystem,
Expand Down
2 changes: 2 additions & 0 deletions litellm/constants.py
Original file line number Diff line number Diff line change
@@ -1 +1,3 @@
# Maximum fallback attempts — presumably per-request router fallbacks; TODO confirm against router usage.
ROUTER_MAX_FALLBACKS = 5
# Default number of queued log items before a batch logger flushes (used as the
# fallback when CustomBatchLogger is constructed with batch_size=None).
DEFAULT_BATCH_SIZE = 512
# Default seconds between periodic flushes of buffered logs (fallback when
# CustomBatchLogger is constructed with flush_interval=None).
DEFAULT_FLUSH_INTERVAL_SECONDS = 5
13 changes: 5 additions & 8 deletions litellm/integrations/custom_batch_logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,34 +8,31 @@
import time
from typing import List, Literal, Optional

import litellm
from litellm._logging import verbose_logger
from litellm.integrations.custom_logger import CustomLogger

DEFAULT_BATCH_SIZE = 512
DEFAULT_FLUSH_INTERVAL_SECONDS = 5


class CustomBatchLogger(CustomLogger):

def __init__(
    self,
    flush_lock: Optional[asyncio.Lock] = None,
    batch_size: Optional[int] = None,
    flush_interval: Optional[int] = None,
    **kwargs,
) -> None:
    """
    Initialize a buffering logger that flushes its queue in batches.

    Args:
        flush_lock (Optional[asyncio.Lock], optional): Lock to use when flushing
            the queue. Defaults to None. Only used for custom loggers that do
            batching.
        batch_size (Optional[int], optional): Flush once this many items are
            queued. When None, falls back to litellm.DEFAULT_BATCH_SIZE.
        flush_interval (Optional[int], optional): Seconds between periodic
            flushes. When None, falls back to
            litellm.DEFAULT_FLUSH_INTERVAL_SECONDS.
        **kwargs: Forwarded to the CustomLogger base class.
    """
    self.log_queue: List = []
    # Resolve defaults lazily from the litellm module (not module-level
    # constants) so callers/tests can override litellm.DEFAULT_* globally
    # before constructing a logger.
    self.flush_interval = flush_interval or litellm.DEFAULT_FLUSH_INTERVAL_SECONDS
    self.batch_size: int = batch_size or litellm.DEFAULT_BATCH_SIZE
    self.last_flush_time = time.time()
    self.flush_lock = flush_lock

    super().__init__(**kwargs)

async def periodic_flush(self):
while True:
Expand Down
23 changes: 19 additions & 4 deletions litellm/integrations/langsmith.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@ def __init__(
self.log_queue: List[LangsmithQueueObject] = []
asyncio.create_task(self.periodic_flush())
self.flush_lock = asyncio.Lock()

super().__init__(**kwargs, flush_lock=self.flush_lock)

def get_credentials_from_env(
Expand Down Expand Up @@ -122,7 +123,7 @@ def _prepare_log_data(
"project_name", credentials["LANGSMITH_PROJECT"]
)
run_name = metadata.get("run_name", self.langsmith_default_run_name)
run_id = metadata.get("id", None)
run_id = metadata.get("id", metadata.get("run_id", None))
parent_run_id = metadata.get("parent_run_id", None)
trace_id = metadata.get("trace_id", None)
session_id = metadata.get("session_id", None)
Expand Down Expand Up @@ -173,14 +174,28 @@ def _prepare_log_data(
if dotted_order:
data["dotted_order"] = dotted_order

run_id: Optional[str] = data.get("id") # type: ignore
if "id" not in data or data["id"] is None:
"""
for /batch langsmith requires id, trace_id and dotted_order passed as params
"""
run_id = str(uuid.uuid4())
data["id"] = str(run_id)
data["trace_id"] = str(run_id)
data["dotted_order"] = self.make_dot_order(run_id=run_id)

data["id"] = run_id

if (
"trace_id" not in data
or data["trace_id"] is None
and (run_id is not None and isinstance(run_id, str))
):
data["trace_id"] = run_id

if (
"dotted_order" not in data
or data["dotted_order"] is None
and (run_id is not None and isinstance(run_id, str))
):
data["dotted_order"] = self.make_dot_order(run_id=run_id) # type: ignore

verbose_logger.debug("Langsmith Logging data on langsmith: %s", data)

Expand Down
20 changes: 1 addition & 19 deletions litellm/proxy/_new_secret_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,24 +15,6 @@ model_list:
litellm_params:
model: openai/gpt-4o-realtime-preview-2024-10-01
api_key: os.environ/OPENAI_API_KEY

router_settings:
routing_strategy: usage-based-routing-v2
#redis_url: "os.environ/REDIS_URL"
redis_host: "os.environ/REDIS_HOST"
redis_port: "os.environ/REDIS_PORT"

litellm_settings:
cache: true
cache_params:
type: redis
host: "os.environ/REDIS_HOST"
port: "os.environ/REDIS_PORT"
namespace: "litellm.caching"
ttl: 600
# key_generation_settings:
# team_key_generation:
# allowed_team_member_roles: ["admin"]
# required_params: ["tags"] # require team admins to set tags for cost-tracking when generating a team key
# personal_key_generation: # maps to 'Default Team' on UI
# allowed_user_roles: ["proxy_admin"]
success_callback: ["langsmith"]
22 changes: 17 additions & 5 deletions tests/local_testing/test_langsmith.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,17 @@ def test_async_langsmith_logging_with_metadata():
@pytest.mark.asyncio
async def test_async_langsmith_logging_with_streaming_and_metadata(sync_mode):
try:
litellm.DEFAULT_BATCH_SIZE = 1
litellm.DEFAULT_FLUSH_INTERVAL_SECONDS = 1
test_langsmith_logger = LangsmithLogger()
litellm.success_callback = ["langsmith"]
litellm.set_verbose = True
run_id = str(uuid.uuid4())
run_id = "497f6eca-6276-4993-bfeb-53cbbbba6f08"
run_name = "litellmRUN"
test_metadata = {
"run_name": run_name, # langsmith run name
"run_id": run_id, # langsmith run id
}

messages = [{"role": "user", "content": "what llm are u"}]
if sync_mode is True:
Expand All @@ -66,7 +73,7 @@ async def test_async_langsmith_logging_with_streaming_and_metadata(sync_mode):
max_tokens=10,
temperature=0.2,
stream=True,
metadata={"id": run_id},
metadata=test_metadata,
)
for cb in litellm.callbacks:
if isinstance(cb, LangsmithLogger):
Expand All @@ -82,7 +89,7 @@ async def test_async_langsmith_logging_with_streaming_and_metadata(sync_mode):
temperature=0.2,
mock_response="This is a mock request",
stream=True,
metadata={"id": run_id},
metadata=test_metadata,
)
for cb in litellm.callbacks:
if isinstance(cb, LangsmithLogger):
Expand All @@ -100,11 +107,16 @@ async def test_async_langsmith_logging_with_streaming_and_metadata(sync_mode):

input_fields_on_langsmith = logged_run_on_langsmith.get("inputs")

extra_fields_on_langsmith = logged_run_on_langsmith.get("extra").get(
extra_fields_on_langsmith = logged_run_on_langsmith.get("extra", {}).get(
"invocation_params"
)

assert logged_run_on_langsmith.get("run_type") == "llm"
assert (
logged_run_on_langsmith.get("run_type") == "llm"
), f"run_type should be llm. Got: {logged_run_on_langsmith.get('run_type')}"
assert (
logged_run_on_langsmith.get("name") == run_name
), f"name should be {run_name}. Got: {logged_run_on_langsmith.get('name')}"
print("\nLogged INPUT ON LANGSMITH", input_fields_on_langsmith)

print("\nextra fields on langsmith", extra_fields_on_langsmith)
Expand Down

0 comments on commit 147dfa6

Please sign in to comment.