fix(langchain): instrument chat models #741

Merged 1 commit on Apr 3, 2024
opentelemetry/instrumentation/langchain/__init__.py
@@ -21,6 +21,10 @@
     llm_wrapper,
     allm_wrapper,
 )
+from opentelemetry.instrumentation.langchain.custom_chat_wrapper import (
+    chat_wrapper,
+    achat_wrapper,
+)
 from opentelemetry.instrumentation.langchain.version import __version__
 
 from opentelemetry.semconv.ai import TraceloopSpanKindValues
@@ -101,14 +105,14 @@
     {
         "package": "langchain.chat_models.base",
         "object": "BaseChatModel",
-        "method": "invoke",
-        "wrapper": task_wrapper,
+        "method": "generate",
+        "wrapper": chat_wrapper,
     },
     {
         "package": "langchain.chat_models.base",
         "object": "BaseChatModel",
-        "method": "ainvoke",
-        "wrapper": atask_wrapper,
+        "method": "agenerate",
+        "wrapper": achat_wrapper,
     },
     {
         "package": "langchain.schema",
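
With this change, BaseChatModel.generate and agenerate are routed to the new chat-specific wrappers instead of being traced as generic tasks via invoke/ainvoke. For orientation, here is a minimal sketch of how a WRAPPED_METHODS table like the one above is typically applied; the instrumentor body is not part of this diff, so the wrapt-based application and the helper name apply_wrapped_methods are assumptions, not the project's actual code:

    from wrapt import wrap_function_wrapper

    def apply_wrapped_methods(tracer, wrapped_methods):
        # Illustrative sketch: wrap each (package, object, method) with its
        # wrapper, pre-binding the tracer and the table entry.
        for entry in wrapped_methods:
            wrap_function_wrapper(
                entry["package"],
                f"{entry['object']}.{entry['method']}",  # e.g. "BaseChatModel.generate"
                entry["wrapper"](tracer, entry),
            )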
opentelemetry/instrumentation/langchain/custom_chat_wrapper.py (new file)
@@ -0,0 +1,66 @@
import json
from opentelemetry import context as context_api
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY

from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues

from opentelemetry.instrumentation.langchain.utils import _with_tracer_wrapper
from opentelemetry.instrumentation.langchain.utils import should_send_prompts


@_with_tracer_wrapper
def chat_wrapper(tracer, to_wrap, wrapped, instance, args, kwargs):
"""Instruments and calls every function defined in TO_WRAP."""
if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
return wrapped(*args, **kwargs)

name = f"langchain.task.{instance.__class__.__name__}"
with tracer.start_as_current_span(name) as span:
_handle_request(span, args, kwargs, instance)
return_value = wrapped(*args, **kwargs)
_handle_response(span, return_value)

return return_value


@_with_tracer_wrapper
async def achat_wrapper(tracer, to_wrap, wrapped, instance, args, kwargs):
"""Instruments and calls every function defined in TO_WRAP."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        # Await here so suppressed calls return the result, not a bare coroutine.
        return await wrapped(*args, **kwargs)

name = f"langchain.task.{instance.__class__.__name__}"
with tracer.start_as_current_span(name) as span:
_handle_request(span, args, kwargs, instance)
return_value = await wrapped(*args, **kwargs)
_handle_response(span, return_value)

return return_value


def _handle_request(span, args, kwargs, instance):
    # Chat model implementations expose the model id as either `model` or
    # `model_name`, depending on the provider class.
    model = instance.model if hasattr(instance, "model") else instance.model_name
    span.set_attribute(SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.CHAT.value)
    span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model)
    span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, model)

    if should_send_prompts():
        # generate() receives a list of message lists; record the messages of
        # the first conversation. The role is recorded as "user" regardless of
        # the message type.
        for idx, prompt in enumerate(args[0][0]):
            if isinstance(prompt.content, list):
                span.set_attribute(
                    f"{SpanAttributes.LLM_PROMPTS}.{idx}.user",
                    json.dumps(prompt.content),
                )
            else:
                span.set_attribute(
                    f"{SpanAttributes.LLM_PROMPTS}.{idx}.user", prompt.content
                )


def _handle_response(span, return_value):
    if should_send_prompts():
        # LLMResult.generations holds one list of generations per input
        # conversation; record the first generation's text for each.
        for idx, generation in enumerate(return_value.generations):
            span.set_attribute(
                f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.content",
                generation[0].text,
            )
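End to end, a hedged usage sketch of what the new instrumentation captures; the LangchainInstrumentor entry point and the ChatOpenAI class are assumptions based on the surrounding project and the langchain API of this era, not part of this diff:

    from langchain.chat_models import ChatOpenAI
    from langchain.schema import HumanMessage
    from opentelemetry.instrumentation.langchain import LangchainInstrumentor

    LangchainInstrumentor().instrument()

    chat = ChatOpenAI(model_name="gpt-3.5-turbo")
    # generate() is now wrapped by chat_wrapper, so this call produces a
    # "langchain.task.ChatOpenAI" span carrying LLM_REQUEST_TYPE=chat plus the
    # request/response model attributes; with prompt capture enabled
    # (should_send_prompts), the message contents and each first generation's
    # text are recorded as well.
    result = chat.generate([[HumanMessage(content="Tell me a joke")]])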