# [feat]: LangSmith tracing integration #72
Changes from all commits:
**Environment template** — comments out the Supabase defaults, adds Discord, embedding, and GitHub settings, and introduces the LangSmith variables:

```diff
@@ -1,7 +1,22 @@
-SUPABASE_URL=
-SUPABASE_SERVICE_ROLE_KEY=
-# API Configuration
-# PORT=8000
-# CORS_ORIGINS=http://localhost:3000
+CORS_ORIGINS=http://localhost:3000
+GITHUB_TOKEN=
+# SUPABASE_URL=
+# SUPABASE_SERVICE_ROLE_KEY=
+
+DISCORD_BOT_TOKEN=
+# ENABLE_DISCORD_BOT=true
+
+# EMBEDDING_MODEL=BAAI/bge-small-en-v1.5
+# EMBEDDING_MAX_BATCH_SIZE=32
+# EMBEDDING_DEVICE=cpu
+
 GEMINI_API_KEY=
 TAVILY_API_KEY=
+
+# Langsmith
+LANGSMITH_TRACING=
+LANGSMITH_ENDPOINT=
+LANGSMITH_API_KEY=
+LANGSMITH_PROJECT=
```
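These four `LANGSMITH_*` variables are the integration's switchboard: with `LANGSMITH_TRACING=true` and an API key set, LangChain/LangGraph runs are traced automatically, and `LANGSMITH_ENDPOINT` can usually be left at the hosted default unless you run a self-hosted instance. A minimal sketch of opting a plain function into the same tracing, assuming the standard `langsmith` client — the project name and values below are placeholders, not from this PR:

```python
# Hedged sketch: relies on LANGSMITH_TRACING / LANGSMITH_API_KEY being set,
# e.g. via the .env template above. Values here are placeholders.
import os

from langsmith import traceable

os.environ.setdefault("LANGSMITH_TRACING", "true")
os.environ.setdefault("LANGSMITH_PROJECT", "devrel-agent")  # hypothetical project name


@traceable(run_type="chain", name="devrel_demo")
def answer(question: str) -> str:
    # Work done here (including any nested LangChain calls) shows up
    # as a single run tree in the LangSmith project named above.
    return f"echo: {question}"


print(answer("How do I enable tracing?"))
```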
**Agents package exports** (`@@ -0,0 +1,10 @@`):

```python
from .devrel.agent import DevRelAgent
from .shared.base_agent import BaseAgent, AgentState
from .shared.classification_router import ClassificationRouter

__all__ = [
    "DevRelAgent",
    "BaseAgent",
    "AgentState",
    "ClassificationRouter"
]
```
**DevRel agent graph** (`@@ -0,0 +1,100 @@`):

```python
import logging
from typing import Dict, Any
from functools import partial
from langgraph.graph import StateGraph, END
from langchain_google_genai import ChatGoogleGenerativeAI
from ..shared.base_agent import BaseAgent, AgentState
from ..shared.classification_router import MessageCategory
from .tools.search_tool import TavilySearchTool
from .tools.faq_tool import FAQTool
from app.core.config import settings
from .nodes.gather_context_node import gather_context_node
from .nodes.handle_faq_node import handle_faq_node
from .nodes.handle_web_search_node import handle_web_search_node
from .nodes.handle_technical_support_node import handle_technical_support_node
from .nodes.handle_onboarding_node import handle_onboarding_node
from .nodes.generate_response_node import generate_response_node

logger = logging.getLogger(__name__)


class DevRelAgent(BaseAgent):
    """DevRel LangGraph agent for community support and engagement"""

    def __init__(self, config: Dict[str, Any] = None):
        self.config = config or {}
        self.llm = ChatGoogleGenerativeAI(
            model=settings.devrel_agent_model,
            temperature=0.3,
            google_api_key=settings.gemini_api_key
        )
        self.search_tool = TavilySearchTool()
        self.faq_tool = FAQTool()
        super().__init__("DevRelAgent", self.config)

    def _build_graph(self):
        """Build the DevRel agent workflow graph"""
        workflow = StateGraph(AgentState)

        # Add nodes
        workflow.add_node("gather_context", gather_context_node)
        workflow.add_node("handle_faq", partial(handle_faq_node, faq_tool=self.faq_tool))
        workflow.add_node("handle_web_search", partial(
            handle_web_search_node, search_tool=self.search_tool, llm=self.llm))
        workflow.add_node("handle_technical_support", handle_technical_support_node)
        workflow.add_node("handle_onboarding", handle_onboarding_node)
        workflow.add_node("generate_response", partial(generate_response_node, llm=self.llm))

        # Add edges
        workflow.add_conditional_edges(
            "gather_context",
            self._route_to_handler,
            {
                MessageCategory.FAQ: "handle_faq",
                MessageCategory.WEB_SEARCH: "handle_web_search",
                MessageCategory.ONBOARDING: "handle_onboarding",
                MessageCategory.TECHNICAL_SUPPORT: "handle_technical_support",
                MessageCategory.COMMUNITY_ENGAGEMENT: "handle_technical_support",
                MessageCategory.DOCUMENTATION: "handle_technical_support",
                MessageCategory.BUG_REPORT: "handle_technical_support",
                MessageCategory.FEATURE_REQUEST: "handle_technical_support",
                MessageCategory.NOT_DEVREL: "handle_technical_support"
            }
        )

        # All handlers lead to response generation
        for node in ["handle_faq", "handle_web_search", "handle_technical_support", "handle_onboarding"]:
            workflow.add_edge(node, "generate_response")

        workflow.add_edge("generate_response", END)

        # Set entry point
        workflow.set_entry_point("gather_context")

        self.graph = workflow.compile()

    def _route_to_handler(self, state: AgentState) -> str:
        """Route to the appropriate handler based on intent"""
        classification = state.context.get("classification", {})
        intent = classification.get("category")

        if isinstance(intent, str):
            try:
                intent = MessageCategory(intent.lower())
            except ValueError:
                logger.warning(f"Unknown intent string '{intent}', defaulting to TECHNICAL_SUPPORT")
                intent = MessageCategory.TECHNICAL_SUPPORT

        logger.info(f"Routing based on intent: {intent} for session {state.session_id}")

        # The conditional-edge mapping above is keyed by MessageCategory members,
        # so return the enum value itself
        if intent in [MessageCategory.FAQ, MessageCategory.WEB_SEARCH,
                      MessageCategory.ONBOARDING, MessageCategory.TECHNICAL_SUPPORT,
                      MessageCategory.COMMUNITY_ENGAGEMENT, MessageCategory.DOCUMENTATION,
                      MessageCategory.BUG_REPORT, MessageCategory.FEATURE_REQUEST,
                      MessageCategory.NOT_DEVREL]:
            logger.info(f"Routing to handler for: {intent}")
            return intent

        # Fallback; anomaly handling to be added later
        logger.info(f"Unknown intent '{intent}', routing to technical support")
        return MessageCategory.TECHNICAL_SUPPORT
```
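For orientation, a rough driver for the compiled graph — not part of the diff. The `AgentState` constructor fields are assumed from what the nodes read (`session_id`, `user_id`, `platform`, `messages`, `context`), and all identifiers are placeholders:

```python
# Hypothetical usage sketch: push one pre-classified message through the
# graph built in _build_graph(). Field names mirror what the nodes read.
import asyncio

from app.agents import DevRelAgent
from app.agents.shared.base_agent import AgentState


async def main():
    agent = DevRelAgent()
    state = AgentState(
        session_id="demo-session",   # placeholder identifiers
        user_id="user-123",
        platform="discord",
        messages=[{"type": "human", "content": "How do I get started contributing?"}],
        context={"classification": {"category": "onboarding"}},
    )
    result = await agent.graph.ainvoke(state)
    print(result)


asyncio.run(main())
```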
**Context-gathering node** (`@@ -0,0 +1,22 @@`):

```python
import logging
from app.agents.shared.state import AgentState
from app.agents.shared.classification_router import MessageCategory

logger = logging.getLogger(__name__)


async def gather_context_node(state: AgentState) -> AgentState:
    """Gather additional context for the user and their request"""
    logger.info(f"Gathering context for session {state.session_id}")

    # TODO: Add context gathering from databases
    # Currently, context is simple
    # In production, query databases for user history, etc.
    context_data = {
        "user_profile": {"user_id": state.user_id, "platform": state.platform},
        "conversation_context": len(state.messages),
        "session_info": {"session_id": state.session_id}
    }

    state.context.update(context_data)
    state.current_task = "context_gathered"
    return state
```
**Response-generation node** (`@@ -0,0 +1,76 @@`):

```python
import logging
from typing import Dict, Any
from app.agents.shared.state import AgentState
from langchain_core.messages import HumanMessage
from ..prompts.base_prompt import GENERAL_LLM_RESPONSE_PROMPT

logger = logging.getLogger(__name__)


async def _create_search_response(task_result: Dict[str, Any]) -> str:
    """Create a response string from search results."""
    query = task_result.get("query")
    results = task_result.get("results", [])
    if not results:
        return f"I couldn't find any information for '{query}'. You might want to try rephrasing your search."

    response_parts = [f"Here's what I found for '{query}':"]
    for i, result in enumerate(results[:3]):
        title = result.get('title', 'N/A')
        snippet = result.get('snippet', 'N/A')
        url = result.get('url', '#')
        result_line = f"{i+1}. {title}: {snippet}"
        response_parts.append(result_line)
        response_parts.append(f"   (Source: {url})")
    response_parts.append("You can ask me to search again with a different query if these aren't helpful.")
    return "\n".join(response_parts)


async def _create_llm_response(state: AgentState, task_result: Dict[str, Any], llm) -> str:
    """Generate a response using the LLM based on the current state and task result."""
    logger.info(f"Creating LLM response for session {state.session_id}")

    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

    conversation_history_str = "\n".join([
        f"{msg.get('type', 'unknown')}: {msg.get('content', '')}"
        for msg in state.conversation_history[-5:]
    ])
    current_context_str = str(state.context)
    task_type_str = str(task_result.get("type", "N/A"))
    task_details_str = str(task_result)

    try:
        prompt = GENERAL_LLM_RESPONSE_PROMPT.format(
            latest_message=latest_message,
            conversation_history=conversation_history_str,
            current_context=current_context_str,
            task_type=task_type_str,
            task_details=task_details_str
        )
    except KeyError as e:
        logger.error(f"Missing key in GENERAL_LLM_RESPONSE_PROMPT: {e}")
        return "Error: Response template formatting error."

    response = await llm.ainvoke([HumanMessage(content=prompt)])
    return response.content.strip()


async def generate_response_node(state: AgentState, llm) -> AgentState:
    """Generate final response to user"""
    logger.info(f"Generating response for session {state.session_id}")
    task_result = state.task_result or {}

    if task_result.get("type") == "faq":
        state.final_response = task_result.get("response", "I don't have a specific answer for that question.")
    elif task_result.get("type") == "web_search":
        response = await _create_search_response(task_result)
        state.final_response = response
    else:
        # Pass the llm instance to _create_llm_response
        response = await _create_llm_response(state, task_result, llm)
        state.final_response = response

    state.current_task = "response_generated"
    return state
```
**FAQ handler node** (`@@ -0,0 +1,26 @@`):

```python
import logging
from app.agents.shared.state import AgentState

logger = logging.getLogger(__name__)


async def handle_faq_node(state: AgentState, faq_tool) -> AgentState:
    """Handle FAQ requests"""
    logger.info(f"Handling FAQ for session {state.session_id}")

    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

    # faq_tool will be passed from the agent, similar to llm for classify_intent
    faq_response = await faq_tool.get_response(latest_message)

    state.task_result = {
        "type": "faq",
        "response": faq_response,
        "source": "faq_database"
    }

    state.current_task = "faq_handled"
    return state
```

> **Resolved review comment** (🛠️ refactor suggestion): add a type hint for the `faq_tool` parameter — the function receives it via `partial` binding in the agent, so the hint improves code clarity and IDE support:
>
> ```diff
> -async def handle_faq_node(state: AgentState, faq_tool) -> AgentState:
> +async def handle_faq_node(state: AgentState, faq_tool: "FAQTool") -> AgentState:
> ```
>
> Note: use a string annotation to avoid circular import issues if needed.
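The string-annotation route the reviewer mentions typically pairs with a `TYPE_CHECKING` guard so the import never runs at runtime. A generic sketch, not code from this PR — the `FAQTool` module path is assumed from the agent's `from .tools.faq_tool import FAQTool` import:

```python
# Generic sketch of the reviewer's suggestion: guard the import so it only
# runs for type checkers, then reference the class as a string annotation.
from typing import TYPE_CHECKING

from app.agents.shared.state import AgentState

if TYPE_CHECKING:
    # Imported only during type checking, so no circular import at runtime;
    # module path assumed from the agent's imports.
    from app.agents.devrel.tools.faq_tool import FAQTool


async def handle_faq_node(state: AgentState, faq_tool: "FAQTool") -> AgentState:
    """Same signature as above, now with the hinted parameter."""
    ...
```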
**Onboarding handler node** (`@@ -0,0 +1,17 @@`):

```python
import logging
from app.agents.shared.state import AgentState

logger = logging.getLogger(__name__)


async def handle_onboarding_node(state: AgentState) -> AgentState:
    """Handle onboarding requests"""
    logger.info(f"Handling onboarding for session {state.session_id}")

    state.task_result = {
        "type": "onboarding",
        "action": "welcome_and_guide",
        "next_steps": ["setup_environment", "first_contribution", "join_community"]
    }

    state.current_task = "onboarding_handled"
    return state
```
**Technical-support handler node** (`@@ -0,0 +1,17 @@`):

```python
import logging
from app.agents.shared.state import AgentState

logger = logging.getLogger(__name__)


async def handle_technical_support_node(state: AgentState) -> AgentState:
    """Handle technical support requests"""
    logger.info(f"Handling technical support for session {state.session_id}")

    state.task_result = {
        "type": "technical_support",
        "action": "provide_guidance",
        "requires_human_review": False
    }

    state.current_task = "technical_support_handled"
    return state
```
**Web-search handler node** (`@@ -0,0 +1,42 @@`):

```python
import logging
from app.agents.shared.state import AgentState
from langchain_core.messages import HumanMessage
from ..prompts.search_prompt import EXTRACT_SEARCH_QUERY_PROMPT

logger = logging.getLogger(__name__)


async def _extract_search_query(message: str, llm) -> str:
    """Extract a concise search query from the user's message."""
    logger.info(f"Extracting search query from: {message[:100]}")
    try:
        prompt = EXTRACT_SEARCH_QUERY_PROMPT.format(message=message)
    except KeyError as e:
        logger.error(f"Missing key in EXTRACT_SEARCH_QUERY_PROMPT: {e}")
        return message  # Fallback: search with the raw message
    response = await llm.ainvoke([HumanMessage(content=prompt)])
    search_query = response.content.strip()
    logger.info(f"Extracted search query: {search_query}")
    return search_query


async def handle_web_search_node(state: AgentState, search_tool, llm) -> AgentState:
    """Handle web search requests"""
    logger.info(f"Handling web search for session {state.session_id}")

    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

    search_query = await _extract_search_query(latest_message, llm)
    search_results = await search_tool.search(search_query)

    state.task_result = {
        "type": "web_search",
        "query": search_query,
        "results": search_results,
        "source": "tavily_search"
    }
    state.tools_used.append("tavily_search")
    state.current_task = "web_search_handled"
    return state
```
**General response prompt** (`@@ -0,0 +1,16 @@`):

```python
GENERAL_LLM_RESPONSE_PROMPT = (
    "You are a helpful DevRel assistant. "
    "Your goal is to assist users with their technical questions, onboarding, and community engagement.\n\n"
    "User's message: \"{latest_message}\"\n"
    "Conversation history (last 5): \n"
    "{conversation_history}\n\n"
    "Current context:\n"
    "{current_context}\n\n"
    "Task that was just handled: {task_type}\n"
    "Details of task result: \n"
    "{task_details}\n\n"
    "Based on all this information, provide a helpful and concise response.\n"
    "If the task was 'technical_support' and no specific solution was found, offer to escalate or suggest resources.\n"
    "If the task was 'onboarding', provide welcoming and guiding information.\n"
    "Response: "
)
```
**Search-query extraction prompt** (`@@ -0,0 +1,4 @@`):

```python
EXTRACT_SEARCH_QUERY_PROMPT = """Extract the core search query from the following user message.
User Message: "{message}"
Focus on the main topic or question. Be concise.
Search Query: """
```
**State placeholder** (`@@ -0,0 +1 @@`):

```python
# Placeholder to enhance ..shared/state.py if required
```