Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
b32dd28
poetry updates
smokeyScraper Jun 2, 2025
74db2c2
scoping codebase
smokeyScraper Jun 3, 2025
2e5c7f1
add global configuration
smokeyScraper Jun 3, 2025
6488370
add base agent and classification router
smokeyScraper Jun 3, 2025
7e8544b
add tools
smokeyScraper Jun 3, 2025
ea6c235
add orchestration logic
smokeyScraper Jun 3, 2025
529f22c
update config dependency
smokeyScraper Jun 3, 2025
613d168
implement devrel agent and coordinator
smokeyScraper Jun 3, 2025
aaa9c8b
implement discord bot
smokeyScraper Jun 3, 2025
23fe6bf
main.py and poetry updates
smokeyScraper Jun 3, 2025
67858f8
poetry update
smokeyScraper Jun 3, 2025
3d37fae
fix: replace sync call and use lazy logging
smokeyScraper Jun 4, 2025
cb3499a
refactor: discordBot to discord_bot
smokeyScraper Jun 4, 2025
89051f3
refactor: coderabbit refactorings
smokeyScraper Jun 4, 2025
c64cf95
refactor: coderabbit refactoring
smokeyScraper Jun 4, 2025
2182e84
refactor: modularizing codebase
smokeyScraper Jun 5, 2025
a702d36
[feature]: add naive archived thread deletion logic with timeout set …
smokeyScraper Jun 8, 2025
0c8d5ec
[fix]: fix extra classification handled by DevRel
smokeyScraper Jun 8, 2025
01dd7f0
[chore]: update .env
smokeyScraper Jun 7, 2025
3feac3d
[chore]: poetry update
smokeyScraper Jun 7, 2025
3fb8be5
[chore]: update config.py to support langsmith
smokeyScraper Jun 7, 2025
d4daecb
[feat]: add langsmith tracing
smokeyScraper Jun 7, 2025
2f2fe9f
[refactor]: refactor for coderabbit suggested changes
smokeyScraper Jun 8, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 19 additions & 4 deletions backend/.env.example
Original file line number Diff line number Diff line change
@@ -1,7 +1,22 @@
SUPABASE_URL=
SUPABASE_SERVICE_ROLE_KEY=
# API Configuration
# PORT=8000
# CORS_ORIGINS=http://localhost:3000

CORS_ORIGINS=http://localhost:3000
GITHUB_TOKEN=
# SUPABASE_URL=
# SUPABASE_SERVICE_ROLE_KEY=

DISCORD_BOT_TOKEN=
# ENABLE_DISCORD_BOT=true

# EMBEDDING_MODEL=BAAI/bge-small-en-v1.5
# EMBEDDING_MAX_BATCH_SIZE=32
# EMBEDDING_DEVICE=cpu

GEMINI_API_KEY=
TAVILY_API_KEY=

# Langsmith
LANGSMITH_TRACING=
LANGSMITH_ENDPOINT=
LANGSMITH_API_KEY=
LANGSMITH_PROJECT=
10 changes: 10 additions & 0 deletions backend/app/agents/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from .devrel.agent import DevRelAgent
from .shared.base_agent import BaseAgent, AgentState
from .shared.classification_router import ClassificationRouter

# Public API of the agents package; keep in sync with the imports above.
__all__ = [
    "DevRelAgent",
    "BaseAgent",
    "AgentState",
    "ClassificationRouter"
]
100 changes: 100 additions & 0 deletions backend/app/agents/devrel/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
import logging
from functools import partial
from typing import Any, Dict, Optional

from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.graph import StateGraph, END

from app.core.config import settings

from ..shared.base_agent import BaseAgent, AgentState
from ..shared.classification_router import MessageCategory
from .nodes.gather_context_node import gather_context_node
from .nodes.generate_response_node import generate_response_node
from .nodes.handle_faq_node import handle_faq_node
from .nodes.handle_onboarding_node import handle_onboarding_node
from .nodes.handle_technical_support_node import handle_technical_support_node
from .nodes.handle_web_search_node import handle_web_search_node
from .tools.faq_tool import FAQTool
from .tools.search_tool import TavilySearchTool

logger = logging.getLogger(__name__)

class DevRelAgent(BaseAgent):
    """DevRel LangGraph Agent for community support and engagement.

    Wires a LangGraph workflow: gather context -> route by classified
    message category -> category-specific handler -> generate response.
    """

    # Categories with a conditional edge registered in _build_graph; kept in
    # one place so routing and graph wiring stay in sync.
    _ROUTABLE_CATEGORIES = frozenset({
        MessageCategory.FAQ,
        MessageCategory.WEB_SEARCH,
        MessageCategory.ONBOARDING,
        MessageCategory.TECHNICAL_SUPPORT,
        MessageCategory.COMMUNITY_ENGAGEMENT,
        MessageCategory.DOCUMENTATION,
        MessageCategory.BUG_REPORT,
        MessageCategory.FEATURE_REQUEST,
        MessageCategory.NOT_DEVREL,
    })

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize the LLM, tools, and (via BaseAgent) the workflow graph.

        Args:
            config: Optional agent configuration; defaults to an empty dict.
        """
        self.config = config or {}
        # Low temperature keeps support answers focused and reproducible.
        self.llm = ChatGoogleGenerativeAI(
            model=settings.devrel_agent_model,
            temperature=0.3,
            google_api_key=settings.gemini_api_key
        )
        self.search_tool = TavilySearchTool()
        self.faq_tool = FAQTool()
        # Called last so the tools above exist when BaseAgent triggers graph
        # construction (presumably BaseAgent.__init__ calls _build_graph —
        # it is defined elsewhere; confirm before reordering).
        super().__init__("DevRelAgent", self.config)

    def _build_graph(self):
        """Build the DevRel agent workflow graph."""
        workflow = StateGraph(AgentState)

        # Nodes that need tools/LLM get them bound via functools.partial so
        # LangGraph can still invoke them with only the state argument.
        workflow.add_node("gather_context", gather_context_node)
        workflow.add_node("handle_faq", partial(handle_faq_node, faq_tool=self.faq_tool))
        workflow.add_node("handle_web_search", partial(
            handle_web_search_node, search_tool=self.search_tool, llm=self.llm))
        workflow.add_node("handle_technical_support", handle_technical_support_node)
        workflow.add_node("handle_onboarding", handle_onboarding_node)
        workflow.add_node("generate_response", partial(generate_response_node, llm=self.llm))

        # Route each classified category to its handler. Categories without a
        # dedicated handler currently fall back to technical support.
        workflow.add_conditional_edges(
            "gather_context",
            self._route_to_handler,
            {
                MessageCategory.FAQ: "handle_faq",
                MessageCategory.WEB_SEARCH: "handle_web_search",
                MessageCategory.ONBOARDING: "handle_onboarding",
                MessageCategory.TECHNICAL_SUPPORT: "handle_technical_support",
                MessageCategory.COMMUNITY_ENGAGEMENT: "handle_technical_support",
                MessageCategory.DOCUMENTATION: "handle_technical_support",
                MessageCategory.BUG_REPORT: "handle_technical_support",
                MessageCategory.FEATURE_REQUEST: "handle_technical_support",
                MessageCategory.NOT_DEVREL: "handle_technical_support"
            }
        )

        # All handlers lead to response generation.
        for node in ["handle_faq", "handle_web_search", "handle_technical_support", "handle_onboarding"]:
            workflow.add_edge(node, "generate_response")

        workflow.add_edge("generate_response", END)

        # Set entry point
        workflow.set_entry_point("gather_context")

        self.graph = workflow.compile()

    def _route_to_handler(self, state: AgentState) -> MessageCategory:
        """Return the MessageCategory edge key for the classified intent.

        Normalizes string intents to MessageCategory members; anything
        unknown or missing defaults to TECHNICAL_SUPPORT.
        """
        classification = state.context.get("classification", {})
        intent = classification.get("category")

        if isinstance(intent, str):
            try:
                intent = MessageCategory(intent.lower())
            except ValueError:
                logger.warning(
                    "Unknown intent string '%s', defaulting to TECHNICAL_SUPPORT", intent)
                intent = MessageCategory.TECHNICAL_SUPPORT

        logger.info("Routing based on intent: %s for session %s", intent, state.session_id)

        if intent in self._ROUTABLE_CATEGORIES:
            logger.info("Routing to handler for: %s", intent)
            return intent

        # Later to be changed to handle anomalies
        logger.info("Unknown intent '%s', routing to technical support", intent)
        return MessageCategory.TECHNICAL_SUPPORT
Empty file.
Empty file.
22 changes: 22 additions & 0 deletions backend/app/agents/devrel/nodes/gather_context_node.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import logging
from app.agents.shared.state import AgentState
from app.agents.shared.classification_router import MessageCategory

logger = logging.getLogger(__name__)

async def gather_context_node(state: AgentState) -> AgentState:
    """Collect lightweight per-session context and stash it on the state.

    Currently only echoes identifiers already present on the state; real
    lookups (user history, etc.) are still TODO.
    """
    logger.info("Gathering context for session %s", state.session_id)

    # TODO: Add context gathering from databases
    # In production, query databases for user history, etc.
    state.context.update({
        "user_profile": {"user_id": state.user_id, "platform": state.platform},
        "conversation_context": len(state.messages),
        "session_info": {"session_id": state.session_id},
    })
    state.current_task = "context_gathered"
    return state
76 changes: 76 additions & 0 deletions backend/app/agents/devrel/nodes/generate_response_node.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
import logging
from typing import Dict, Any
from app.agents.shared.state import AgentState
from langchain_core.messages import HumanMessage
from ..prompts.base_prompt import GENERAL_LLM_RESPONSE_PROMPT

logger = logging.getLogger(__name__)

async def _create_search_response(task_result: Dict[str, Any]) -> str:
    """Format web-search results into a user-facing message.

    Shows at most the first three results, each followed by its source URL,
    or an apologetic fallback when the result list is empty.
    """
    query = task_result.get("query")
    hits = task_result.get("results", [])
    if not hits:
        return f"I couldn't find any information for '{query}'. You might want to try rephrasing your search."

    lines = [f"Here's what I found for '{query}':"]
    for idx, hit in enumerate(hits[:3], start=1):
        lines.append(f"{idx}. {hit.get('title', 'N/A')}: {hit.get('snippet', 'N/A')}")
        lines.append(f" (Source: {hit.get('url', '#')})")
    lines.append("You can ask me to search again with a different query if these aren't helpful.")
    return "\n".join(lines)

async def _create_llm_response(state: AgentState, task_result: Dict[str, Any], llm) -> str:
    """Generate a response using the LLM based on the current state and task result.

    Args:
        state: Current agent state (messages, context, history).
        task_result: Output of the handler node that just ran.
        llm: Chat model exposing ``ainvoke`` (injected by the agent).

    Returns:
        The stripped text of the LLM reply, or a generic error message when
        the prompt template is malformed. Exceptions raised by
        ``llm.ainvoke`` propagate to the caller.
    """
    logger.info("Creating LLM response for session %s", state.session_id)

    # Prefer the newest state message; fall back to the original message
    # captured in context (e.g. when the message list is empty).
    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

    # Only the last five history entries are included to bound prompt size.
    conversation_history_str = "\n".join(
        f"{msg.get('type', 'unknown')}: {msg.get('content', '')}"
        for msg in state.conversation_history[-5:]
    )
    current_context_str = str(state.context)
    task_type_str = str(task_result.get("type", "N/A"))
    task_details_str = str(task_result)

    try:
        prompt = GENERAL_LLM_RESPONSE_PROMPT.format(
            latest_message=latest_message,
            conversation_history=conversation_history_str,
            current_context=current_context_str,
            task_type=task_type_str,
            task_details=task_details_str
        )
    except KeyError as e:
        # A missing placeholder means the template and this call site are out
        # of sync; surface a generic error rather than crash the workflow.
        logger.error("Missing key in GENERAL_LLM_RESPONSE_PROMPT: %s", e)
        return "Error: Response template formatting error."

    response = await llm.ainvoke([HumanMessage(content=prompt)])
    return response.content.strip()

async def generate_response_node(state: AgentState, llm) -> AgentState:
    """Produce the final user-facing reply from the handler's task result.

    FAQ results are used verbatim, web-search results are formatted, and
    everything else is routed through the LLM.
    """
    logger.info("Generating response for session %s", state.session_id)
    result = state.task_result or {}
    result_type = result.get("type")

    if result_type == "faq":
        state.final_response = result.get(
            "response", "I don't have a specific answer for that question.")
    elif result_type == "web_search":
        state.final_response = await _create_search_response(result)
    else:
        # Fallback path: let the LLM compose a reply from state + result.
        state.final_response = await _create_llm_response(state, result, llm)

    state.current_task = "response_generated"
    return state
26 changes: 26 additions & 0 deletions backend/app/agents/devrel/nodes/handle_faq_node.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import logging
from app.agents.shared.state import AgentState

logger = logging.getLogger(__name__)

async def handle_faq_node(state: AgentState, faq_tool: "FAQTool") -> AgentState:
    """Handle FAQ requests.

    Looks up the latest user message with the FAQ tool and stores the answer
    in ``state.task_result`` for the response-generation node.

    Args:
        state: Current agent state.
        faq_tool: FAQ lookup tool injected by the agent via functools.partial
            (string annotation avoids a circular import; the concrete type
            lives in app.agents.devrel.tools.faq_tool).
    """
    logger.info("Handling FAQ for session %s", state.session_id)

    # Prefer the newest state message; fall back to the original message
    # captured in context.
    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

    faq_response = await faq_tool.get_response(latest_message)

    state.task_result = {
        "type": "faq",
        "response": faq_response,
        "source": "faq_database"
    }

    state.current_task = "faq_handled"
    return state
17 changes: 17 additions & 0 deletions backend/app/agents/devrel/nodes/handle_onboarding_node.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import logging
from app.agents.shared.state import AgentState

logger = logging.getLogger(__name__)

async def handle_onboarding_node(state: AgentState) -> AgentState:
    """Record an onboarding task result on the state.

    No real onboarding logic runs yet; the node only tags the request with a
    welcome action and a fixed list of suggested next steps.
    """
    logger.info("Handling onboarding for session %s", state.session_id)

    result = {
        "type": "onboarding",
        "action": "welcome_and_guide",
        "next_steps": ["setup_environment", "first_contribution", "join_community"],
    }
    state.task_result = result
    state.current_task = "onboarding_handled"
    return state
17 changes: 17 additions & 0 deletions backend/app/agents/devrel/nodes/handle_technical_support_node.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import logging
from app.agents.shared.state import AgentState

logger = logging.getLogger(__name__)

async def handle_technical_support_node(state: AgentState) -> AgentState:
    """Record a technical-support task result on the state.

    No real triage happens yet; the node simply marks the request as
    guidance-providing and not needing human review.
    """
    logger.info("Handling technical support for session %s", state.session_id)

    state.task_result = dict(
        type="technical_support",
        action="provide_guidance",
        requires_human_review=False,
    )
    state.current_task = "technical_support_handled"
    return state
42 changes: 42 additions & 0 deletions backend/app/agents/devrel/nodes/handle_web_search_node.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
import logging
from app.agents.shared.state import AgentState
from langchain_core.messages import HumanMessage
from ..prompts.search_prompt import EXTRACT_SEARCH_QUERY_PROMPT

logger = logging.getLogger(__name__)

async def _extract_search_query(message: str, llm) -> str:
    """Extract a concise search query from the user's message.

    Args:
        message: Raw user message to distill.
        llm: Chat model exposing ``ainvoke``.

    Returns:
        The LLM-extracted query, or the unmodified message if the prompt
        template is malformed.
    """
    logger.info("Extracting search query from: %s", message[:100])
    try:
        prompt = EXTRACT_SEARCH_QUERY_PROMPT.format(message=message)
    except KeyError as e:
        logger.error("Missing key in EXTRACT_SEARCH_QUERY_PROMPT: %s", e)
        return message  # Fallback: search with the raw message
    response = await llm.ainvoke([HumanMessage(content=prompt)])
    search_query = response.content.strip()
    logger.info("Extracted search query: %s", search_query)
    return search_query

async def handle_web_search_node(state: AgentState, search_tool: "TavilySearchTool", llm) -> AgentState:
    """Handle web search requests.

    Distills a concise query from the latest user message via the LLM, runs
    it through the injected search tool, and records results on the state.

    Args:
        state: Current agent state.
        search_tool: Search tool injected by the agent via functools.partial
            (string annotation avoids importing the tool module here).
        llm: Chat model used to extract the search query.
    """
    logger.info("Handling web search for session %s", state.session_id)

    # Prefer the newest state message; fall back to the original message
    # captured in context.
    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

    search_query = await _extract_search_query(latest_message, llm)
    search_results = await search_tool.search(search_query)

    state.task_result = {
        "type": "web_search",
        "query": search_query,
        "results": search_results,
        "source": "tavily_search"
    }
    state.tools_used.append("tavily_search")
    state.current_task = "web_search_handled"
    return state
Empty file.
Empty file.
Empty file.
Empty file.
16 changes: 16 additions & 0 deletions backend/app/agents/devrel/prompts/base_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Template for the final user-facing reply (used by generate_response_node).
# Placeholders filled via str.format():
#   {latest_message}       - the user's most recent message
#   {conversation_history} - last five "type: content" history lines
#   {current_context}      - str() of the accumulated state context
#   {task_type}            - the handler's task_result "type" field
#   {task_details}         - str() of the full task_result dict
GENERAL_LLM_RESPONSE_PROMPT = (
    "You are a helpful DevRel assistant. "
    "Your goal is to assist users with their technical questions, onboarding, and community engagement.\n\n"
    "User's message: \"{latest_message}\"\n"
    "Conversation history (last 5): \n"
    "{conversation_history}\n\n"
    "Current context:\n"
    "{current_context}\n\n"
    "Task that was just handled: {task_type}\n"
    "Details of task result: \n"
    "{task_details}\n\n"
    "Based on all this information, provide a helpful and concise response.\n"
    "If the task was 'technical_support' and no specific solution was found, offer to escalate or suggest resources.\n"
    "If the task was 'onboarding', provide welcoming and guiding information.\n"
    "Response: "
)
Empty file.
Empty file.
4 changes: 4 additions & 0 deletions backend/app/agents/devrel/prompts/search_prompt.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Template used by _extract_search_query to distill a user message into a
# short web-search query; {message} is filled via str.format().
EXTRACT_SEARCH_QUERY_PROMPT = """Extract the core search query from the following user message.
User Message: "{message}"
Focus on the main topic or question. Be concise.
Search Query: """
Empty file.
1 change: 1 addition & 0 deletions backend/app/agents/devrel/state.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# Placeholder: extend app/agents/shared/state.py here if DevRel-specific state fields become necessary
Empty file.
Loading