3 changes: 2 additions & 1 deletion backend/app/agents/devrel/agent.py
@@ -43,7 +43,8 @@ def _build_graph(self):
        # Phase 2: ReAct Supervisor - Decide what to do next
        workflow.add_node("react_supervisor", partial(react_supervisor_node, llm=self.llm))
        workflow.add_node("web_search_tool", partial(web_search_tool_node, search_tool=self.search_tool, llm=self.llm))
-        workflow.add_node("faq_handler_tool", partial(faq_handler_tool_node, faq_tool=self.faq_tool))
+        workflow.add_node("faq_handler_tool", partial(
+            faq_handler_tool_node, search_tool=self.search_tool, llm=self.llm))
        workflow.add_node("onboarding_tool", onboarding_tool_node)
        workflow.add_node("github_toolkit_tool", partial(github_toolkit_tool_node, github_toolkit=self.github_toolkit))

141 changes: 135 additions & 6 deletions backend/app/agents/devrel/nodes/handlers/faq.py
@@ -1,26 +1,155 @@
import logging
from typing import List, Dict
from app.agents.state import AgentState
from langchain_core.messages import HumanMessage

logger = logging.getLogger(__name__)

-async def handle_faq_node(state: AgentState, faq_tool) -> dict:
-    """Handle FAQ requests"""
-    logger.info(f"Handling FAQ for session {state.session_id}")
+async def handle_faq_node(state: AgentState, search_tool, llm) -> dict:
+    """Handle FAQ requests dynamically using web search and AI synthesis"""
+    logger.info(f"Handling dynamic FAQ for session {state.session_id}")

    latest_message = ""
    if state.messages:
        latest_message = state.messages[-1].get("content", "")
    elif state.context.get("original_message"):
        latest_message = state.context["original_message"]

Comment on lines 12 to 17
πŸ› οΈ Refactor suggestion

Guard against empty user message

If no message is present, return a helpful prompt instead of calling the pipeline with an empty query.

     latest_message = ""
     if state.messages:
         latest_message = state.messages[-1].get("content", "")
     elif state.context.get("original_message"):
         latest_message = state.context["original_message"]
 
+    if not (latest_message or "").strip():
+        logger.info("No message found for FAQ; returning guidance")
+        return {
+            "task_result": {
+                "type": "faq",
+                "response": "Ask me anything about our organization (e.g., 'How does this organization work?' or 'What projects do you maintain?').",
+                "source": "dynamic_web_search"
+            },
+            "current_task": "faq_handled"
+        }
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
latest_message = ""
if state.messages:
latest_message = state.messages[-1].get("content", "")
elif state.context.get("original_message"):
latest_message = state.context["original_message"]
latest_message = ""
if state.messages:
latest_message = state.messages[-1].get("content", "")
elif state.context.get("original_message"):
latest_message = state.context["original_message"]
if not (latest_message or "").strip():
logger.info("No message found for FAQ; returning guidance")
return {
"task_result": {
"type": "faq",
"response": "Ask me anything about our organization (e.g., 'How does this organization work?' or 'What projects do you maintain?').",
"source": "dynamic_web_search"
},
"current_task": "faq_handled"
}
🤖 Prompt for AI Agents
In backend/app/agents/devrel/nodes/handlers/faq.py around lines 12 to 17, the
code assigns latest_message from state messages or context but does not handle
the case when latest_message is empty. Add a guard to check if latest_message is
empty or None, and if so, return a helpful prompt message instead of proceeding
to call the pipeline with an empty query. This prevents unnecessary processing
and improves user experience.

-    # faq_tool will be passed from the agent, similar to llm for classify_intent
-    faq_response = await faq_tool.get_response(latest_message)
+    # Dynamic FAQ processing (replaces static faq_tool.get_response)
+    faq_response = await _dynamic_faq_process(latest_message, search_tool, llm, org_name="Devr.AI")

Comment on lines +18 to 20
πŸ› οΈ Refactor suggestion

Avoid hard-coded org name; make it configurable

Hard-coding "Devr.AI" limits reuse. Derive from state/context or settings with a sensible fallback.

-from app.agents.state import AgentState
+from app.agents.state import AgentState
+from app.core.config import settings
@@
-    # Dynamic FAQ processing (replaces static faq_tool.get_response)
-    faq_response = await _dynamic_faq_process(latest_message, search_tool, llm, org_name="Devr.AI")
+    # Dynamic FAQ processing (replaces static faq_tool.get_response)
+    org_name = (state.context.get("org_name") if getattr(state, "context", None) else None) \
+        or getattr(settings, "org_name", None) \
+        or "Devr.AI"
+    faq_response = await _dynamic_faq_process(latest_message, search_tool, llm, org_name=org_name)
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Dynamic FAQ processing (replaces static faq_tool.get_response)
faq_response = await _dynamic_faq_process(latest_message, search_tool, llm, org_name="Devr.AI")
from app.agents.state import AgentState
from app.core.config import settings
# Dynamic FAQ processing (replaces static faq_tool.get_response)
org_name = (state.context.get("org_name") if getattr(state, "context", None) else None) \
or getattr(settings, "org_name", None) \
or "Devr.AI"
faq_response = await _dynamic_faq_process(latest_message, search_tool, llm, org_name=org_name)
🤖 Prompt for AI Agents
In backend/app/agents/devrel/nodes/handlers/faq.py around lines 18 to 20, the
org_name parameter is hard-coded as "Devr.AI" in the call to
_dynamic_faq_process. Modify the code to obtain the org_name dynamically from
the current state, context, or configuration settings, and provide a sensible
default fallback if none is available, instead of using a fixed string.
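
For the settings fallback above to resolve, app.core.config would also need an org_name field. A possible shape, assuming a pydantic-settings BaseSettings class (the config module is not shown in this PR, so this is illustrative, not the project's actual definition):

from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    # Existing fields omitted; org_name can be overridden via the ORG_NAME env var.
    org_name: str = "Devr.AI"

settings = Settings()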

    return {
        "task_result": {
            "type": "faq",
            "response": faq_response,
-            "source": "faq_database"
+            "source": "dynamic_web_search"  # Updated source
        },
        "current_task": "faq_handled"
    }

async def _dynamic_faq_process(message: str, search_tool, llm, org_name: str = "Devr.AI") -> str:
    """
    Dynamic FAQ handler that implements the 5-step process:
    1. Intent Detection & Query Refinement
    2. Web Search (DuckDuckGo)
    3. AI-Powered Synthesis
    4. Generate Final Response
    5. Format with Sources
    """

    try:
        # Step 1: Intent Detection & Query Refinement
        logger.info(f"Step 1: Refining FAQ query for org '{org_name}'")
        refined_query = await _refine_faq_query(message, llm, org_name)

        # Step 2: Dynamic Web Search
        logger.info(f"Step 2: Searching for: {refined_query}")
        search_results = await search_tool.search(refined_query)

        if not search_results:
            return _generate_fallback_response(message, org_name)

        # Step 3 & 4: AI-Powered Synthesis & Response Generation
        logger.info("Step 3-4: Synthesizing search results into FAQ response")
        synthesized_response = await _synthesize_faq_response(
            message, search_results, llm, org_name
        )

        # Step 5: Format Final Response with Sources
        logger.info("Step 5: Formatting final response with sources")
        final_response = _format_faq_response(synthesized_response, search_results)

        return final_response

    except Exception as e:
        logger.error(f"Error in dynamic FAQ process: {e}")
        return _generate_fallback_response(message, org_name)

async def _refine_faq_query(message: str, llm, org_name: str) -> str:
    """Step 1: Refine user query for organization-specific FAQ search"""

    refinement_prompt = f"""
You are helping someone find information about {org_name}.
Transform their question into an effective search query that will find official information about the organization.

User Question: "{message}"

Create a search query that focuses on:
- Official {org_name} information
- The organization's website, blog, or documentation
- Adding terms like "about", "mission", "projects" if relevant

Return only the refined search query, nothing else.

Examples:
- "What does this org do?" → "{org_name} about mission what we do"
- "How do you work?" → "{org_name} how it works process methodology"
- "What projects do you have?" → "{org_name} projects portfolio what we build"
"""

    response = await llm.ainvoke([HumanMessage(content=refinement_prompt)])
    refined_query = response.content.strip()
    logger.info(f"Refined query: {refined_query}")
    return refined_query

async def _synthesize_faq_response(message: str, search_results: List[Dict], llm, org_name: str) -> str:
    """Step 3-4: Use LLM to synthesize search results into a comprehensive FAQ answer"""

    # Prepare search results context
    results_context = ""
    for i, result in enumerate(search_results[:5]):  # Top 5 results
        title = result.get('title', 'N/A')
        content = result.get('content', 'N/A')
        url = result.get('url', 'N/A')
        results_context += f"\nResult {i+1}:\nTitle: {title}\nContent: {content}\nURL: {url}\n"

Comment on lines +98 to +105
πŸ› οΈ Refactor suggestion

Bound prompt size and sanitize result content to prevent token bloat

Cap per-result content length, collapse whitespace, and fall back to snippet when content is missing.

-    results_context = ""
-    for i, result in enumerate(search_results[:5]):  # Top 5 results
-        title = result.get('title', 'N/A')
-        content = result.get('content', 'N/A')
-        url = result.get('url', 'N/A')
-        results_context += f"\nResult {i+1}:\nTitle: {title}\nContent: {content}\nURL: {url}\n"
+    results_context = ""
+    for i, result in enumerate(search_results[:5]):  # Top 5 results
+        title = result.get('title', 'N/A')
+        content = result.get('content') or result.get('snippet') or 'N/A'
+        # Normalize whitespace and trim overly long blobs
+        content = ' '.join(str(content).split())
+        if len(content) > 800:
+            content = content[:797] + "..."
+        url = result.get('url', 'N/A')
+        results_context += (
+            f"\nResult {i+1}:\nTitle: {title}\nContent: {content}\nURL: {url}\n"
+        )
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Prepare search results context
results_context = ""
for i, result in enumerate(search_results[:5]): # Top 5 results
title = result.get('title', 'N/A')
content = result.get('content', 'N/A')
url = result.get('url', 'N/A')
results_context += f"\nResult {i+1}:\nTitle: {title}\nContent: {content}\nURL: {url}\n"
# Prepare search results context
results_context = ""
for i, result in enumerate(search_results[:5]): # Top 5 results
title = result.get('title', 'N/A')
content = result.get('content') or result.get('snippet') or 'N/A'
# Normalize whitespace and trim overly long blobs
content = ' '.join(str(content).split())
if len(content) > 800:
content = content[:797] + "..."
url = result.get('url', 'N/A')
results_context += (
f"\nResult {i+1}:\nTitle: {title}\nContent: {content}\nURL: {url}\n"
)
🤖 Prompt for AI Agents
In backend/app/agents/devrel/nodes/handlers/faq.py around lines 98 to 105, the
current code appends full content from search results which can cause token
bloat in prompts. To fix this, limit the length of the content string to a
reasonable maximum, sanitize it by collapsing multiple whitespace characters
into single spaces, and if the content is missing or empty, use the 'snippet'
field as a fallback. This will keep the prompt size bounded and cleaner.

    synthesis_prompt = f"""
You are an AI assistant representing {org_name}. A user asked: "{message}"

Based on the following search results from official sources, provide a comprehensive, helpful answer about {org_name}.

Search Results:
{results_context}

Instructions:
1. Answer the user's question directly and conversationally
2. Focus on the most relevant and recent information
3. Be informative but concise (2-3 paragraphs max)
4. If the search results don't fully answer the question, acknowledge what you found
5. Sound helpful and knowledgeable about {org_name}
6. Don't mention "search results" in your response - speak as if you know about the organization

Your response:
"""

    response = await llm.ainvoke([HumanMessage(content=synthesis_prompt)])
    synthesized_answer = response.content.strip()
    logger.info(f"Synthesized FAQ response: {synthesized_answer[:100]}...")
    return synthesized_answer

def _format_faq_response(synthesized_answer: str, search_results: List[Dict]) -> str:
    """Step 5: Format the final response with sources"""

    # Start with the synthesized answer
    formatted_response = synthesized_answer

    # Add sources section
    if search_results:
        formatted_response += "\n\n**📚 Sources:**"
        for i, result in enumerate(search_results[:3]):  # Top 3 sources
            title = result.get('title', 'Source')
            url = result.get('url', '#')
            formatted_response += f"\n{i+1}. [{title}]({url})"

    return formatted_response

def _generate_fallback_response(message: str, org_name: str) -> str:
    """Generate a helpful fallback when search fails"""
    return f"""I'd be happy to help you learn about {org_name}, but I couldn't find current information to answer your question: "{message}"

This might be because:
- The information isn't publicly available yet
- The search terms need to be more specific
- There might be connectivity issues

Try asking a more specific question, or check out our official website and documentation for the most up-to-date information about {org_name}."""
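
Taken together, the new handler assumes only two small interfaces: search_tool.search(query) returns a list of dicts with title/content/url keys, and llm.ainvoke(messages) returns a message object exposing a .content string. A rough standalone sketch that exercises the pipeline with stubs (stub names are illustrative and not part of this PR; it assumes _dynamic_faq_process from the module above is in scope):

import asyncio

class StubSearchTool:
    async def search(self, query):
        # Shape consumed by _synthesize_faq_response and _format_faq_response
        return [{"title": "Devr.AI - About",
                 "content": "Devr.AI is an AI-powered DevRel assistant.",
                 "url": "https://example.org/about"}]

class StubLLM:
    async def ainvoke(self, messages):
        class Msg:
            content = "Devr.AI helps open-source communities engage and retain contributors."
        return Msg()

async def demo():
    answer = await _dynamic_faq_process(
        "What does this org do?", StubSearchTool(), StubLLM(), org_name="Devr.AI")
    print(answer)

if __name__ == "__main__":
    asyncio.run(demo())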
4 changes: 2 additions & 2 deletions backend/app/agents/devrel/nodes/handlers/web_search.py
@@ -51,7 +51,7 @@ def create_search_response(task_result: Dict[str, Any]) -> str:
"""
Create a user-friendly response string from search results.
"""

query = task_result.get("query")
results = task_result.get("results", [])

@@ -61,7 +61,7 @@ def create_search_response(task_result: Dict[str, Any]) -> str:
    response_parts = [f"Here's what I found for '{query}':"]
    for i, result in enumerate(results[:5]):
        title = result.get('title', 'N/A')
-        snippet = result.get('content', 'N/A')
+        snippet = result.get('content', 'N/A')
        url = result.get('url', '#')
        response_parts.append(f"{i+1}. {title}: {snippet}")
        response_parts.append(f" (Source: {url})")