Merged
32 changes: 22 additions & 10 deletions new-backend/app/modules/langgraph_builder.py
@@ -8,13 +8,25 @@
error_handler
)

from typing_extensions import TypedDict


class MyState(TypedDict):
cleaned_text: str
facts: list[dict]
sentiment: str
perspective: str
score: int
retries: int
status: str


def build_langgraph():
graph = StateGraph()
graph = StateGraph(MyState)

graph.add_node(
"sentiment_analysis",
sentiment.run_sentiment
sentiment.run_sentiment_sdk
)
graph.add_node(
"fact_checking",
@@ -34,21 +46,21 @@ def build_langgraph():
)
graph.add_node(
"error_handler",
error_handler
error_handler.error_handler
)

graph.set_entry_point(
"sentiment_analysis"
"sentiment_analysis",
)

graph.set_conditional_edges(
graph.add_conditional_edges(
"sentiment_analysis",
lambda x: (
"error_handler" if x.get("status") == "error" else "fact_checking"
)
)

graph.set_conditional_edges(
graph.add_conditional_edges(
"fact_checking",
lambda x: (
"error_handler"
@@ -57,7 +69,7 @@ def build_langgraph():
)
)

graph.set_conditional_edges(
graph.add_conditional_edges(
"generate_perspective",
lambda x: (
"error_handler"
@@ -66,7 +78,7 @@ def build_langgraph():
)
)

graph.set_conditional_edges(
graph.add_conditional_edges(
"judge_perspective",
lambda state: (
"error_handler"
@@ -80,12 +92,12 @@ def build_langgraph():
else "store_and_send"
)
)
graph.set_conditional_edges(
graph.add_conditional_edges(
"store_and_send",
lambda x: (
"error_handler"
if x.get("status") == "error"
else None
else "__end__"
)
)

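For context, a minimal sketch of how this builder is consumed downstream. The compile/invoke pattern is standard LangGraph; the initial-state values are illustrative and assume `build_langgraph` returns the uncompiled `StateGraph`:

```python
from app.modules.langgraph_builder import build_langgraph

# A StateGraph must be compiled before it can be invoked.
app = build_langgraph().compile()

# Illustrative initial state matching the MyState TypedDict fields.
result = app.invoke({
    "cleaned_text": "Example article text...",
    "facts": [],
    "sentiment": "",
    "perspective": "",
    "score": 0,
    "retries": 0,
    "status": "success",
})
print(result["perspective"])
```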
41 changes: 27 additions & 14 deletions new-backend/app/modules/langgraph_nodes/generate_perspective.py
@@ -1,21 +1,27 @@
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from app.utils.prompt_templates import generation_prompt
from langchain_groq import ChatGroq
from pydantic import BaseModel, Field

prompt = PromptTemplate(
input_variables=["text", "facts"],
template="""Given the following article:
{text}

And the following verified facts:
{facts}
prompt = generation_prompt

Generate a reasoned opposing perspective using chain-of-thought logic.
"""

class PerspectiveOutput(BaseModel):
reasoning: str = Field(..., description="Chain-of-thought reasoning steps")
perspective: str = Field(..., description="Generated opposite perspective")


my_llm = "llama-3.3-70b-versatile"

llm = ChatGroq(
model=my_llm,
temperature=0.7
)

my_llm = "groq llm"
structured_llm = llm.with_structured_output(PerspectiveOutput)


chain = LLMChain(prompt=prompt, llm=my_llm)
chain = prompt | structured_llm


def generate_perspective(state):
@@ -31,8 +37,15 @@ def generate_perspective(state):
elif not facts:
raise ValueError("Missing or empty 'facts' in state")

facts = "\n".join([f["snippet"] for f in state["facts"]])
result = chain.run({"text": text, "facts": facts})
facts_str = "\n".join([f"Claim: {f['original_claim']}\n"
"Verdict: {f['verdict']}\nExplanation: "
"{f['explanation']}" for f in state["facts"]])
Comment on lines +40 to +42

⚠️ Potential issue

Fix critical string formatting bug in facts_str.

Only the first string literal carries the f prefix; the later literals in the implicit concatenation are plain strings, so {f['verdict']} and {f['explanation']} are emitted verbatim instead of being interpolated.

-        facts_str = "\n".join([f"Claim: {f['original_claim']}\n"
-                               "Verdict: {f['verdict']}\nExplanation: "
-                               "{f['explanation']}" for f in state["facts"]])
+        facts_str = "\n".join([f"Claim: {f['original_claim']}\n"
+                               f"Verdict: {f['verdict']}\nExplanation: "
+                               f"{f['explanation']}" for f in state["facts"]])
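To make the pitfall concrete, a standalone sketch (dictionary contents are illustrative): in implicit string concatenation the f prefix applies per literal, not to the whole expression.

```python
fact = {"original_claim": "X", "verdict": "false", "explanation": "Y"}

# Only the first literal is an f-string, so braces in the later
# literals are kept verbatim rather than interpolated.
broken = (f"Claim: {fact['original_claim']}\n"
          "Verdict: {f['verdict']}")
print(broken)  # Claim: X\nVerdict: {f['verdict']}

# Prefixing every literal restores interpolation.
fixed = (f"Claim: {fact['original_claim']}\n"
         f"Verdict: {fact['verdict']}")
print(fixed)   # Claim: X\nVerdict: false
```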


result = chain.invoke({
"cleaned_article": text,
"facts": facts_str,
"sentiment": state.get("sentiment", "neutral")
})
except Exception as e:
print(f"some error occured in generate_perspective:{e}")
return {
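One consequence of the switch to `with_structured_output` worth flagging for reviewers: assuming the default Pydantic mode, `chain.invoke` now returns a `PerspectiveOutput` instance rather than a raw string, so downstream code reads attributes instead of text. A sketch with illustrative inputs:

```python
# Sketch only: input values are illustrative.
result = chain.invoke({
    "cleaned_article": "Example article text...",
    "facts": "Claim: ...\nVerdict: ...\nExplanation: ...",
    "sentiment": "neutral",
})
assert isinstance(result, PerspectiveOutput)
print(result.reasoning)    # chain-of-thought steps
print(result.perspective)  # the generated opposing perspective
```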
2 changes: 1 addition & 1 deletion new-backend/app/modules/langgraph_nodes/judge.py
@@ -6,7 +6,7 @@ def judge_perspective(state):
if not perspective:
raise ValueError("Missing or empty 'perspective' in state")

score = 85 if "reasoned" in perspective else 40
score = 85 if "reasoning" in perspective else 40

🛠️ Refactor suggestion

Replace dummy scoring with proper evaluation logic.

The current scoring mechanism based on keyword presence ("reasoning") is overly simplistic and fragile. Consider implementing a more robust evaluation system that assesses the quality, coherence, and logical structure of the perspective.

-        score = 85 if "reasoning" in perspective else 40
+        # TODO: Implement proper perspective evaluation logic
+        # Consider factors like: logical coherence, factual accuracy, 
+        # argument structure, and counter-perspective quality
+        score = self._evaluate_perspective_quality(perspective, state.get("facts", []))

Committable suggestion skipped: line range outside the PR's diff.

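If the TODO route is taken, one possible shape for such a helper; this is purely a sketch, and the function name, markers, and weights are illustrative rather than part of this PR:

```python
def evaluate_perspective_quality(perspective: str, facts: list[dict]) -> int:
    """Illustrative heuristic: rewards structure, substance, and grounding."""
    score = 0
    # Structure: look for explicit reasoning markers, not a single keyword.
    if any(m in perspective.lower() for m in ("step", "because", "therefore")):
        score += 40
    # Substance: very short outputs are unlikely to be well reasoned.
    if len(perspective.split()) >= 100:
        score += 30
    # Grounding: does the perspective reference any verified claim?
    if any(f.get("original_claim") and f["original_claim"][:30] in perspective
           for f in facts):
        score += 30
    return min(score, 100)
```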

except Exception as e:
print(f"some error occured in judge_perspetive:{e}")
return {
9 changes: 5 additions & 4 deletions new-backend/app/modules/langgraph_nodes/store_and_send.py
@@ -1,12 +1,13 @@
from utils.vector_store import save_to_vector_db
# from app.utils.vector_store import save_to_vector_db


def store_and_send(state):
# to store data in vector db
try:
save_to_vector_db({
**state
})
print(state)

🛠️ Refactor suggestion

Replace print with proper logging.

Using print statements for debugging in production code is not recommended. Use the logging module instead.

+import logging

-        print(state)
+        logging.info(f"Processing state: {state}")
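A slightly more idiomatic variant uses a module-level logger with lazy %-formatting; a minimal sketch, assuming no project-wide logging configuration exists yet:

```python
import logging

logger = logging.getLogger(__name__)  # per-module logger, configurable centrally

def store_and_send(state):
    try:
        logger.info("Processing state: %s", state)  # lazy formatting: skipped if INFO is disabled
        ...
    except Exception:
        logger.exception("store_and_send failed")  # logs the full traceback
```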

# save_to_vector_db({
# **state
# })
except Exception as e:
print(f"some error occured in store_and_send:{e}")
return {
1 change: 1 addition & 0 deletions new-backend/app/modules/pipeline.py
@@ -2,6 +2,7 @@
from app.modules.scraper.cleaner import clean_extracted_text
from app.modules.scraper.keywords import extract_keywords
from app.modules.langgraph_builder import build_langgraph

import json

# Compile once when module loads
3 changes: 2 additions & 1 deletion new-backend/app/utils/fact_check_utils.py
@@ -9,9 +9,9 @@ def run_fact_check_pipeline(state):

result = run_claim_extractor_sdk(state)


if state["status"] != "success":
return result

# Step 1: Extract claims
raw_output = result["verifiable_claims"]

@@ -31,6 +31,7 @@
print(f"❌ Search failed for: {claim} -> {e}")
time.sleep(4) # Add 4 second delay to prevent rate-limit


if not search_results:
return [], "All claim searches failed or returned no results."

32 changes: 32 additions & 0 deletions new-backend/app/utils/prompt_templates.py
@@ -0,0 +1,32 @@
from langchain.prompts import ChatPromptTemplate

generation_prompt = ChatPromptTemplate.from_template("""
You are an AI assistant that generates a well-reasoned '
'counter-perspective to a given article.

## Article:
{cleaned_article}

## Sentiment:
{sentiment}

## Verified Facts:
{facts}

---

Generate a logical and respectful *opposite perspective* to the article.
Use *step-by-step reasoning* and return your output in this JSON format:

```json
{{
"counter_perspective": "<your opposite point of view>",
"reasoning_steps": [
"<step 1>",
"<step 2>",
"<step 3>",
"...",
"<final reasoning>"
]
}}
""")
2 changes: 2 additions & 0 deletions new-backend/pyproject.toml
@@ -11,6 +11,8 @@ dependencies = [
"fastapi>=0.115.12",
"groq>=0.28.0",
"langchain>=0.3.25",
"langchain-community>=0.3.25",
"langchain-groq>=0.3.2",
"langgraph>=0.4.8",
"logging>=0.4.9.6",
"newspaper3k>=0.2.8",