diff --git a/new-backend/app/modules/langgraph_nodes/judge.py b/new-backend/app/modules/langgraph_nodes/judge.py
index ddecb190..8ad862bf 100644
--- a/new-backend/app/modules/langgraph_nodes/judge.py
+++ b/new-backend/app/modules/langgraph_nodes/judge.py
@@ -1,23 +1,53 @@
+import re
+from langchain_groq import ChatGroq
+from langchain.schema import HumanMessage
+
+# Initialise the judge model once at import time
+groq_llm = ChatGroq(
+    model="gemma2-9b-it",
+    temperature=0.0,
+    max_tokens=10,
+)
+
+
 def judge_perspective(state):
-    # Dummy scoring
     try:
-        perspective = state.get("perspective")
+        perspective_obj = state.get("perspective")
+        text = getattr(perspective_obj, "perspective", "").strip()
+        if not text:
+            raise ValueError("Empty 'perspective' for scoring")
+
+        prompt = f"""
+You are an expert evaluator. Please rate the following counter-perspective
+on originality, reasoning quality, and factual grounding. Provide ONLY
+a single integer score from 0 (very poor) to 100 (excellent).
+
+=== Perspective to score ===
+{text}
+"""
 
-        if not perspective:
-            raise ValueError("Missing or empty 'perspective' in state")
+        response = groq_llm.invoke([HumanMessage(content=prompt)])
+
+        if isinstance(response, list) and response:
+            raw = response[0].content.strip()
+        elif hasattr(response, "content"):
+            raw = response.content.strip()
+        else:
+            raw = str(response).strip()
+
+        # Pull the first integer 0-100 out of the model's reply
+        m = re.search(r"\b(\d{1,3})\b", raw)
+        if not m:
+            raise ValueError(f"Couldn't parse a score from: '{raw}'")
+
+        score = max(0, min(100, int(m.group(1))))
+
+        return {**state, "score": score, "status": "success"}
 
-        score = 85 if "reasoning" in perspective else 40
     except Exception as e:
-        print(f"some error occured in judge_perspetive:{e}")
+        print(f"Error in judge_perspective: {e}")
         return {
             "status": "error",
             "error_from": "judge_perspective",
-            "message": f"{e}",
-        }
-    return {
-        **state,
-        "score": score,
-        "status": "success"
+            "message": str(e),
         }
-
-# llm based score assignment
diff --git a/new-backend/app/utils/fact_check_utils.py b/new-backend/app/utils/fact_check_utils.py
index caa356c0..8d5b446e 100644
--- a/new-backend/app/utils/fact_check_utils.py
+++ b/new-backend/app/utils/fact_check_utils.py
@@ -9,9 +9,9 @@ def run_fact_check_pipeline(state):
 
     result = run_claim_extractor_sdk(state)
 
-
     if state["status"] != "success":
         return result
 
+    # Step 1: Extract claims
     raw_output = result["verifiable_claims"]
 
@@ -31,7 +31,6 @@ def run_fact_check_pipeline(state):
            print(f"❌ Search failed for: {claim} -> {e}")

        time.sleep(4)  # 4-second delay between searches to avoid rate limits
-
    if not search_results:
        return [], "All claim searches failed or returned no results."
 
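The score-parsing step in judge.py grabs the first 1-to-3-digit integer in the reply and clamps it to [0, 100]. A standalone sketch of that logic for sanity-checking edge cases (the `parse_score` helper name is illustrative, not part of this PR):

```python
import re

def parse_score(raw: str) -> int:
    # Mirrors the parsing in judge_perspective: first 1-3 digit integer, clamped.
    m = re.search(r"\b(\d{1,3})\b", raw)
    if not m:
        raise ValueError(f"Couldn't parse a score from: '{raw}'")
    return max(0, min(100, int(m.group(1))))

print(parse_score("85"))              # 85
print(parse_score("Score: 110/100"))  # 100 (clamped to the upper bound)
# Caveat: a verbose reply such as "On a scale of 0 to 100, I'd say 85"
# parses as 0, since the regex takes the first integer it finds.
```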
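Since `judge_perspective` now makes a live Groq call, a rough smoke test can stub the model out; the import path and the `SimpleNamespace` stand-in for the perspective object are assumptions based on this diff, not verified against the repo:

```python
from types import SimpleNamespace
from unittest.mock import patch

# Assumed import path, matching the file touched in this diff.
from app.modules.langgraph_nodes import judge

# Fake model reply and a minimal state carrying an object with a
# .perspective attribute, as the getattr() in judge_perspective expects.
fake_reply = SimpleNamespace(content="78")
state = {"perspective": SimpleNamespace(perspective="A counter-argument with reasoning.")}

with patch.object(judge, "groq_llm") as fake_llm:
    fake_llm.invoke.return_value = fake_reply
    result = judge.judge_perspective(state)

assert result["status"] == "success"
assert result["score"] == 78
```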