diff --git a/README.md b/README.md
index 0fc88eb9..90ac0e13 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,15 @@ sequenceDiagram
 ## Setup & Deployment
 
 ### Frontend Setup
+
+*Setup environment variables:*
+ - add a .env file in the `/frontend` directory.
+ - add the following environment variable to your .env file.
+```
+NEXT_PUBLIC_API_URL=http://localhost:8000
+
+```
+
 ```bash
 cd frontend
 npm install
@@ -151,10 +160,10 @@ npm run dev
 
 *Setup environment variables:*
- - add .env file in `/new-backend`directory.
+ - add a .env file in the `/backend` directory.
  - add following environment variable in your .env file.
 ```
-  GROQ_API_KEY=
+GROQ_API_KEY=
 PINECONE_API_KEY =
 PORT = 8000
 SEARCH_KEY =
diff --git a/backend/app/db/vector_store.py b/backend/app/db/vector_store.py
index f2141f93..5427c4ae 100644
--- a/backend/app/db/vector_store.py
+++ b/backend/app/db/vector_store.py
@@ -6,7 +6,6 @@
 if not PINECONE_API_KEY:
     raise ValueError("PINECONE_API_KEY environment variable is required")
 
 try:
-    # Initialize Pinecone client
     pc = Pinecone(api_key=PINECONE_API_KEY)
 
@@ -25,10 +24,7 @@
             name=INDEX_NAME,
             dimension=DIMENSIONS,
             metric=METRIC,
-            spec=ServerlessSpec(
-                cloud=CloudProvider.AWS,
-                region=AwsRegion.US_EAST_1
-            )
+            spec=ServerlessSpec(cloud=CloudProvider.AWS, region=AwsRegion.US_EAST_1),
         )
     else:
         print(f"Index '{INDEX_NAME}' already exists")
@@ -37,5 +33,4 @@
     # Connect to the index
     index = pc.Index(INDEX_NAME)
 except Exception as e:
-    raise RuntimeError("Error occured while "
-                       f"connecting to the index {INDEX_NAME}:{e}")
+    raise RuntimeError(f"Error occurred while connecting to the index {INDEX_NAME}: {e}")
diff --git a/backend/app/modules/bias_detection/check_bias.py b/backend/app/modules/bias_detection/check_bias.py
index b4b39801..138689a6 100644
--- a/backend/app/modules/bias_detection/check_bias.py
+++ b/backend/app/modules/bias_detection/check_bias.py
@@ -12,7 +12,7 @@ def check_bias(text):
     try:
         print(text)
         print(json.dumps(text))
-
+
         if not text:
             raise ValueError("Missing or empty 'cleaned_text'")
 
@@ -30,10 +30,7 @@ def check_bias(text):
                 },
                 {
                     "role": "user",
-                    "content": (
-                        "Give bias score to the following article "
-                        f"\n\n{text}"
-                    ),
+                    "content": (f"Give bias score to the following article \n\n{text}"),
                 },
             ],
             model="gemma2-9b-it",
diff --git a/backend/app/modules/chat/embed_query.py b/backend/app/modules/chat/embed_query.py
index e1260c9f..229f858d 100644
--- a/backend/app/modules/chat/embed_query.py
+++ b/backend/app/modules/chat/embed_query.py
@@ -4,7 +4,6 @@
 
 def embed_query(query: str):
-
     embeddings = embedder.encode(query).tolist()
     return embeddings
diff --git a/backend/app/modules/chat/get_rag_data.py b/backend/app/modules/chat/get_rag_data.py
index efaaa60b..3ce241c6 100644
--- a/backend/app/modules/chat/get_rag_data.py
+++ b/backend/app/modules/chat/get_rag_data.py
@@ -10,22 +10,15 @@
 
 def search_pinecone(query: str, top_k: int = 5):
-
     embeddings = embed_query(query)
 
     results = index.query(
-        vector=embeddings,
-        top_k=top_k,
-        include_metadata=True,
-        namespace="default"
-
+        vector=embeddings, top_k=top_k, include_metadata=True, namespace="default"
     )
 
     matches = []
     for match in results["matches"]:
-        matches.append({
-            "id": match["id"],
-            "score": match["score"],
-            "metadata": match["metadata"]
-        })
+        matches.append(
+            {"id": match["id"], "score": match["score"], "metadata": match["metadata"]}
+        )
 
     return matches
diff --git a/backend/app/modules/chat/llm_processing.py b/backend/app/modules/chat/llm_processing.py
index 2141905d..50b8f22f 100644
--- a/backend/app/modules/chat/llm_processing.py
+++ b/backend/app/modules/chat/llm_processing.py
@@ -8,8 +8,10 @@
 
 def build_context(docs):
-
-    return "\n".join(f"{m['metadata'].get('explanation') or m['metadata'].get('reasoning', '')}"for m in docs)
+    return "\n".join(
+        f"{m['metadata'].get('explanation') or m['metadata'].get('reasoning', '')}"
+        for m in docs
+    )
 
 
 def ask_llm(question, docs):
@@ -28,8 +30,8 @@ def ask_llm(question, docs):
         model="gemma2-9b-it",
         messages=[
             {"role": "system", "content": "Use only the context to answer."},
-            {"role": "user", "content": prompt}
-        ]
+            {"role": "user", "content": prompt},
+        ],
     )
 
     return response.choices[0].message.content
diff --git a/backend/app/modules/facts_check/web_search.py b/backend/app/modules/facts_check/web_search.py
index d98c76f6..ff69e145 100644
--- a/backend/app/modules/facts_check/web_search.py
+++ b/backend/app/modules/facts_check/web_search.py
@@ -6,14 +6,17 @@
 
 GOOGLE_SEARCH = os.getenv("SEARCH_KEY")
 
+
 def search_google(query):
-    results = requests.get(f"https://www.googleapis.com/customsearch/v1?key={GOOGLE_SEARCH}&cx=f637ab77b5d8b4a3c&q={query}")
+    results = requests.get(
+        f"https://www.googleapis.com/customsearch/v1?key={GOOGLE_SEARCH}&cx=f637ab77b5d8b4a3c&q={query}"
+    )
     res = results.json()
 
     first = {}
     first["title"] = res["items"][0]["title"]
     first["link"] = res["items"][0]["link"]
     first["snippet"] = res["items"][0]["snippet"]
-
+
     return [
         first,
-    ]
\ No newline at end of file
+    ]
diff --git a/backend/app/modules/langgraph_builder.py b/backend/app/modules/langgraph_builder.py
index fe607106..08409e9f 100644
--- a/backend/app/modules/langgraph_builder.py
+++ b/backend/app/modules/langgraph_builder.py
@@ -5,8 +5,8 @@
     generate_perspective,
     judge,
     store_and_send,
-    error_handler
-    )
+    error_handler,
+)
 
 from typing_extensions import TypedDict
 
@@ -24,58 +24,34 @@ class MyState(TypedDict):
 def build_langgraph():
     graph = StateGraph(MyState)
 
-    graph.add_node(
-        "sentiment_analysis",
-        sentiment.run_sentiment_sdk
-    )
-    graph.add_node(
-        "fact_checking",
-        fact_check.run_fact_check
-    )
-    graph.add_node(
-        "generate_perspective",
-        generate_perspective.generate_perspective
-    )
-    graph.add_node(
-        "judge_perspective",
-        judge.judge_perspective
-    )
-    graph.add_node(
-        "store_and_send",
-        store_and_send.store_and_send
-    )
-    graph.add_node(
-        "error_handler",
-        error_handler.error_handler
-    )
+    graph.add_node("sentiment_analysis", sentiment.run_sentiment_sdk)
+    graph.add_node("fact_checking", fact_check.run_fact_check)
+    graph.add_node("generate_perspective", generate_perspective.generate_perspective)
+    graph.add_node("judge_perspective", judge.judge_perspective)
+    graph.add_node("store_and_send", store_and_send.store_and_send)
+    graph.add_node("error_handler", error_handler.error_handler)
 
     graph.set_entry_point(
-        "sentiment_analysis",
-        )
+        "sentiment_analysis",
+    )
 
     graph.add_conditional_edges(
         "sentiment_analysis",
-        lambda x: (
-            "error_handler" if x.get("status") == "error" else "fact_checking"
-        )
+        lambda x: ("error_handler" if x.get("status") == "error" else "fact_checking"),
     )
 
     graph.add_conditional_edges(
         "fact_checking",
         lambda x: (
-            "error_handler"
-            if x.get("status") == "error"
-            else "generate_perspective"
-        )
+            "error_handler" if x.get("status") == "error" else "generate_perspective"
+        ),
     )
 
     graph.add_conditional_edges(
         "generate_perspective",
         lambda x: (
-            "error_handler"
-            if x.get("status") == "error"
-            else "judge_perspective"
-        )
+            "error_handler" if x.get("status") == "error" else "judge_perspective"
+        ),
     )
 
     graph.add_conditional_edges(
@@ -90,15 +66,11 @@ def build_langgraph():
             )
             if state.get("score", 0) < 70
             else "store_and_send"
-        )
+        ),
     )
 
     graph.add_conditional_edges(
         "store_and_send",
-        lambda x: (
-            "error_handler"
-            if x.get("status") == "error"
-            else "__end__"
-        )
+        lambda x: ("error_handler" if x.get("status") == "error" else "__end__"),
    )
 
     graph.set_finish_point("store_and_send")
diff --git a/backend/app/modules/langgraph_nodes/error_handler.py b/backend/app/modules/langgraph_nodes/error_handler.py
index 384a8754..dd3f8688 100644
--- a/backend/app/modules/langgraph_nodes/error_handler.py
+++ b/backend/app/modules/langgraph_nodes/error_handler.py
@@ -1,11 +1,10 @@
-
-
 def error_handler(input):
     print("Error detected!")
     print(f"From: {input.get('error_from')}")
     print(f"Message: {input.get('message')}")
 
-    return {"status": "stopped_due_to_error",
-            "from": [input.get("error_from")],
-            "error": [input.get("message")]
-            }
+    return {
+        "status": "stopped_due_to_error",
+        "from": [input.get("error_from")],
+        "error": [input.get("message")],
+    }
diff --git a/backend/app/modules/langgraph_nodes/fact_check.py b/backend/app/modules/langgraph_nodes/fact_check.py
index 34285f97..a61623ad 100644
--- a/backend/app/modules/langgraph_nodes/fact_check.py
+++ b/backend/app/modules/langgraph_nodes/fact_check.py
@@ -1,4 +1,3 @@
-
 from app.utils.fact_check_utils import run_fact_check_pipeline
 
 
@@ -25,9 +24,5 @@ def run_fact_check(state):
             "status": "error",
             "error_from": "fact_checking",
             "message": f"{e}",
-            }
-    return {
-        **state,
-        "facts": verifications,
-        "status": "success"
         }
+    return {**state, "facts": verifications, "status": "success"}
diff --git a/backend/app/modules/langgraph_nodes/generate_perspective.py b/backend/app/modules/langgraph_nodes/generate_perspective.py
index b2a12e06..c17db82e 100644
--- a/backend/app/modules/langgraph_nodes/generate_perspective.py
+++ b/backend/app/modules/langgraph_nodes/generate_perspective.py
@@ -13,10 +13,7 @@ class PerspectiveOutput(BaseModel):
 
 my_llm = "llama-3.3-70b-versatile"
 
-llm = ChatGroq(
-    model=my_llm,
-    temperature=0.7
-)
+llm = ChatGroq(model=my_llm, temperature=0.7)
 
 structured_llm = llm.with_structured_output(PerspectiveOutput)
 
@@ -37,15 +34,22 @@ def generate_perspective(state):
         elif not facts:
             raise ValueError("Missing or empty 'facts' in state")
 
-        facts_str = "\n".join([f"Claim: {f['original_claim']}\n"
-                               "Verdict: {f['verdict']}\nExplanation: "
-                               "{f['explanation']}" for f in state["facts"]])
-
-        result = chain.invoke({
-            "cleaned_article": text,
-            "facts": facts_str,
-            "sentiment": state.get("sentiment", "neutral")
-        })
+        facts_str = "\n".join(
+            [
+                f"Claim: {f['original_claim']}\n"
+                f"Verdict: {f['verdict']}\nExplanation: "
+                f"{f['explanation']}"
+                for f in state["facts"]
+            ]
+        )
+
+        result = chain.invoke(
+            {
+                "cleaned_article": text,
+                "facts": facts_str,
+                "sentiment": state.get("sentiment", "neutral"),
+            }
+        )
     except Exception as e:
         print(f"some error occured in generate_perspective:{e}")
         return {
@@ -53,8 +57,4 @@ def generate_perspective(state):
             "error_from": "generate_perspective",
             "message": f"{e}",
         }
-    return {
-        **state,
-        "perspective": result,
-        "status": "success"
-    }
+    return {**state, "perspective": result, "status": "success"}
diff --git a/backend/app/modules/langgraph_nodes/sentiment.py b/backend/app/modules/langgraph_nodes/sentiment.py
index 66ddb5c1..15d02407 100644
--- a/backend/app/modules/langgraph_nodes/sentiment.py
+++ b/backend/app/modules/langgraph_nodes/sentiment.py
@@ -25,9 +25,9 @@ def run_sentiment_sdk(state):
             },
             {
                 "role": "user",
-                "content": ("Analyze the sentiment of the following text:"
-                            f"\n\n{text}"
-                            ),
+                "content": (
+                    f"Analyze the sentiment of the following text:\n\n{text}"
+                ),
             },
         ],
         model="gemma2-9b-it",
diff --git a/backend/app/modules/langgraph_nodes/store_and_send.py b/backend/app/modules/langgraph_nodes/store_and_send.py
index bf49de64..6f8988e3 100644
--- a/backend/app/modules/langgraph_nodes/store_and_send.py
+++ b/backend/app/modules/langgraph_nodes/store_and_send.py
@@ -31,7 +31,4 @@ def store_and_send(state):
             "message": f"{e}",
         }
     # sending to frontend
-    return {
-        **state,
-        "status": "success"
-    }
+    return {**state, "status": "success"}
diff --git a/backend/app/modules/scraper/cleaner.py b/backend/app/modules/scraper/cleaner.py
index 4bbb371c..8047bb26 100644
--- a/backend/app/modules/scraper/cleaner.py
+++ b/backend/app/modules/scraper/cleaner.py
@@ -2,12 +2,12 @@
 import nltk
 
 try:
-    nltk.data.find('corpora/stopwords')
-    nltk.data.find('corpora/punkt_tab')
+    nltk.data.find("corpora/stopwords")
+    nltk.data.find("corpora/punkt_tab")
 except LookupError:
-    nltk.download('stopwords')
-    nltk.download('punkt_tab')
+    nltk.download("stopwords")
+    nltk.download("punkt_tab")
 
 
 def clean_extracted_text(text: str):
@@ -19,7 +19,7 @@ def clean_extracted_text(text: str):
         return ""
 
     # 1. Removing multiple line breaks to single line break
-    text = re.sub(r'\n{2,}', '\n\n', text)
+    text = re.sub(r"\n{2,}", "\n\n", text)
 
     # 2. Removing common boilerplate patterns
     #    (example: "Read more at...", "Subscribe", etc.)
@@ -32,7 +32,7 @@ def clean_extracted_text(text: str):
         r"sponsored content",
         r"promoted by.*",
         r"recommended for you",
-        r"© \d{4}.*",   # copyright lines
+        r"© \d{4}.*",  # copyright lines
         r"all rights reserved",
         r"terms of service",
         r"privacy policy",
@@ -71,16 +71,16 @@ def clean_extracted_text(text: str):
         r"powered by .*",
     ]
     for pattern in boilerplate_phrases:
-        text = re.sub(pattern, '', text, flags=re.IGNORECASE)
+        text = re.sub(pattern, "", text, flags=re.IGNORECASE)
 
     # 3. Remove lines with too few characters (likely junk)
-    lines = text.split('\n')
+    lines = text.split("\n")
     cleaned_lines = [line.strip() for line in lines if len(line.strip()) > 30]
 
     # 4. Join lines back with a double newline for paragraphs
-    cleaned_text = '\n\n'.join(cleaned_lines)
+    cleaned_text = "\n\n".join(cleaned_lines)
 
     # 5. Optional: Fix multiple spaces and trim
-    cleaned_text = re.sub(r'[ \t]{2,}', ' ', cleaned_text).strip()
+    cleaned_text = re.sub(r"[ \t]{2,}", " ", cleaned_text).strip()
 
     return cleaned_text
diff --git a/backend/app/modules/scraper/extractor.py b/backend/app/modules/scraper/extractor.py
index 01c39264..00ec0031 100644
--- a/backend/app/modules/scraper/extractor.py
+++ b/backend/app/modules/scraper/extractor.py
@@ -11,16 +11,15 @@
 
 
 class Article_extractor:
-
     def __init__(self, url):
         self.url = url
         self.headers = {
-            'User-Agent': (
-                'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
-                ' AppleWebKit/537.36 '
-                '(KHTML, like Gecko) Chrome/113.0 Safari/537.36'
-            )
-        }
+            "User-Agent": (
+                "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
+                " AppleWebKit/537.36 "
+                "(KHTML, like Gecko) Chrome/113.0 Safari/537.36"
+            )
+        }
 
     def _fetch_html(self):
         try:
@@ -41,8 +40,8 @@ def extract_with_trafilatura(self):
                 include_comments=False,
                 include_tables=False,
                 favor_recall=True,
-                output_format='json'
-            )
+                output_format="json",
+            )
             if result:
                 return json.loads(result)
             return {}
@@ -56,8 +55,9 @@ def extract_with_newspaper(self) -> dict:
                 "title": article.title,
                 "text": article.text,
                 "authors": article.authors,
-                "publish_date": (article.publish_date.isoformat()
-                                 if article.publish_date else None)
+                "publish_date": (
+                    article.publish_date.isoformat() if article.publish_date else None
+                ),
             }
         except Exception as e:
             logging.error(f"Newspaper3k failed: {e}")
@@ -70,9 +70,9 @@ def extract_with_bs4(self) -> dict:
 
         try:
             doc = Document(html)
-            soup = BeautifulSoup(doc.summary(), 'html.parser')
+            soup = BeautifulSoup(doc.summary(), "html.parser")
             title = doc.title()
-            text = soup.get_text(separator='\n')
+            text = soup.get_text(separator="\n")
             return {"title": title, "text": text}
         except Exception as e:
             logging.error(f"BS4 + Readability fallback failed: {e}")
@@ -82,15 +82,11 @@ def extract(self):
         methods = [
             self.extract_with_trafilatura,
             self.extract_with_newspaper,
-            self.extract_with_bs4
-        ]
+            self.extract_with_bs4,
+        ]
 
         for method in methods:
             result = method()
             if result and result.get("text"):
                 result["url"] = self.url
                 return result
-        return {
-            "url": self.url,
-            "text": "",
-            "error": "Failed to extract article."
-        }
+        return {"url": self.url, "text": "", "error": "Failed to extract article."}
diff --git a/backend/app/modules/scraper/keywords.py b/backend/app/modules/scraper/keywords.py
index aa7855d1..2156f3a8 100644
--- a/backend/app/modules/scraper/keywords.py
+++ b/backend/app/modules/scraper/keywords.py
@@ -18,10 +18,7 @@ def extract_keywords(text: str, max_keywords: int = 15):
     keywords_with_scores = rake.get_ranked_phrases_with_scores()
 
     # Sort and limit
-    keywords = [phrase for score, phrase in sorted(
-        keywords_with_scores,
-        reverse=True
-    )]
+    keywords = [phrase for score, phrase in sorted(keywords_with_scores, reverse=True)]
 
     return keywords[:max_keywords]
 
@@ -40,5 +37,5 @@ def extract_keyword_data(text: str) -> Dict:
     return {
         "keywords": keywords,
         "top_phrase": keywords[0] if keywords else None,
-        "count": len(keywords)
+        "count": len(keywords),
     }
diff --git a/backend/app/modules/vector_store/chunk_rag_data.py b/backend/app/modules/vector_store/chunk_rag_data.py
index b9068711..9c1f833f 100644
--- a/backend/app/modules/vector_store/chunk_rag_data.py
+++ b/backend/app/modules/vector_store/chunk_rag_data.py
@@ -31,40 +31,40 @@ def chunk_rag_data(data):
     ):
         raise ValueError("Perspective object missing required fields")
 
-    chunks.append({
-        "id": f"{article_id}-perspective",
-        "text": perspective_obj.perspective,
-        "metadata": {
-            "type": "counter-perspective",
-            "reasoning": perspective_obj.reasoning,
-            "article_id": article_id
+    chunks.append(
+        {
+            "id": f"{article_id}-perspective",
+            "text": perspective_obj.perspective,
+            "metadata": {
+                "type": "counter-perspective",
+                "reasoning": perspective_obj.reasoning,
+                "article_id": article_id,
+            },
         }
-    })
+    )
 
     # Add each fact as a separate chunk
     for i, fact in enumerate(data["facts"]):
-        fact_fields = [
-            "original_claim",
-            "verdict",
-            "explanation",
-            "source_link"
-        ]
+        fact_fields = ["original_claim", "verdict", "explanation", "source_link"]
 
         for field in fact_fields:
             if field not in fact:
-                raise ValueError("Missing required fact field:"
-                                 f" {field} in fact index {i}")
+                raise ValueError(
+                    f"Missing required fact field: {field} in fact index {i}"
+                )
 
-        chunks.append({
-            "id": f"{article_id}-fact-{i}",
-            "text": fact["original_claim"],
-            "metadata": {
-                "type": "fact",
-                "verdict": fact["verdict"],
-                "explanation": fact["explanation"],
-                "source_link": fact["source_link"],
-                "article_id": article_id
+        chunks.append(
+            {
+                "id": f"{article_id}-fact-{i}",
+                "text": fact["original_claim"],
+                "metadata": {
+                    "type": "fact",
+                    "verdict": fact["verdict"],
+                    "explanation": fact["explanation"],
+                    "source_link": fact["source_link"],
+                    "article_id": article_id,
+                },
             }
-        })
+        )
 
     return chunks
diff --git a/backend/app/modules/vector_store/embed.py b/backend/app/modules/vector_store/embed.py
index a5933bcc..68a2ed69 100644
--- a/backend/app/modules/vector_store/embed.py
+++ b/backend/app/modules/vector_store/embed.py
@@ -5,7 +5,6 @@
 
 def embed_chunks(chunks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
-
     if not chunks:
         return []
 
@@ -13,19 +12,15 @@ def embed_chunks(chunks: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
     for i, chunk in enumerate(chunks):
         if not isinstance(chunk, dict) or "text" not in chunk:
             raise ValueError(
-                f"Invalid chunk structure at index {i}:"
-                " missing 'text' field"
-            )
+                f"Invalid chunk structure at index {i}: missing 'text' field"
+            )
 
     texts = [chunk["text"] for chunk in chunks]
     embeddings = embedder.encode(texts).tolist()
 
     vectors = []
     for chunk, embedding in zip(chunks, embeddings):
-        vectors.append({
-            "id": chunk["id"],
-            "values": embedding,
-            "metadata": chunk["metadata"]
-        })
+        vectors.append(
+            {"id": chunk["id"], "values": embedding, "metadata": chunk["metadata"]}
+        )
 
     return vectors
-
diff --git a/backend/app/routes/routes.py b/backend/app/routes/routes.py
index 4d93a8bb..cc20ff20 100644
--- a/backend/app/routes/routes.py
+++ b/backend/app/routes/routes.py
@@ -42,7 +42,6 @@ async def run_pipelines(request: URlRequest):
 
 @router.post("/chat")
 async def answer_query(request: ChatQuery):
-
     query = request.message
     results = search_pinecone(query)
     answer = ask_llm(query, results)
diff --git a/backend/app/utils/fact_check_utils.py b/backend/app/utils/fact_check_utils.py
index 5b5cfa6e..5fbaa9a3 100644
--- a/backend/app/utils/fact_check_utils.py
+++ b/backend/app/utils/fact_check_utils.py
@@ -1,8 +1,8 @@
 from app.modules.facts_check.web_search import search_google
 from app.modules.facts_check.llm_processing import (
     run_claim_extractor_sdk,
-    run_fact_verifier_sdk
-    )
+    run_fact_verifier_sdk,
+)
 
 import re
 import time
diff --git a/backend/app/utils/store_vectors.py b/backend/app/utils/store_vectors.py
index 14db2d51..4e4ec2d1 100644
--- a/backend/app/utils/store_vectors.py
+++ b/backend/app/utils/store_vectors.py
@@ -24,9 +24,9 @@ def store(vectors: List[Dict[str, Any]], namespace: str = "default") -> None:
 
     try:
         index.upsert(vectors, namespace=namespace)
-        logger.info(f"Successfully stored {len(vectors)} "
-                    f"vectors in namespace '{namespace}'")
+        logger.info(
+            f"Successfully stored {len(vectors)} vectors in namespace '{namespace}'"
+        )
     except Exception as e:
-        logger.error("Failed to store "
-                     f"vectors in namespace '{namespace}': {e}")
+        logger.error(f"Failed to store vectors in namespace '{namespace}': {e}")
         raise RuntimeError(f"Vector storage failed: {e}")
diff --git a/backend/main.py b/backend/main.py
index 04ebe2a0..6df16141 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -5,10 +5,7 @@
 app = FastAPI(
     title="Perspective API",
     version="1.0.0",
-    description=(
-        'An API to generate alternative'
-        ' perspectives on biased articles'
-    )
+    description=("An API to generate alternative perspectives on biased articles"),
 )
 
 app.add_middleware(
@@ -25,6 +22,6 @@
     import uvicorn
     import os
 
-    port = int(os.environ.get("PORT", 7860)) 
+    port = int(os.environ.get("PORT", 7860))
     print(f"Server is running on http://0.0.0.0:{port}")
     uvicorn.run(app, host="0.0.0.0", port=port)
diff --git a/backend/start.sh b/backend/start.sh
deleted file mode 100755
index 78fdec65..00000000
--- a/backend/start.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-set -e
-
-# Install uv if not present
-pip install uv
-
-# Sync environment and run app
-uv sync
-uv run main.py
diff --git a/frontend/app/analyze/loading/page.tsx b/frontend/app/analyze/loading/page.tsx
index 1d55ace3..fdd06e1e 100644
--- a/frontend/app/analyze/loading/page.tsx
+++ b/frontend/app/analyze/loading/page.tsx
@@ -16,6 +16,8 @@ import {
 import ThemeToggle from "@/components/theme-toggle";
 import axios from "axios";
 
+const backend_url = process.env.NEXT_PUBLIC_API_URL;
+
 /**
  * Displays a multi-step animated loading and progress interface for the article analysis workflow.
 *
@@ -70,21 +72,14 @@ export default function LoadingPage() {
 
       try {
         const [processRes, biasRes] = await Promise.all([
-          axios.post(
-            "https://Thunder1245-perspective-backend.hf.space/api/process",
-            {
-              url: storedUrl,
-            }
-          ),
-          axios.post(
-            "http://Thunder1245-perspective-backend.hf.space/api/bias",
-            {
-              url: storedUrl,
-            }
-          ),
+          axios.post(`${backend_url}/api/process`, {
+            url: storedUrl,
+          }),
+          axios.post(`${backend_url}/api/bias`, {
+            url: storedUrl,
+          }),
         ]);
 
-
         sessionStorage.setItem("BiasScore", JSON.stringify(biasRes.data));
         console.log("Bias score saved");
 
@@ -99,7 +94,6 @@ export default function LoadingPage() {
         console.log("Analysis result saved");
 
         console.log(processRes);
-        // optional logging
       } catch (err) {
         console.error("Failed to process article:", err);
diff --git a/frontend/app/analyze/results/page.tsx b/frontend/app/analyze/results/page.tsx
index a3aaacbc..31ba9b26 100644
--- a/frontend/app/analyze/results/page.tsx
+++ b/frontend/app/analyze/results/page.tsx
@@ -4,10 +4,7 @@ import type React from "react";
 import { useState, useEffect, useRef } from "react";
 import { useRouter } from "next/navigation";
 import Link from "next/link";
-import {
-  Send,
-  Link as LinkIcon,
-} from "lucide-react";
+import { Send, Link as LinkIcon } from "lucide-react";
 import { Button } from "@/components/ui/button";
 import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
 import {
@@ -22,6 +19,7 @@ import { Badge } from "@/components/ui/badge";
 import BiasMeter from "@/components/bias-meter";
 import axios from "axios";
 
+const backend_url = process.env.NEXT_PUBLIC_API_URL;
 
 /**
  * Renders the article analysis page with summary, perspectives, fact checks, bias meter, AI chat, and sources.
@@ -47,16 +45,15 @@ export default function AnalyzePage() {
   useEffect(() => {
     const storedBiasScore = sessionStorage.getItem("BiasScore");
     const storedData = sessionStorage.getItem("analysisResult");
-    if (storedBiasScore && storedData){
+    if (storedBiasScore && storedData) {
       setIsLoading(false);
-      }
+    }
 
     if (storedBiasScore) setBiasScore(JSON.parse(storedBiasScore).bias_score);
     else console.warn("No bias score found.");
 
     if (storedData) setAnalysisData(JSON.parse(storedData));
     else console.warn("No analysis result found");
-
   }, []);
 
   useEffect(() => {
@@ -64,40 +61,37 @@ export default function AnalyzePage() {
       return;
     }
 
-
     const storedData = sessionStorage.getItem("analysisResult");
     const storedBiasScore = sessionStorage.getItem("BiasScore");
 
     if (storedBiasScore && storedData) {
-    // inside here TS knows storedBiasScore and storedData are strings
-    setBiasScore(JSON.parse(storedBiasScore).bias_score);
-    setAnalysisData(JSON.parse(storedData));
-    setIsLoading(false);
-  } else {
-    console.warn("No bias or data found. Redirecting...");
-    router.push("/analyze");
+      // inside here TS knows storedBiasScore and storedData are strings
+      setBiasScore(JSON.parse(storedBiasScore).bias_score);
+      setAnalysisData(JSON.parse(storedData));
+      setIsLoading(false);
+    } else {
+      console.warn("No bias or data found. Redirecting...");
+      router.push("/analyze");
     }
-
   }, [router]);
 
-  async function handleSendMessage(e: React.FormEvent){
+  async function handleSendMessage(e: React.FormEvent) {
     e.preventDefault();
     if (!message.trim()) return;
 
     const newMessages = [...messages, { role: "user", content: message }];
     setMessages(newMessages);
     setMessage("");
 
-    const res = await axios.post("http://Thunder1245-perspective-backend.hf.space/api/chat", {
-      message: message
+    const res = await axios.post(`${backend_url}/api/chat`, {
+      message: message,
     });
 
-    const data = res.data;
-
-    console.log(data)
+    const data = res.data;
 
-    // 🔹 Step 2: Append LLM’s response
-    setMessages([...newMessages, { role: "assistant", content: data.answer }]);
-};
+    console.log(data);
 
+    // 🔹 Step 2: Append LLM’s response
+    setMessages([...newMessages, { role: "assistant", content: data.answer }]);
+  }
 
   if (isLoading || !analysisData || !biasScore) {
     return (
@@ -115,7 +109,7 @@ export default function AnalyzePage() {
     score,
   } = analysisData;
 
-  return(
+  return (
     <div className="min-h-screen bg-background">
       {/* Header omitted for brevity */}
diff --git a/frontend/app/page.tsx b/frontend/app/page.tsx
index 65a61623..bc1f0d76 100644
--- a/frontend/app/page.tsx
+++ b/frontend/app/page.tsx
@@ -1,11 +1,25 @@
-"use client"
+"use client";
 
-import { useRouter } from "next/navigation"
-import { Button } from "@/components/ui/button"
-import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"
-import { Badge } from "@/components/ui/badge"
-import { Shield, Brain, Database, CheckCircle, Globe, ArrowRight, Sparkles } from "lucide-react"
-import ThemeToggle from "@/components/theme-toggle"
+import { useRouter } from "next/navigation";
+import { Button } from "@/components/ui/button";
+import {
+  Card,
+  CardContent,
+  CardDescription,
+  CardHeader,
+  CardTitle,
+} from "@/components/ui/card";
+import { Badge } from "@/components/ui/badge";
+import {
+  Shield,
+  Brain,
+  Database,
+  CheckCircle,
+  Globe,
+  ArrowRight,
+  Sparkles,
+} from "lucide-react";
+import ThemeToggle from "@/components/theme-toggle";
 
 /**
  * Renders the main landing page for the Perspective application, showcasing its features, technology stack, and calls to action.
@@ -15,7 +29,7 @@ import ThemeToggle from "@/components/theme-toggle"
  * @returns The complete landing page React element for the Perspective app.
  */
 export default function Home() {
-  const router = useRouter()
+  const router = useRouter();
 
   const features = [
     {
       icon: Brain,
@@ -27,41 +41,56 @@ export default function Home() {
     {
       icon: Shield,
       title: "Bias Detection",
-      description: "Sophisticated algorithms identify and highlight potential biases in article content.",
+      description:
+        "Sophisticated algorithms identify and highlight potential biases in article content.",
       color: "from-emerald-500 to-teal-600",
     },
     {
       icon: CheckCircle,
       title: "Fact Checking",
-      description: "Cross-references claims with reliable sources to ensure accuracy and credibility.",
+      description:
+        "Cross-references claims with reliable sources to ensure accuracy and credibility.",
       color: "from-blue-500 to-cyan-600",
     },
     {
       icon: Database,
       title: "Vector Database",
-      description: "Efficient storage and retrieval system enables chat-based exploration of perspectives.",
+      description:
+        "Efficient storage and retrieval system enables chat-based exploration of perspectives.",
       color: "from-orange-500 to-red-600",
     },
-  ]
+  ];
 
   const technologies = [
     { name: "Python", color: "bg-gradient-to-r from-blue-500 to-blue-600" },
-    { name: "TypeScript", color: "bg-gradient-to-r from-blue-600 to-indigo-600" },
-    { name: "FastAPI", color: "bg-gradient-to-r from-green-500 to-emerald-600" },
+    {
+      name: "TypeScript",
+      color: "bg-gradient-to-r from-blue-600 to-indigo-600",
+    },
+    {
+      name: "FastAPI",
+      color: "bg-gradient-to-r from-green-500 to-emerald-600",
+    },
     { name: "Next.js", color: "bg-gradient-to-r from-gray-700 to-gray-900" },
-    { name: "Tailwind CSS", color: "bg-gradient-to-r from-cyan-500 to-blue-500" },
-    { name: "LangChain", color: "bg-gradient-to-r from-purple-500 to-indigo-600" },
+    {
+      name: "Tailwind CSS",
+      color: "bg-gradient-to-r from-cyan-500 to-blue-500",
+    },
+    {
+      name: "LangChain",
+      color: "bg-gradient-to-r from-purple-500 to-indigo-600",
+    },
     { name: "LangGraph", color: "bg-gradient-to-r from-pink-500 to-rose-600" },
     { name: "NLP", color: "bg-gradient-to-r from-amber-500 to-orange-600" },
     { name: "Vector DB", color: "bg-gradient-to-r from-teal-500 to-cyan-600" },
-  ]
+  ];
 
   const stats = [
     { label: "Articles Analyzed", value: "10,000+", color: "text-blue-600" },
     { label: "Biases Detected", value: "95%", color: "text-emerald-600" },
     { label: "Fact Accuracy", value: "98%", color: "text-purple-600" },
     { label: "User Satisfaction", value: "4.9/5", color: "text-orange-600" },
-  ]
+  ];
 
   return (
     <div className=
@@ -104,8 +133,10 @@
-            Combat bias and one-sided narratives with AI-generated alternative perspectives. Get fact-based, balanced
-            viewpoints on any online article through our advanced NLP pipeline powered by LangGraph and LangChain.
+            Combat bias and one-sided narratives with AI-generated alternative
+            perspectives. Get fact-based, balanced viewpoints on any online
+            article through our advanced NLP pipeline powered by LangGraph and
+            LangChain.
 
-              No sign in required. It’s completely free.
+            No sign in required. It’s completely free.
 
-
+
             {/* Floating stats */}
             {stats.map((stat, index) => (
@@ -128,7 +159,9 @@
                 >
                   {stat.value}
-                  {stat.label}
+
+                  {stat.label}
+
             ))}
@@ -142,10 +175,12 @@
             What is Perspective?
 
-            Perspective addresses the critical problem of biased and one-sided narratives in online articles. Our
-            AI-powered solution provides readers with fact-based, well-structured alternative perspectives by analyzing
-            article content, extracting key points, and generating logical counter-perspectives using cutting-edge
-            natural language processing technology.
+            Perspective addresses the critical problem of biased and one-sided
+            narratives in online articles. Our AI-powered solution provides
+            readers with fact-based, well-structured alternative perspectives by
+            analyzing article content, extracting key points, and generating
+            logical counter-perspectives using cutting-edge natural language
+            processing technology.
@@ -157,8 +192,8 @@
             How Perspective Works
 
-            Our advanced AI pipeline processes articles through multiple stages to deliver balanced, fact-checked
-            perspectives.
+            Our advanced AI pipeline processes articles through multiple stages
+            to deliver balanced, fact-checked perspectives.
@@ -199,8 +234,8 @@
               Built with Cutting-Edge Technology
 
-              Powered by the latest in AI, NLP, and web technologies to deliver accurate, fast, and reliable
-              perspective analysis.
+              Powered by the latest in AI, NLP, and web technologies to
+              deliver accurate, fast, and reliable perspective analysis.
@@ -226,8 +261,8 @@
             Ready to See Every Side of the Story?
 
-            Join thousands of readers who are already discovering balanced perspectives and combating bias in online
-            content.
+            Join thousands of readers who are already discovering balanced
+            perspectives and combating bias in online content.