diff --git a/.github/workflows/deploy-backend-to-hf.yml b/.github/workflows/deploy-backend-to-hf.yml index 2fc6d7fd..26b7f42c 100644 --- a/.github/workflows/deploy-backend-to-hf.yml +++ b/.github/workflows/deploy-backend-to-hf.yml @@ -3,32 +3,37 @@ name: πŸš€ Deploy Backend to HF Space on: push: branches: - - main # or your primary branch + - main paths: - - "backend/**" # only trigger when anything under backend/ changes + - "backend/**" jobs: deploy: runs-on: ubuntu-latest + # set your HF username here (or replace with a secret if you prefer) + env: + HF_USER: Thunder1245 + HF_REPO: perspective-backend + steps: - name: πŸ‘‰ Checkout code uses: actions/checkout@v4 with: fetch-depth: 0 - - name: πŸ”’ Install HF CLI - run: pip install huggingface_hub - - - name: πŸ”‘ HF login - env: - HF_TOKEN: ${{ secrets.HF_TOKEN }} - run: huggingface-cli login --token "$HF_TOKEN" + - name: πŸ” Ensure HF_TOKEN is set + run: | + if [ -z "${{ secrets.HF_TOKEN }}" ]; then + echo "ERROR: HF_TOKEN secret is not set. Add it in repository secrets: Settings β†’ Secrets & variables β†’ Actions." + exit 1 + fi - - name: πŸ“‚ Prepare Space repo + - name: πŸ“‚ Prepare Space repo (clone) env: HF_TOKEN: ${{ secrets.HF_TOKEN }} run: | - rm -rf space-backend + rm -rf space-backend || true + # clone using token in URL (this authenticates the clone) git clone https://Thunder1245:${HF_TOKEN}@huggingface.co/spaces/Thunder1245/perspective-backend.git space-backend - name: πŸ“¦ Install rsync @@ -36,24 +41,29 @@ jobs: sudo apt-get update sudo apt-get install -y rsync - - name: πŸ“€ Sync backend code + - name: πŸ“€ Sync backend code to Space env: HF_TOKEN: ${{ secrets.HF_TOKEN }} run: | + set -e + cd space-backend - # Only remove tracked files (preserve .git and config) + # Remove tracked files while preserving .git and config (ignore failure) git rm -r . || true cd .. - # Copy new backend files in + # Copy backend files into the cloned space directory cp -R backend/. 
space-backend/ - # Push new code to HF Space + # Commit & push cd space-backend git config user.name "github-actions[bot]" git config user.email "github-actions[bot]@users.noreply.github.com" git add --all - git commit -m "Auto‑deploy backend: ${{ github.sha }}" || echo "No changes to commit" + git commit -m "Auto-deploy backend: ${{ github.sha }}" || echo "No changes to commit" git push origin main + - name: βœ… Done + run: | + echo "Backend deployed to Hugging Face Space: https://huggingface.co/spaces/${HF_USER}/${HF_REPO}" diff --git a/README.md b/README.md index c2058d15..0fc88eb9 100644 --- a/README.md +++ b/README.md @@ -2,15 +2,32 @@ ![Perspective banner](frontend/public/perspective_banner.jpg) ### Table of Contents -- [System Overview](#system-overview) -- [Architecture Components](#architecture-components) -- [Technical Stack](#technical-stack) -- [Core Features](#core-features) -- [Data Flow & Security](#data-flow--security) -- [Setup & Deployment](#setup--deployment) -- [Detailed Architecture Diagram](#detailed-architecture-diagram) -- [Expected Outcomes](#expected-outcomes) -- [Required Skills](#required-skills) +- [Perspective-AI](#perspective-ai) + - [Table of Contents](#table-of-contents) + - [System Overview](#system-overview) + - [High-Level Concept](#high-level-concept) + - [Architecture Components](#architecture-components) + - [1. Frontend Layer](#1-frontend-layer) + - [3. Core Backend](#3-core-backend) + - [4. AI \& NLP Integration](#4-ai--nlp-integration) + - [5. Data Storage](#5-data-storage) + - [Technical Stack](#technical-stack) + - [Frontend Technologies](#frontend-technologies) + - [Backend Technologies](#backend-technologies) + - [I Integration](#i-integration) + - [Core Features](#core-features) + - [1. Counter-Perspective Generation](#1-counter-perspective-generation) + - [2. Reasoned Thinking](#2-reasoned-thinking) + - [3. Updated Facts](#3-updated-facts) + - [4. Seamless Integration](#4-seamless-integration) + - [5. 
Real-Time Analysis](#5-real-time-analysis) + - [Data Flow \& Security](#data-flow--security) + - [Setup \& Deployment](#setup--deployment) + - [Frontend Setup](#frontend-setup) + - [Backend Setup](#backend-setup) + - [Architecture Diagram](#architecture-diagram) + - [Expected Outcomes](#expected-outcomes) + - [Required Skills](#required-skills) --- @@ -137,20 +154,25 @@ npm run dev - add .env file in `/new-backend`directory. - add following environment variable in your .env file. ``` - HF_TOKEN = + GROQ_API_KEY= +PINECONE_API_KEY = +PORT = 8000 +SEARCH_KEY = ``` *Run backend:* ```bash -cd new-backend +cd backend uv sync # Creating virtual environment at: .venv uv run main.py #Runs the backend server ``` --- + ## Architecture Diagram + ```mermaid graph TB %% Define Subgraphs with Colors and Text Styles @@ -168,6 +190,7 @@ graph TB Analyzer[Content Analyzer] CNEngine[Counter-Narrative Engine] Context[Context Manager] + end subgraph AI & NLP Layer @@ -212,7 +235,7 @@ graph TB ## Required Skills -- **Frontend Development**: Experience with Next.js and modern UI frameworks. +- **Frontend Development**: Experience with Next.js and modern UI frameworks. - **Backend Development**: Proficiency in Python and FastAPI. - **AI & NLP**: Familiarity with LangChain, Langgraph, and prompt engineering techniques. - **Database Management**: Knowledge of vector databases system. 
diff --git a/backend/app/modules/bias_detection/__init__.py b/backend/app/modules/bias_detection/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/backend/app/modules/bias_detection/check_bias.py b/backend/app/modules/bias_detection/check_bias.py new file mode 100644 index 00000000..b4b39801 --- /dev/null +++ b/backend/app/modules/bias_detection/check_bias.py @@ -0,0 +1,57 @@ +import os +from groq import Groq +from dotenv import load_dotenv +import json + +load_dotenv() + +client = Groq(api_key=os.getenv("GROQ_API_KEY")) + + +def check_bias(text): + try: + print(text) + print(json.dumps(text)) + + if not text: + raise ValueError("Missing or empty 'cleaned_text'") + + chat_completion = client.chat.completions.create( + messages=[ + { + "role": "system", + "content": ( + "You are an assistant that checks " + "if given article is biased and give " + "score to each based on biasness where 0 is lowest bias and 100 is highest bias. " + "Only return a number between 0 to 100 based on bias."
+ "only return Number No Text" + ), + }, + { + "role": "user", + "content": ( + "Give bias score to the following article " + f"\n\n{text}" + ), + }, + ], + model="gemma2-9b-it", + temperature=0.3, + max_tokens=512, + ) + + bias_score = chat_completion.choices[0].message.content.strip() + + return { + "bias_score": bias_score, + "status": "success", + } + + except Exception as e: + print(f"Error in bias_detection: {e}") + return { + "status": "error", + "error_from": "bias_detection", + "message": str(e), + } diff --git a/backend/app/modules/chat/__init__.py b/backend/app/modules/chat/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/backend/app/modules/chat/embed_query.py b/backend/app/modules/chat/embed_query.py new file mode 100644 index 00000000..e1260c9f --- /dev/null +++ b/backend/app/modules/chat/embed_query.py @@ -0,0 +1,10 @@ +from sentence_transformers import SentenceTransformer + +embedder = SentenceTransformer("all-MiniLM-L6-v2") + + +def embed_query(query: str): + + embeddings = embedder.encode(query).tolist() + + return embeddings diff --git a/backend/app/modules/chat/get_rag_data.py b/backend/app/modules/chat/get_rag_data.py new file mode 100644 index 00000000..efaaa60b --- /dev/null +++ b/backend/app/modules/chat/get_rag_data.py @@ -0,0 +1,31 @@ +from pinecone import Pinecone +from dotenv import load_dotenv +from app.modules.chat.embed_query import embed_query +import os + +load_dotenv() + +pc = Pinecone(os.getenv("PINECONE_API_KEY")) +index = pc.Index("perspective") + + +def search_pinecone(query: str, top_k: int = 5): + + embeddings = embed_query(query) + + results = index.query( + vector=embeddings, + top_k=top_k, + include_metadata=True, + namespace="default" + + ) + + matches = [] + for match in results["matches"]: + matches.append({ + "id": match["id"], + "score": match["score"], + "metadata": match["metadata"] + }) + return matches diff --git a/backend/app/modules/chat/llm_processing.py 
b/backend/app/modules/chat/llm_processing.py new file mode 100644 index 00000000..2141905d --- /dev/null +++ b/backend/app/modules/chat/llm_processing.py @@ -0,0 +1,35 @@ +import os +from groq import Groq +from dotenv import load_dotenv + +load_dotenv() + +client = Groq(api_key=os.getenv("GROQ_API_KEY")) + + +def build_context(docs): + + return "\n".join(f"{m['metadata'].get('explanation') or m['metadata'].get('reasoning', '')}"for m in docs) + + +def ask_llm(question, docs): + context = build_context(docs) + print(context) + prompt = f"""You are an assistant that answers based on context. + +Context: +{context} + +Question: +{question} +""" + + response = client.chat.completions.create( + model="gemma2-9b-it", + messages=[ + {"role": "system", "content": "Use only the context to answer."}, + {"role": "user", "content": prompt} + ] + ) + + return response.choices[0].message.content diff --git a/backend/app/modules/vector_store/embed.py b/backend/app/modules/vector_store/embed.py index ee7dfc7d..a5933bcc 100644 --- a/backend/app/modules/vector_store/embed.py +++ b/backend/app/modules/vector_store/embed.py @@ -28,3 +28,4 @@ def embed_chunks(chunks: List[Dict[str, Any]]) -> List[Dict[str, Any]]: "metadata": chunk["metadata"] }) return vectors + diff --git a/backend/app/routes/routes.py b/backend/app/routes/routes.py index c443e83c..4d93a8bb 100644 --- a/backend/app/routes/routes.py +++ b/backend/app/routes/routes.py @@ -2,6 +2,10 @@ from pydantic import BaseModel from app.modules.pipeline import run_scraper_pipeline from app.modules.pipeline import run_langgraph_workflow +from app.modules.bias_detection.check_bias import check_bias +from app.modules.chat.get_rag_data import search_pinecone +from app.modules.chat.llm_processing import ask_llm +import asyncio import json router = APIRouter() @@ -11,14 +15,37 @@ class URlRequest(BaseModel): url: str +class ChatQuery(BaseModel): + message: str + + @router.get("/") async def home(): return {"message": "Perspective API is 
live!"} +@router.post("/bias") +async def bias_detection(request: URlRequest): + content = await asyncio.to_thread(run_scraper_pipeline, (request.url)) + bias_score = await asyncio.to_thread(check_bias, (content)) + print(bias_score) + return bias_score + + @router.post("/process") async def run_pipelines(request: URlRequest): - article_text = run_scraper_pipeline(request.url) + article_text = await asyncio.to_thread(run_scraper_pipeline, (request.url)) print(json.dumps(article_text, indent=2)) - data = run_langgraph_workflow(article_text) + data = await asyncio.to_thread(run_langgraph_workflow, (article_text)) return data + + +@router.post("/chat") +async def answer_query(request: ChatQuery): + + query = request.message + results = search_pinecone(query) + answer = ask_llm(query, results) + print(answer) + + return {"answer": answer} diff --git a/frontend/app/analyze/loading/page.tsx b/frontend/app/analyze/loading/page.tsx index 055a1a08..1d55ace3 100644 --- a/frontend/app/analyze/loading/page.tsx +++ b/frontend/app/analyze/loading/page.tsx @@ -1,12 +1,20 @@ -"use client" +"use client"; -import { useEffect, useState } from "react" -import { useRouter } from "next/navigation" -import { Card } from "@/components/ui/card" -import { Badge } from "@/components/ui/badge" -import { Globe, Brain, Shield, CheckCircle, Database, Sparkles, Zap } from "lucide-react" -import ThemeToggle from "@/components/theme-toggle" -import axios from "axios" +import { useEffect, useState } from "react"; +import { useRouter } from "next/navigation"; +import { Card } from "@/components/ui/card"; +import { Badge } from "@/components/ui/badge"; +import { + Globe, + Brain, + Shield, + CheckCircle, + Database, + Sparkles, + Zap, +} from "lucide-react"; +import ThemeToggle from "@/components/theme-toggle"; +import axios from "axios"; /** * Displays a multi-step animated loading and progress interface for the article analysis workflow. 
@@ -16,10 +24,10 @@ import axios from "axios" * @remark This component manages its own navigation and redirects based on session state. */ export default function LoadingPage() { - const [currentStep, setCurrentStep] = useState(0) - const [progress, setProgress] = useState(0) - const [articleUrl, setArticleUrl] = useState("") - const router = useRouter() + const [currentStep, setCurrentStep] = useState(0); + const [progress, setProgress] = useState(0); + const [articleUrl, setArticleUrl] = useState(""); + const router = useRouter(); const steps = [ { @@ -52,67 +60,88 @@ export default function LoadingPage() { description: "Creating balanced alternative viewpoints", color: "from-pink-500 to-rose-500", }, - ] + ]; useEffect(() => { - const runAnalysis = async () => { - const storedUrl = sessionStorage.getItem("articleUrl") - if (storedUrl) { - setArticleUrl(storedUrl) + const runAnalysis = async () => { + const storedUrl = sessionStorage.getItem("articleUrl"); + if (storedUrl) { + setArticleUrl(storedUrl); - try { - const res = await axios.post("https://Thunder1245-perspective-backend.hf.space/api/process", { - url: storedUrl, - }) + try { + const [processRes, biasRes] = await Promise.all([ + axios.post( + "https://Thunder1245-perspective-backend.hf.space/api/process", + { + url: storedUrl, + } + ), + axios.post( + "https://Thunder1245-perspective-backend.hf.space/api/bias", + { + url: storedUrl, + } + ), + ]); - // Save response to sessionStorage - sessionStorage.setItem("analysisResult", JSON.stringify(res.data)) - // optional logging - console.log("Analysis result saved") - console.log(res) - } catch (err) { - console.error("Failed to process article:", err) - router.push("/analyze") // fallback in case of error - return - } + sessionStorage.setItem("BiasScore", JSON.stringify(biasRes.data)); - // Progress and step simulation - const stepInterval = setInterval(() => { - setCurrentStep((prev) => { - if (prev < steps.length - 1) { - return prev + 1 - } else { -
clearInterval(stepInterval) - setTimeout(() => { - router.push("/analyze/results") - }, 2000) - return prev - } - }) - }, 2000) + console.log("Bias score saved"); + console.log(biasRes); - const progressInterval = setInterval(() => { - setProgress((prev) => { - if (prev < 100) { - return prev + 1 - } - return prev - }) - }, 100) + // Save response to sessionStorage + sessionStorage.setItem( + "analysisResult", + JSON.stringify(processRes.data) + ); - return () => { - clearInterval(stepInterval) - clearInterval(progressInterval) - } - } else { - router.push("/analyze") - } - } + console.log("Analysis result saved"); + console.log(processRes); + + + // optional logging + } catch (err) { + console.error("Failed to process article:", err); + router.push("/analyze"); // fallback in case of error + return; + } + + // Progress and step simulation + const stepInterval = setInterval(() => { + setCurrentStep((prev) => { + if (prev < steps.length - 1) { + return prev + 1; + } else { + clearInterval(stepInterval); + setTimeout(() => { + router.push("/analyze/results"); + }, 2000); + return prev; + } + }); + }, 2000); - runAnalysis() -}, [router]) + const progressInterval = setInterval(() => { + setProgress((prev) => { + if (prev < 100) { + return prev + 1; + } + return prev; + }); + }, 100); + return () => { + clearInterval(stepInterval); + clearInterval(progressInterval); + }; + } else { + router.push("/analyze"); + } + }; + + runAnalysis(); + }, [router]); return (
@@ -162,8 +191,12 @@ export default function LoadingPage() { {/* Article URL Display */}
-

Processing:

-

{articleUrl}

+

+ Processing: +

+

+ {articleUrl} +

{/* Progress Bar */} @@ -171,7 +204,9 @@ export default function LoadingPage() {
@@ -190,8 +225,8 @@ export default function LoadingPage() { index === currentStep ? "bg-white dark:bg-slate-800 shadow-2xl scale-105 ring-2 ring-blue-500/50" : index < currentStep - ? "bg-white/80 dark:bg-slate-800/80 shadow-lg opacity-75" - : "bg-white/40 dark:bg-slate-800/40 shadow-md opacity-50" + ? "bg-white/80 dark:bg-slate-800/80 shadow-lg opacity-75" + : "bg-white/40 dark:bg-slate-800/40 shadow-md opacity-50" }`} >
@@ -200,8 +235,8 @@ export default function LoadingPage() { index === currentStep ? `bg-gradient-to-br ${step.color} animate-pulse shadow-lg` : index < currentStep - ? "bg-gradient-to-br from-emerald-500 to-teal-500 shadow-md" - : "bg-slate-200 dark:bg-slate-700" + ? "bg-gradient-to-br from-emerald-500 to-teal-500 shadow-md" + : "bg-slate-200 dark:bg-slate-700" }`} > {index < currentStep ? ( @@ -221,13 +256,15 @@ export default function LoadingPage() { index === currentStep ? "text-blue-600 dark:text-blue-400" : index < currentStep - ? "text-emerald-600 dark:text-emerald-400" - : "text-slate-500 dark:text-slate-400" + ? "text-emerald-600 dark:text-emerald-400" + : "text-slate-500 dark:text-slate-400" }`} > {step.title} -

{step.description}

+

+ {step.description} +

{index === currentStep && (
@@ -262,5 +299,5 @@ export default function LoadingPage() {
- ) + ); } diff --git a/frontend/app/analyze/page.tsx b/frontend/app/analyze/page.tsx index 541b0dd2..c86c6c9e 100644 --- a/frontend/app/analyze/page.tsx +++ b/frontend/app/analyze/page.tsx @@ -1,15 +1,29 @@ -"use client" +"use client"; -import type React from "react" +import type React from "react"; -import { useState } from "react" -import { Button } from "@/components/ui/button" -import { Input } from "@/components/ui/input" -import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card" -import { Badge } from "@/components/ui/badge" -import { Globe, ArrowRight, Link, Sparkles, Shield, Brain, CheckCircle } from "lucide-react" -import { useRouter } from "next/navigation" -import ThemeToggle from "@/components/theme-toggle" +import { useState } from "react"; +import { Button } from "@/components/ui/button"; +import { Input } from "@/components/ui/input"; +import { + Card, + CardContent, + CardDescription, + CardHeader, + CardTitle, +} from "@/components/ui/card"; +import { Badge } from "@/components/ui/badge"; +import { + Globe, + ArrowRight, + Link, + Sparkles, + Shield, + Brain, + CheckCircle, +} from "lucide-react"; +import { useRouter } from "next/navigation"; +import ThemeToggle from "@/components/theme-toggle"; /** * Renders the main page for submitting an article URL to initiate AI-powered analysis. @@ -17,36 +31,36 @@ import ThemeToggle from "@/components/theme-toggle" * Provides a user interface for entering and validating an article URL, displays real-time feedback on URL validity, and enables users to trigger analysis. Features include a branded header, a hero section, a URL input card with validation, a grid highlighting analysis capabilities, and example article URLs for quick testing. On valid submission, the URL is stored in sessionStorage and the user is navigated to a loading page for further processing. 
*/ export default function AnalyzePage() { - const [url, setUrl] = useState("") - const [isValidUrl, setIsValidUrl] = useState(false) - const router = useRouter() + const [url, setUrl] = useState(""); + const [isValidUrl, setIsValidUrl] = useState(false); + const router = useRouter(); const validateUrl = (inputUrl: string) => { try { - new URL(inputUrl) - setIsValidUrl(true) + new URL(inputUrl); + setIsValidUrl(true); } catch { - setIsValidUrl(false) + setIsValidUrl(false); } - } + }; const handleUrlChange = (e: React.ChangeEvent) => { - const inputUrl = e.target.value - setUrl(inputUrl) + const inputUrl = e.target.value; + setUrl(inputUrl); if (inputUrl.length > 0) { - validateUrl(inputUrl) + validateUrl(inputUrl); } else { - setIsValidUrl(false) + setIsValidUrl(false); } - } + }; const handleAnalyze = () => { if (isValidUrl && url) { // Store the URL in sessionStorage to pass to loading page - sessionStorage.setItem("articleUrl", url) - router.push("/analyze/loading") + sessionStorage.setItem("articleUrl", url); + router.push("/analyze/loading"); } - } + }; const features = [ { @@ -64,7 +78,7 @@ export default function AnalyzePage() { title: "Fact Verification", description: "Cross-references claims with reliable sources", }, - ] + ]; return (
@@ -109,8 +123,8 @@ export default function AnalyzePage() {

- Paste the URL of any online article and get AI-powered bias detection, fact-checking, and alternative - perspectives in seconds. + Paste the URL of any online article and get AI-powered bias + detection, fact-checking, and alternative perspectives in seconds.

@@ -121,7 +135,8 @@ export default function AnalyzePage() { Enter Article URL - Provide the link to the article you want to analyze for bias and alternative perspectives + Provide the link to the article you want to analyze for bias and + alternative perspectives @@ -157,7 +172,9 @@ export default function AnalyzePage() {
{url && !isValidUrl && ( -

Please enter a valid URL

+

+ Please enter a valid URL +

)} @@ -204,8 +221,8 @@ export default function AnalyzePage() {