diff --git a/new-backend/app/modules/langgraph_builder.py b/new-backend/app/modules/langgraph_builder.py
index c74de4f3..8ae343dc 100644
--- a/new-backend/app/modules/langgraph_builder.py
+++ b/new-backend/app/modules/langgraph_builder.py
@@ -9,6 +9,11 @@
 
 
 def build_langgraph():
+    """
+    Constructs and compiles a state graph workflow for sequential text processing.
+
+    The workflow runs sentiment analysis, fact checking, perspective generation, and judgment steps, with conditional reruns based on a score threshold, and concludes by storing and sending the results. Returns the compiled graph, ready for execution.
+    """
     graph = StateGraph()
 
     graph.add_node(
diff --git a/new-backend/app/modules/langgraph_nodes/fact_check.py b/new-backend/app/modules/langgraph_nodes/fact_check.py
index a7c47b97..09b1cf3a 100644
--- a/new-backend/app/modules/langgraph_nodes/fact_check.py
+++ b/new-backend/app/modules/langgraph_nodes/fact_check.py
@@ -1,10 +1,26 @@
 # web search + fact check
 
 def search_web():
+    """
+    Placeholder for a web search function.
+
+    Currently returns None and does not perform any search operations.
+    """
     return None
 
 
 def run_fact_check(state):
+    """
+    Performs a web-based fact check using the provided text and keywords.
+
+    Combines the input text and keywords into a single search query, passes it to search_web (currently a placeholder), and returns the resulting fact snippets with their source URLs.
+
+    Args:
+        state: A dictionary containing "text" (the statement to check) and "keywords" (a list of related terms).
+
+    Returns:
+        A dictionary with a "facts" key mapping to a list of sources, each containing a "snippet" and "url".
+    """
     text = state["text"]
     keywords = state["keywords"]
     results = search_web(text + " " + " ".join(keywords))
diff --git a/new-backend/app/modules/langgraph_nodes/generate_perspective.py b/new-backend/app/modules/langgraph_nodes/generate_perspective.py
index 65fa5413..6454ff2b 100644
--- a/new-backend/app/modules/langgraph_nodes/generate_perspective.py
+++ b/new-backend/app/modules/langgraph_nodes/generate_perspective.py
@@ -19,6 +19,15 @@
 
 
 def generate_perspective(state):
+    """
+    Generates a reasoned opposing perspective on an article using the provided factual snippets.
+
+    Args:
+        state: A dictionary containing "text" (the article) and "facts" (a list of fact dictionaries with "snippet" fields).
+
+    Returns:
+        A dictionary with the generated opposing perspective under the key "perspective".
+    """
     text = state["text"]
     facts = "\n".join([f["snippet"] for f in state["facts"]])
     result = chain.run({"text": text, "facts": facts})
diff --git a/new-backend/app/modules/langgraph_nodes/judge.py b/new-backend/app/modules/langgraph_nodes/judge.py
index 97bf4ebe..9b7f8b4d 100644
--- a/new-backend/app/modules/langgraph_nodes/judge.py
+++ b/new-backend/app/modules/langgraph_nodes/judge.py
@@ -1,4 +1,16 @@
 def judge_perspective(state):
+    """
+    Evaluates the 'perspective' in the given state and assigns a score.
+
+    Assigns a score of 85 if the substring "reasoned" is present in the perspective;
+    otherwise, assigns a score of 40. Returns the score in a dictionary.
+
+    Args:
+        state: A dictionary containing a 'perspective' key.
+
+    Returns:
+        A dictionary with the assigned score under the key 'score'.
+    """
     perspective = state["perspective"]
     # Dummy scoring
     score = 85 if "reasoned" in perspective else 40
diff --git a/new-backend/app/modules/langgraph_nodes/sentiment.py b/new-backend/app/modules/langgraph_nodes/sentiment.py
index 38dd9f16..aba02524 100644
--- a/new-backend/app/modules/langgraph_nodes/sentiment.py
+++ b/new-backend/app/modules/langgraph_nodes/sentiment.py
@@ -4,6 +4,15 @@
 
 
 def run_sentiment(state):
+    """
+    Analyzes the sentiment of the provided text and returns the result.
+
+    Args:
+        state: A dictionary containing a "text" key with the input string to analyze.
+
+    Returns:
+        A dictionary with the sentiment label and its confidence score.
+    """
     text = state["text"]
     result = sentiment_pipeline(text)[0]
     return {"sentiment": result["label"], "sentiment_score": result["score"]}
diff --git a/new-backend/app/modules/langgraph_nodes/store_and_send.py b/new-backend/app/modules/langgraph_nodes/store_and_send.py
index e8645cb4..47543592 100644
--- a/new-backend/app/modules/langgraph_nodes/store_and_send.py
+++ b/new-backend/app/modules/langgraph_nodes/store_and_send.py
@@ -3,6 +3,11 @@
 def store_and_send(state):
     # to store data in vector db
+    """
+    Stores selected data from the input state in a vector database and returns a success status.
+
+    Extracts the "text", "perspective", and "facts" fields from the state dictionary, saves them to the vector database, and returns a dictionary indicating successful completion.
+    """
     save_to_vector_db({
         "text": state["text"],
         "perspective": state["perspective"],
diff --git a/new-backend/app/modules/pipeline.py b/new-backend/app/modules/pipeline.py
index edd4b44a..d3dc3c19 100644
--- a/new-backend/app/modules/pipeline.py
+++ b/new-backend/app/modules/pipeline.py
@@ -6,6 +6,17 @@
 
 
 def run_scraper_pipeline(url: str) -> dict:
+    """
+    Extracts and processes article content from a given URL.
+
+    Retrieves the article from the specified URL, cleans the extracted text, and identifies relevant keywords.
+
+    Args:
+        url: The URL of the article to process.
+
+    Returns:
+        A dictionary with 'cleaned_text' and 'keywords' keys.
+    """
     extractor = Article_extractor(url)
     raw_text = extractor.extract()
@@ -25,6 +36,15 @@ def run_scraper_pipeline(url: str) -> dict:
 
 
 def run_langgraph_workflow(state: dict):
+    """
+    Executes the LangGraph workflow with the provided state.
+
+    Args:
+        state: A dictionary representing the initial state for the workflow.
+
+    Returns:
+        The final state produced by invoking the compiled LangGraph workflow.
+    """
     langgraph_workflow = build_langgraph()
     result = langgraph_workflow.invoke(state)
     return result
diff --git a/new-backend/app/routes/routes.py b/new-backend/app/routes/routes.py
index c443e83c..47925a36 100644
--- a/new-backend/app/routes/routes.py
+++ b/new-backend/app/routes/routes.py
@@ -18,6 +18,12 @@ async def home():
 
 @router.post("/process")
 async def run_pipelines(request: URlRequest):
+    """
+    Processes a URL by extracting article text and running the LangGraph workflow.
+
+    Accepts a request containing a URL, extracts the article text via the scraper pipeline,
+    and processes it through the LangGraph workflow. Returns the workflow result.
+    """
     article_text = run_scraper_pipeline(request.url)
     print(json.dumps(article_text, indent=2))
     data = run_langgraph_workflow(article_text)