Skip to content

Commit 3410b88

Browse files
committed
ui-read
Signed-off-by: Arya Pratap Singh <notaryasingh@gmail.com>
1 parent 0f90060 commit 3410b88

24 files changed

+10903
-2386
lines changed
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
# Public API of the DSA helper package: one entry point per sub-module.
from .complexity_analyzer import analyze_complexity
from .visualizer import generate_visualization
from .test_generator import generate_test_cases

__all__ = ['analyze_complexity', 'generate_visualization', 'generate_test_cases']

backend-services/dsa_agent/agent.py

Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
# langgraph_agent.py
import json
import operator
import os
from typing import Annotated, Any, Dict, List, Sequence, Tuple, TypedDict

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import Tool
from langchain_groq import ChatGroq
from langgraph.graph import Graph, END
from langgraph.prebuilt import ToolExecutor
11+
12+
# Define types for our graph
class AgentState(TypedDict):
    """State carried between graph nodes: the message history and routing key."""

    # operator.add tells langgraph to concatenate message lists when merging
    # state updates rather than replacing them.
    # NOTE(review): annotated as Dict, but the node functions below append
    # HumanMessage objects — confirm the intended element type.
    messages: Annotated[Sequence[Dict], operator.add]
    # Name of the next node to execute ("agent", "tool", "process" or "end").
    next: str
16+
17+
# Initialize Groq LLM.
# The key is read from the environment instead of being hard-coded: the
# previous literal placeholder ("your-groq-api-key") could never
# authenticate and committing real keys to source control leaks secrets.
llm = ChatGroq(
    api_key=os.environ.get("GROQ_API_KEY"),
    model_name="mixtral-8x7b-32768"
)
22+
23+
# Define our tools
24+
def analyze_complexity(code: str) -> Dict[str, str]:
    """Ask the LLM for the time and space complexity of *code*.

    Returns a dict with keys "timeComplexity" and "spaceComplexity".
    Falls back to "Unknown" for either value when the response does not
    contain the expected "Time Complexity:" / "Space Complexity:" markers
    (the previous chained .split(...)[1] raised IndexError in that case).
    """
    prompt = f"Analyze the following code and provide its time and space complexity:\n{code}"
    response = llm.invoke(prompt)
    text = response.content
    # partition() never raises: the separator flag tells us whether the
    # marker was actually present in the reply.
    before_space, space_sep, space_part = text.partition("Space Complexity:")
    _, time_sep, time_part = before_space.partition("Time Complexity:")
    return {
        "timeComplexity": time_part.strip() if time_sep else "Unknown",
        "spaceComplexity": space_part.strip() if space_sep else "Unknown",
    }
32+
33+
def generate_visualization(problem: str) -> str:
    """Ask the LLM for a Mermaid diagram describing the solution approach."""
    request = (
        "Create a Mermaid diagram to visualize the solution approach "
        f"for this problem:\n{problem}"
    )
    reply = llm.invoke(request)
    return reply.content
38+
39+
def generate_test_cases(problem: str) -> List[Dict[str, str]]:
    """Ask the LLM for three test cases for *problem*.

    The reply is expected to be a JSON array; when it is not valid JSON a
    single empty placeholder case is returned instead of letting
    json.loads raise (matches the fallback used by the standalone
    test_generator module).
    """
    prompt = f"Generate 3 diverse test cases for this problem:\n{problem}"
    response = llm.invoke(prompt)
    try:
        return json.loads(response.content)
    except (json.JSONDecodeError, TypeError):
        # LLM replies are not guaranteed to be well-formed JSON.
        return [{"input": "", "output": ""}]
44+
45+
# Each Tool wraps one of the helper functions above so the agent can
# invoke it by name through the ToolExecutor.
tools = [
    Tool(
        name="complexity_analyzer",
        description="Analyzes time and space complexity of code",
        func=analyze_complexity
    ),
    Tool(
        name="visualizer",
        description="Generates Mermaid diagram for visualization",
        func=generate_visualization
    ),
    Tool(
        name="test_generator",
        description="Generates test cases",
        func=generate_test_cases
    )
]

# Create tool executor shared by the graph's "tool" node.
tool_executor = ToolExecutor(tools)
65+
66+
# Define agent functions
67+
def should_use_tool(state: AgentState) -> Tuple[str, str]:
    """Ask the LLM whether another tool is needed.

    Returns ("tool", <tool name>) when the model names a tool, or
    ("end", "END") when it replies with the literal string 'END'.
    """
    history = state["messages"]
    router_prompt = SystemMessage(
        content="What tool should be used next? Reply with 'END' if no tool is needed."
    )
    reply = llm.invoke(history + [router_prompt])
    choice = reply.content
    if choice == "END":
        return "end", choice
    return "tool", choice
75+
76+
def call_tool(state: AgentState, tool_name: str) -> AgentState:
    """Run *tool_name* on the latest message and append its result.

    The tool output is stringified and wrapped in a HumanMessage so it
    re-enters the conversation; control is routed back to the "agent" node.
    """
    messages = state["messages"]
    # Extract relevant info from messages and call tool
    # NOTE(review): recent langgraph ToolExecutor exposes .invoke(...) rather
    # than .execute(name, input) — confirm against the installed version.
    # NOTE(review): graph nodes are normally invoked with the state only;
    # verify how tool_name is supplied when this is wired as a node.
    result = tool_executor.execute(tool_name, messages[-1].content)
    return {
        "messages": messages + [HumanMessage(content=str(result))],
        "next": "agent"
    }
85+
86+
def process_response(state: AgentState) -> AgentState:
    """Produce the final summary message and mark the workflow as finished."""
    history = state["messages"]
    summary_request = SystemMessage(content="Provide final solution summary")
    final_response = llm.invoke(history + [summary_request])
    return {
        "messages": history + [final_response],
        "next": "end",
    }
94+
95+
# Create the graph
workflow = Graph()

# Add nodes: router ("agent"), tool runner ("tool"), finalizer ("process").
workflow.add_node("agent", should_use_tool)
workflow.add_node("tool", call_tool)
workflow.add_node("process", process_response)

# Add edges
# NOTE(review): "agent" has two unconditional outgoing edges ("tool" and
# "process"); langgraph normally requires add_conditional_edges with a
# router function to branch on should_use_tool's result — confirm this
# wiring actually branches as intended.
workflow.add_edge("agent", "tool")
workflow.add_edge("tool", "agent")
workflow.add_edge("agent", "process")
workflow.add_edge("process", END)

# Compile the graph into a runnable app.
app = workflow.compile()
111+
112+
def solve_dsa_problem(problem: str, test_cases: List[Dict[str, str]] = None) -> Dict:
    """Run *problem* through the compiled agent graph.

    Parameters:
        problem: natural-language problem statement.
        test_cases: optional pre-supplied test cases, used as a fallback
            when the agent's final reply cannot be parsed as JSON.

    Returns:
        A dict — either the JSON object parsed from the agent's final
        message, or a fallback structure whose "explanation" carries the
        raw reply text.
    """
    initial_state = {
        "messages": [HumanMessage(content=problem)],
        "next": "agent",
    }
    result = app.invoke(initial_state)

    # The last message is expected to carry the agent's final answer.
    final_message = result["messages"][-1].content
    try:
        response_dict = json.loads(final_message)
    except (json.JSONDecodeError, TypeError):
        # Narrowed from a bare except: only parse failures should trigger
        # the fallback, not unrelated bugs such as NameError or KeyError.
        response_dict = {
            "explanation": final_message,
            "code": "",
            "timeComplexity": "",
            "spaceComplexity": "",
            "visualization": "",
            "testCases": test_cases or [],
        }

    return response_dict
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
import json
from typing import Dict

from langchain_groq import ChatGroq
3+
4+
def analyze_complexity(code: str, llm: "ChatGroq") -> Dict[str, str]:
    """Ask *llm* for the time and space complexity of *code*.

    The model is prompted to answer as a JSON object with keys
    'timeComplexity' and 'spaceComplexity'.  Returns that parsed object,
    or "Unknown" for both values when the reply is not valid JSON.
    """
    prompt = f"""Analyze the following code and provide its time and space complexity.
Format the response as JSON with keys 'timeComplexity' and 'spaceComplexity'.

Code:
{code}
"""

    response = llm.invoke(prompt)

    try:
        # json.loads replaces the previous eval(): evaluating LLM output
        # as Python is arbitrary code execution on untrusted input, and
        # the prompt already requests JSON.
        return json.loads(response.content)
    except (json.JSONDecodeError, TypeError):
        return {
            "timeComplexity": "Unknown",
            "spaceComplexity": "Unknown",
        }
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
from typing import List, Dict
2+
from langchain_groq import ChatGroq
3+
import json
4+
5+
def generate_test_cases(problem: str, llm: "ChatGroq") -> List[Dict[str, str]]:
    """Ask *llm* for three diverse test cases for *problem*.

    The model is prompted to reply with a JSON array of objects carrying
    'input' and 'output' keys.  Returns the parsed array, or a single
    empty placeholder case when the reply is not valid JSON.
    """
    prompt = f"""Generate 3 diverse test cases for this problem.
Include edge cases and normal cases.
Format as JSON array with 'input' and 'output' keys.

Problem:
{problem}
"""

    response = llm.invoke(prompt)

    try:
        return json.loads(response.content)
    except (json.JSONDecodeError, TypeError):
        # Narrowed from a bare except: only JSON parse failures should
        # trigger the placeholder fallback.
        return [{"input": "", "output": ""}]
Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
from langchain_groq import ChatGroq
2+
3+
def generate_visualization(problem: str, llm: ChatGroq) -> str:
    """Ask *llm* for a Mermaid diagram of the solution approach to *problem*."""
    request = (
        "Create a Mermaid diagram to visualize the solution approach "
        "for this problem.\n"
        "Use proper Mermaid syntax and focus on the algorithm's flow.\n"
        "\n"
        "Problem:\n"
        f"{problem}\n"
    )
    reply = llm.invoke(request)
    return reply.content.strip()
Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,73 @@
1+
// import { NextRequest } from "next/server";
2+
// import {
3+
// CopilotRuntime,
4+
// GroqAdapter,
5+
// copilotRuntimeNextJSAppRouterEndpoint,
6+
// } from "@copilotkit/runtime";
7+
8+
// const serviceAdapter = new GroqAdapter({ model: "llama-3.3-70b-versatile" });
9+
10+
// const runtime = new CopilotRuntime({
11+
// remoteEndpoints: [
12+
// {
13+
// url: process.env.REMOTE_ACTION_URL || "http://localhost:8000/copilotkit",
14+
// },
15+
// ],
16+
// });
17+
18+
// export const POST = async (req: NextRequest) => {
19+
// const { handleRequest } = copilotRuntimeNextJSAppRouterEndpoint({
20+
// runtime,
21+
// serviceAdapter,
22+
// endpoint: "/api/copilotkit",
23+
// });
24+
25+
// return handleRequest(req);
26+
// };
27+
28+
import { NextResponse } from 'next/server';
29+
30+
import { analyzeDSAProblem } from '@/lib/python-bridge';
31+
import { createGroqClient } from '@/lib/groq';
32+
33+
// Shared Groq client, created once at module load and reused per request.
const groq = createGroqClient();
34+
35+
export async function POST(req: Request) {
36+
try {
37+
const { question, testCases } = await req.json();
38+
39+
interface DSASolution {
40+
explanation: string;
41+
[key: string]: any;
42+
}
43+
44+
// Call Python agent through API
45+
const solution = await analyzeDSAProblem(question, testCases) as DSASolution;
46+
47+
// Get additional insights from Groq
48+
const additionalInsights = await groq.chat.completions.create({
49+
messages: [
50+
{
51+
role: "system",
52+
content: "You are a DSA expert. Provide additional insights for the solution."
53+
},
54+
{
55+
role: "user",
56+
content: `Problem: ${question}\nSolution: ${solution.explanation}`
57+
}
58+
],
59+
model: "mixtral-8x7b-32768",
60+
});
61+
62+
return NextResponse.json({
63+
...solution,
64+
additionalInsights: additionalInsights.choices[0].message.content
65+
});
66+
} catch (error) {
67+
console.error('Error:', error);
68+
return NextResponse.json(
69+
{ error: 'Failed to process request' },
70+
{ status: 500 }
71+
);
72+
}
73+
}

frontend-interface/app/globals.css

Lines changed: 73 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,19 +3,84 @@
33
@tailwind utilities;

:root {
  --foreground-rgb: 0, 0, 0;
  --background-start-rgb: 214, 219, 220;
  --background-end-rgb: 255, 255, 255;
}

@media (prefers-color-scheme: dark) {
  :root {
    --foreground-rgb: 255, 255, 255;
    --background-start-rgb: 0, 0, 0;
    --background-end-rgb: 0, 0, 0;
  }
}

/* Ensure Comfortaa is used for all text */
body, input, textarea, button {
  font-family: 'Comfortaa', sans-serif;
}

/* Theme tokens stored as HSL components (hue sat% light%), consumed by
   Tailwind utilities such as bg-background / text-foreground below. */
@layer base {
  :root {
    --background: 0 0% 100%;
    --foreground: 0 0% 3.9%;
    --card: 0 0% 100%;
    --card-foreground: 0 0% 3.9%;
    --popover: 0 0% 100%;
    --popover-foreground: 0 0% 3.9%;
    --primary: 0 0% 9%;
    --primary-foreground: 0 0% 98%;
    --secondary: 0 0% 96.1%;
    --secondary-foreground: 0 0% 9%;
    --muted: 0 0% 96.1%;
    --muted-foreground: 0 0% 45.1%;
    --accent: 0 0% 96.1%;
    --accent-foreground: 0 0% 9%;
    --destructive: 0 84.2% 60.2%;
    --destructive-foreground: 0 0% 98%;
    --border: 0 0% 89.8%;
    --input: 0 0% 89.8%;
    --ring: 0 0% 3.9%;
    --chart-1: 12 76% 61%;
    --chart-2: 173 58% 39%;
    --chart-3: 197 37% 24%;
    --chart-4: 43 74% 66%;
    --chart-5: 27 87% 67%;
    --radius: 0.5rem;
  }
  /* Dark-theme overrides, applied when a .dark class is present. */
  .dark {
    --background: 0 0% 3.9%;
    --foreground: 0 0% 98%;
    --card: 0 0% 3.9%;
    --card-foreground: 0 0% 98%;
    --popover: 0 0% 3.9%;
    --popover-foreground: 0 0% 98%;
    --primary: 0 0% 98%;
    --primary-foreground: 0 0% 9%;
    --secondary: 0 0% 14.9%;
    --secondary-foreground: 0 0% 98%;
    --muted: 0 0% 14.9%;
    --muted-foreground: 0 0% 63.9%;
    --accent: 0 0% 14.9%;
    --accent-foreground: 0 0% 98%;
    --destructive: 0 62.8% 30.6%;
    --destructive-foreground: 0 0% 98%;
    --border: 0 0% 14.9%;
    --input: 0 0% 14.9%;
    --ring: 0 0% 83.1%;
    --chart-1: 220 70% 50%;
    --chart-2: 160 60% 45%;
    --chart-3: 30 80% 55%;
    --chart-4: 280 65% 60%;
    --chart-5: 340 75% 55%;
  }
}

@layer base {
  * {
    @apply border-border;
  }
  body {
    @apply bg-background text-foreground;
  }
}
86+

0 commit comments

Comments
 (0)