13 changes: 13 additions & 0 deletions README.md
@@ -38,3 +38,16 @@ Open [http://localhost:3000](http://localhost:3000)
## License

MIT

## AI Provider configuration

```bash
# Default provider (OpenAI)
AI_PROVIDER=openai
OPENAI_API_KEY=your_openai_api_key
OPENAI_MODEL=gpt-4o-mini
OPENAI_BASE_URL=https://api.openai.com/v1

# Alternative providers (optional)
ANTHROPIC_API_KEY=your_anthropic_api_key
GEMINI_API_KEY=your_gemini_api_key
GROQ_API_KEY=your_groq_api_key
```
98 changes: 98 additions & 0 deletions app/api/ai/chat/route.ts
@@ -0,0 +1,98 @@
import { NextRequest, NextResponse } from 'next/server';
import { streamChat, ChatMessage } from '@/lib/ai/openai';

// Opt into the Edge runtime. This allows streaming responses with low
// latency and keeps dependencies out of the Node.js layer.
export const runtime = 'edge';

/**
* POST /api/ai/chat
*
* Accepts a JSON body containing a list of chat messages and optional model
* configuration. Invokes the OpenAI chat completion API and streams the
* assistant's response back as raw text. If another AI provider is
* configured via AI_PROVIDER, a 400 will be returned.
*/
export async function POST(req: NextRequest) {
  try {
    const { messages, model, temperature } = await req.json();

    // Basic validation
    if (!Array.isArray(messages)) {
      return NextResponse.json({ success: false, error: 'messages must be an array' }, { status: 400 });
    }

    // Only support openai provider for now
    const provider = process.env.AI_PROVIDER || 'openai';
    if (provider !== 'openai') {
      return NextResponse.json({ success: false, error: `Unsupported AI provider: ${provider}` }, { status: 400 });
    }

    // Call OpenAI and forward the response
    const response = await streamChat({
      messages: messages as ChatMessage[],
      model,
      temperature,
    });

    if (!response.ok || !response.body) {
      let errorMessage: string;
      try {
        const data = await response.json();
        errorMessage = data?.error?.message || response.statusText;
      } catch {
        errorMessage = response.statusText;
      }
      return NextResponse.json({ success: false, error: errorMessage }, { status: response.status });
    }

    // Transform OpenAI's SSE stream into raw text
    const encoder = new TextEncoder();
    const openaiStream = response.body;
    const stream = new ReadableStream<Uint8Array>({
      async start(controller) {
        const reader = openaiStream!.getReader();
        const decoder = new TextDecoder('utf-8');
        let buffer = '';
        const push = (text: string) => {
          controller.enqueue(encoder.encode(text));
        };
        while (true) {
          const { value, done } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          buffer = lines.pop() ?? '';
          for (const line of lines) {
            const trimmed = line.trim();
            if (!trimmed.startsWith('data:')) continue;
            const payload = trimmed.replace(/^data:\s*/, '');
            if (payload === '[DONE]') {
              controller.close();
              return;
            }
            try {
              const parsed = JSON.parse(payload);
              const delta: string = parsed.choices?.[0]?.delta?.content ?? '';
              if (delta) {
                push(delta);
              }
            } catch {
              // Skip malformed lines
            }
          }
        }
        controller.close();
      },
    });

    return new Response(stream, {
      headers: {
        'Content-Type': 'text/plain; charset=utf-8',
      },
    });
  } catch (err) {
    console.error('[api/ai/chat] Error:', err);
    return NextResponse.json({ success: false, error: (err as Error)?.message || 'Internal error' }, { status: 500 });
  }
}
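
For reference (not part of the diff), here is a minimal client-side sketch of how this route could be consumed. The body shape (`messages`, optional `model`/`temperature`) matches the handler above; the relative `/api/ai/chat` URL assumes the caller runs on the same origin as the app.

```ts
// Sketch: stream the assistant reply from /api/ai/chat and collect it into a string.
async function chat(messages: { role: string; content: string }[]): Promise<string> {
  const res = await fetch('/api/ai/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ messages }),
  });
  if (!res.ok || !res.body) {
    const { error } = await res.json().catch(() => ({ error: res.statusText }));
    throw new Error(error);
  }

  // The route streams raw text, so chunks can be decoded and appended directly.
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let reply = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    reply += decoder.decode(value, { stream: true });
  }
  return reply;
}

// Example usage:
// const answer = await chat([{ role: 'user', content: 'Hello!' }]);
```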
36 changes: 36 additions & 0 deletions app/api/health/route.ts
@@ -0,0 +1,36 @@
import { NextResponse } from 'next/server';

export const runtime = 'edge';

export async function GET() {
  const services: Record<string, any> = {};
  const provider = process.env.AI_PROVIDER || 'openai';
  if (provider === 'openai') {
    const model = process.env.OPENAI_MODEL || 'gpt-4o-mini';
    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey) {
      services.openai = { ok: false, model, error: 'Missing OPENAI_API_KEY' };
    } else {
      try {
        const baseUrl = (process.env.OPENAI_BASE_URL?.replace(/\/+$/, '') || 'https://api.openai.com/v1');
        const res = await fetch(`${baseUrl}/models`, { headers: { Authorization: `Bearer ${apiKey}` } });
        if (res.ok) {
          services.openai = { ok: true, model };
        } else {
          let errorMessage: string;
          try {
            const data = await res.json();
            errorMessage = data?.error?.message || res.statusText;
          } catch {
            errorMessage = res.statusText;
          }
          services.openai = { ok: false, model, error: errorMessage };
        }
      } catch (error) {
        services.openai = { ok: false, model, error: (error as Error).message };
      }
    }
  }
  const status = Object.values(services).every((svc: any) => svc?.ok) ? 'ok' : 'error';
  return NextResponse.json({ status, services });
}
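
As a quick sanity check (a sketch, not part of the diff), the endpoint can be queried once the dev server is running on localhost:3000. The `HealthResponse` type below is a hypothetical helper that simply mirrors the JSON shape assembled in the route above.

```ts
// Hypothetical typing of the health-check response built by the route above.
interface HealthResponse {
  status: 'ok' | 'error';
  services: Record<string, { ok: boolean; model?: string; error?: string }>;
}

async function checkHealth(): Promise<HealthResponse> {
  const res = await fetch('http://localhost:3000/api/health');
  return (await res.json()) as HealthResponse;
}

// e.g. { status: 'ok', services: { openai: { ok: true, model: 'gpt-4o-mini' } } }
```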
83 changes: 83 additions & 0 deletions lib/ai/openai.ts
@@ -0,0 +1,83 @@
/**
* Minimal OpenAI client for StarStack.
*
* This module reads configuration from environment variables and exposes a
* helper that performs chat completions with streaming support. It is
* deliberately small and self‑contained to avoid pulling heavy dependencies
* into the Edge runtime.
*
* Expected environment variables:
* - AI_PROVIDER: when set to "openai" this client will be used. Other
* values are ignored.
* - OPENAI_API_KEY: your OpenAI API key (required).
* - OPENAI_MODEL: optional override of the default model. If absent the
* fallback is "gpt-4o-mini" to align with the project default.
* - OPENAI_BASE_URL: optional override for the API base URL. When unset
* the standard https://api.openai.com/v1 endpoint is used.
*/

/**
* Chat message interface compatible with OpenAI's API.
*/
export interface ChatMessage {
  // Literal roles preserve editor autocomplete; `(string & {})` keeps other roles
  // (e.g. 'tool') assignable without collapsing the union to plain `string`.
  role: 'system' | 'user' | 'assistant' | (string & {});
  content: string;
}

/**
* Returns the currently configured OpenAI model. Falls back to
* `gpt-4o-mini` if no override is provided.
*/
export function getDefaultModel(): string {
  return process.env.OPENAI_MODEL || 'gpt-4o-mini';
}

/**
* Internal helper that constructs the full API URL. Allows overriding the
* base via OPENAI_BASE_URL while falling back to the public OpenAI API.
*/
function buildUrl(path: string): string {
  const base = (process.env.OPENAI_BASE_URL?.replace(/\/+$/, '') ||
    'https://api.openai.com/v1');
  return `${base}${path.startsWith('/') ? '' : '/'}${path}`;
}

/**
* Performs a chat completion request against the OpenAI API and returns the
* streaming Response. The returned Response can be piped directly to a
* Next.js API route or consumed manually.
*
* @param messages The chat history. Each message must include a `role`
* ("system" | "user" | "assistant") and `content` string.
* @param model Optional model override. Defaults to getDefaultModel().
* @param temperature Optional sampling temperature. Defaults to 0.5.
*/
export async function streamChat({
  messages,
  model,
  temperature,
}: {
  messages: ChatMessage[];
  model?: string;
  temperature?: number;
}): Promise<Response> {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) {
    throw new Error('Missing OPENAI_API_KEY');
  }
  const resolvedModel = model || getDefaultModel();

  return fetch(buildUrl('/chat/completions'), {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: resolvedModel,
      messages,
      temperature: typeof temperature === 'number' ? temperature : 0.5,
      stream: true,
    }),
  });
}
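
As a usage sketch (not part of the diff), `streamChat` can also be consumed outside the chat route, for example to collect a full completion into a single string. The SSE parsing below deliberately mirrors the line-by-line approach used in `app/api/ai/chat/route.ts`; `completeOnce` is a hypothetical helper name.

```ts
// Sketch: call streamChat directly and accumulate the streamed deltas.
import { streamChat } from '@/lib/ai/openai';

export async function completeOnce(prompt: string): Promise<string> {
  const response = await streamChat({
    messages: [{ role: 'user', content: prompt }],
  });
  if (!response.ok || !response.body) {
    throw new Error(`OpenAI request failed: ${response.statusText}`);
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  let text = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // SSE events are newline-delimited; keep any partial line in the buffer.
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? '';
    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed.startsWith('data:')) continue;
      const payload = trimmed.replace(/^data:\s*/, '');
      if (payload === '[DONE]') continue;
      try {
        text += JSON.parse(payload).choices?.[0]?.delta?.content ?? '';
      } catch {
        // Ignore malformed SSE lines
      }
    }
  }
  return text;
}
```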