// ./api/chat.ts
import { Configuration, OpenAIApi } from 'openai-edge'
import { OpenAIStream } from 'ai'
import type { H3Event } from 'h3'
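// Reuse a single OpenAI client across requests; it is created lazily on the first request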
let openai: OpenAIApi

export default defineEventHandler(async (event: H3Event) => {
  if (!openai) {
    // Prefer the Nuxt runtime config key; fall back to the Cloudflare Workers binding
    let apiKey = useRuntimeConfig().openaiApiKey as string
    if (apiKey.length === 0) {
      apiKey = event.context.cloudflare.env.NUXT_OPENAI_API_KEY
    }
    const config = new Configuration({ apiKey })
    openai = new OpenAIApi(config)
  }
  // Extract the chat `messages` from the body of the request
  const { messages } = await readBody(event)
  // Ask OpenAI for a streaming chat completion given the message history
  const response = await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    stream: true,
    // Forward only the fields the API expects, dropping any client-side extras (e.g. ids)
    messages: messages.map((message: any) => ({
      content: message.content,
      role: message.role
    }))
  })
  // Convert the response into a friendly text-stream
  const stream = OpenAIStream(response)
  // Respond with the stream
  return sendStream(event, stream)
})
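
// Bridge a web ReadableStream to the underlying response manually, covering
// both the Cloudflare Workers (unenv) preset and the Node.js server target.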
function sendStream(event: H3Event, stream: ReadableStream) {
  // Mark the event as handled so h3 does not try to send a response itself
  event._handled = true
  // Workers (unenv): hand the stream straight to the platform
  // @ts-expect-error _data will be there.
  event.node.res._data = stream
  // Node.js: pipe the stream into the response chunk by chunk
  if (event.node.res.socket) {
    stream.pipeTo(
      new WritableStream({
        write(chunk) {
          event.node.res.write(chunk)
        },
        close() {
          event.node.res.end()
        }
      })
    )
  }
}
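
// Usage sketch (an assumption, not part of this file): a Nuxt component could
// consume this route with the Vercel AI SDK's Vue bindings, e.g.
//
//   import { useChat } from 'ai/vue'
//   // `api` defaults to '/api/chat', matching this handler's route
//   const { messages, input, handleSubmit } = useChat({ api: '/api/chat' })
//
// Here `messages` holds the streamed chat history, `input` is a writable ref
// for the text field, and `handleSubmit` posts `{ messages }` to this endpoint.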