From fb9b2562bf2b8a1fcb35d13f2946d7eebb9bd34b Mon Sep 17 00:00:00 2001
From: Yoshiki Miura
Date: Sat, 20 Apr 2024 14:05:02 +0900
Subject: [PATCH 1/2] Add specific API support for writer agent

---
 app/action.tsx            | 37 +++++++++++++++++++++++-----
 bun.lockb                 | Bin 231210 -> 231210 bytes
 lib/agents/researcher.tsx | 10 +++++---
 lib/agents/writer.tsx     | 50 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 88 insertions(+), 9 deletions(-)
 create mode 100644 lib/agents/writer.tsx

diff --git a/app/action.tsx b/app/action.tsx
index 209f7912..ea19eba8 100644
--- a/app/action.tsx
+++ b/app/action.tsx
@@ -10,6 +10,7 @@ import { Spinner } from '@/components/ui/spinner'
 import { Section } from '@/components/section'
 import { FollowupPanel } from '@/components/followup-panel'
 import { inquire, researcher, taskManager, querySuggestor } from '@/lib/agents'
+import { writer } from '@/lib/agents/writer'
 
 async function submit(formData?: FormData, skip?: boolean) {
   'use server'
@@ -20,8 +21,10 @@ async function submit(formData?: FormData, skip?: boolean) {
   const isCollapsed = createStreamableValue(false)
   const messages: ExperimentalMessage[] = aiState.get() as any
-  // Limit the number of messages to 10
-  messages.splice(0, Math.max(messages.length - 10, 0))
+  const useSpecificAPI = process.env.USE_SPECIFIC_API_FOR_WRITER === 'true'
+  const maxMessages = useSpecificAPI ? 5 : 10
+  // Limit the number of messages to maxMessages
+  messages.splice(0, Math.max(messages.length - maxMessages, 0))
   // Get the user input from the form data
   const userInput = skip
     ? `{"action": "skip"}`
     : (formData?.get('input') as string)
@@ -64,19 +67,41 @@
   // Generate the answer
   let answer = ''
+  let toolOutputs = []
   let errorOccurred = false
   const streamText = createStreamableValue<string>()
-  while (answer.length === 0) {
+
+  // If useSpecificAPI is enabled, the researcher only makes tool calls
+  // If no tool is used, this model generates the answer
+  while (
+    useSpecificAPI
+      ? toolOutputs.length === 0 && answer.length === 0
+      : answer.length === 0
+  ) {
     // Search the web and generate the answer
-    const { fullResponse, hasError } = await researcher(
+    const { fullResponse, hasError, toolResponses } = await researcher(
       uiStream,
       streamText,
-      messages
+      messages,
+      useSpecificAPI
     )
     answer = fullResponse
+    toolOutputs = toolResponses
     errorOccurred = hasError
   }
-  streamText.done()
+
+  // If useSpecificAPI is enabled, generate the answer using the specific model
+  if (useSpecificAPI && answer.length === 0) {
+    // Modify the messages to be used by the specific model
+    const modifiedMessages = messages.map(msg =>
+      msg.role === 'tool'
+        ? { ...msg, role: 'assistant', content: JSON.stringify(msg.content) }
+        : msg
+    ) as ExperimentalMessage[]
+    answer = await writer(uiStream, streamText, modifiedMessages)
+  } else {
+    streamText.done()
+  }
 
   if (!errorOccurred) {
     // Generate related queries
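For readers skimming the diff: the `modifiedMessages` mapping above can be read in isolation. A minimal sketch, assuming message shapes from the `ai` package; the history contents here are placeholders, not part of the patch:

```ts
import { ExperimentalMessage } from 'ai'

// Hypothetical history: the researcher has pushed a tool message
// containing its search results.
const history: ExperimentalMessage[] = [
  { role: 'user', content: 'What is Morphic?' },
  { role: 'tool', content: [] as any } // tool-result parts elided
]

// Same mapping as in submit(): tool results are re-labelled as plain
// assistant text, since an OpenAI-compatible writer API without tool
// support may reject messages with the 'tool' role.
const forWriter = history.map(msg =>
  msg.role === 'tool'
    ? { ...msg, role: 'assistant', content: JSON.stringify(msg.content) }
    : msg
) as ExperimentalMessage[]
```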
diff --git a/bun.lockb b/bun.lockb
index 01203a741c792633b45196ed2d7e46fe38301ea7..ddf5ae67d30a1d1d8f68d0524d090011dec6faac 100755
GIT binary patch
delta 31
mcmZ40$G57Fuc3u;3zNGWJ7b)oo`IfmyPq4=c0V`fI}QM&3JJ0R

delta 31
jcmZ40$G57Fuc3u;3zNGWI}-yKwEMX+ZTE9yzT*G@kq-!E

diff --git a/lib/agents/researcher.tsx b/lib/agents/researcher.tsx
index 61375b41..924782dd 100644
--- a/lib/agents/researcher.tsx
+++ b/lib/agents/researcher.tsx
@@ -19,7 +19,8 @@ import { Card } from '@/components/ui/card'
 export async function researcher(
   uiStream: ReturnType<typeof createStreamableUI>,
   streamText: ReturnType<typeof createStreamableValue<string>>,
-  messages: ExperimentalMessage[]
+  messages: ExperimentalMessage[],
+  useSpecificModel?: boolean
 ) {
   const openai = new OpenAI({
     baseUrl: process.env.OPENAI_API_BASE, // optional base URL for proxies etc.
@@ -110,7 +111,10 @@ export async function researcher(
   )
 
-  uiStream.append(answerSection)
+  // Append the answer section if the specific model is not used
+  if (!useSpecificModel) {
+    uiStream.append(answerSection)
+  }
 
   return searchResult
 }
@@ -156,7 +160,7 @@ export async function researcher(
     messages.push({ role: 'tool', content: toolResponses })
   }
 
-  return { result, fullResponse, hasError }
+  return { result, fullResponse, hasError, toolResponses }
 }
 
 async function tavilySearch(
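Why return `toolResponses`? It lets `submit()` detect a successful tool call even when the researcher produced no text. A sketch of the resulting exit condition; the helper name is illustrative, not part of the patch:

```ts
// Mirrors the while-condition in app/action.tsx: with the specific writer
// API, tool output alone ends the researcher retry loop; otherwise the
// loop runs until the researcher generates answer text itself.
function researcherIsDone(
  useSpecificAPI: boolean,
  answer: string,
  toolOutputs: unknown[]
): boolean {
  return useSpecificAPI
    ? answer.length > 0 || toolOutputs.length > 0
    : answer.length > 0
}
```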
diff --git a/lib/agents/writer.tsx b/lib/agents/writer.tsx
new file mode 100644
index 00000000..debd5758
--- /dev/null
+++ b/lib/agents/writer.tsx
@@ -0,0 +1,50 @@
+import { OpenAI } from '@ai-sdk/openai'
+import { createStreamableUI, createStreamableValue } from 'ai/rsc'
+import { ExperimentalMessage, experimental_streamText } from 'ai'
+import { Section } from '@/components/section'
+import { BotMessage } from '@/components/message'
+
+export async function writer(
+  uiStream: ReturnType<typeof createStreamableUI>,
+  streamText: ReturnType<typeof createStreamableValue<string>>,
+  messages: ExperimentalMessage[]
+) {
+  const openai = new OpenAI({
+    baseUrl: process.env.SPECIFIC_API_BASE,
+    apiKey: process.env.SPECIFIC_API_KEY,
+    organization: '' // optional organization
+  })
+
+  let fullResponse = ''
+  const answerSection = (
+    <Section title="Answer">
+      <BotMessage content={streamText.value} />
+    </Section>
+  )
+  uiStream.append(answerSection)
+
+  await experimental_streamText({
+    model: openai.chat(process.env.SPECIFIC_API_MODEL || 'llama3-70b-8192'),
+    maxTokens: 2500,
+    system: `As a professional writer, your job is to generate a comprehensive and informative, yet concise answer of 400 words or less for the given question based solely on the provided search results (URL and content). You must only use information from the provided search results. Use an unbiased and journalistic tone. Combine search results together into a coherent answer. Do not repeat text. If there are any images relevant to your answer, be sure to include them as well. Aim to directly address the user's question, augmenting your response with insights gleaned from the search results.
+    Whenever quoting or referencing information from a specific URL, always cite the source URL explicitly. Please match the language of the response to the user's language.
+    Always answer in Markdown format. Links and images must follow the correct format.
+    Link format: [link text](url)
+    Image format: ![alt text](url)
+    `,
+    messages
+  })
+    .then(async result => {
+      for await (const text of result.textStream) {
+        if (text) {
+          fullResponse += text
+          streamText.update(fullResponse)
+        }
+      }
+    })
+    .finally(() => {
+      streamText.done()
+    })
+
+  return fullResponse
+}
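For reference, a minimal sketch of how `writer` is driven from a server action, mirroring the call site added in `app/action.tsx`; it assumes the `SPECIFIC_API_*` variables are set and the function name is illustrative:

```ts
import { createStreamableUI, createStreamableValue } from 'ai/rsc'
import { ExperimentalMessage } from 'ai'
import { writer } from '@/lib/agents/writer'

async function answerWithWriter(messages: ExperimentalMessage[]) {
  const uiStream = createStreamableUI()
  const streamText = createStreamableValue<string>()

  // writer() appends its own answer section to uiStream, streams partial
  // text through streamText, and calls streamText.done() itself; that is
  // why app/action.tsx only calls streamText.done() when the writer is
  // not used.
  const answer = await writer(uiStream, streamText, messages)
  uiStream.done()
  return answer
}
```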
From bcddbf6cb8f5f66eaa788fd2ae3c3b306bf91221 Mon Sep 17 00:00:00 2001
From: Yoshiki Miura
Date: Sat, 20 Apr 2024 14:18:42 +0900
Subject: [PATCH 2/2] Add support for specifying verified models for the writer

---
 .env.local.example |  9 ++++++++-
 README.md          | 17 ++++++++++++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/.env.local.example b/.env.local.example
index 907a35bd..937fb214 100644
--- a/.env.local.example
+++ b/.env.local.example
@@ -10,4 +10,11 @@
 OPENAI_API_KEY=
 
 # Tavily API Key retrieved here: https://app.tavily.com/home
-TAVILY_API_KEY=
\ No newline at end of file
+TAVILY_API_KEY=
+
+# A specific model can be set for the writer agent only. It must be compatible with the OpenAI API.
+# USE_SPECIFIC_API_FOR_WRITER=true
+# SPECIFIC_API_BASE=
+# SPECIFIC_API_KEY=
+# SPECIFIC_API_MODEL=
+
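As a concrete example, pointing the writer at Groq's OpenAI-compatible endpoint might look like this in `.env.local`; the base URL and model ID are assumptions, so verify them against Groq's documentation:

```
USE_SPECIFIC_API_FOR_WRITER=true
SPECIFIC_API_BASE=https://api.groq.com/openai/v1
SPECIFIC_API_KEY=[YOUR_GROQ_API_KEY]
SPECIFIC_API_MODEL=llama3-70b-8192
```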
diff --git a/README.md b/README.md
index 9117603c..f457dd32 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,7 @@ An AI-powered answer engine with a generative UI.
 - 🧱 [Stack](#-stack)
 - 🚀 [Quickstart](#-quickstart)
 - 🌐 [Deploy](#-deploy)
+- ✅ [Verified models](#-verified-models)
 
 ## 🧱 Stack
@@ -59,9 +60,15 @@
 OPENAI_API_KEY=[YOUR_OPENAI_API_KEY]
 
 # Tavily API Key retrieved here: https://app.tavily.com/home
 TAVILY_API_KEY=[YOUR_TAVILY_API_KEY]
+
+# A specific model can be set for the writer agent only. It must be compatible with the OpenAI API.
+# USE_SPECIFIC_API_FOR_WRITER=true
+# SPECIFIC_API_BASE=
+# SPECIFIC_API_KEY=
+# SPECIFIC_API_MODEL=
 ```
 
-**Note: This project focuses on Generative UI and requires complex output from LLMs. Currently, it's assumed that the official OpenAI models will be used. Although it is possible to set up other models, if you use an OpenAI-compatible model, please do so at your own risk.**
+**Note: This project focuses on Generative UI and requires complex output from LLMs. Currently, it's assumed that the official OpenAI models will be used. Although it's possible to set up other models, we don't guarantee that OpenAI-compatible models will work.**
 
 ### 4. Run app locally
 
 ```bash
 bun dev
 ```
 
 You can now visit http://localhost:3000.
 
 ## 🌐 Deploy
 
 Host your own live version of Morphic with Vercel.
 
 [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fmiurla%2Fmorphic&env=OPENAI_API_KEY,TAVILY_API_KEY)
+
+## ✅ Verified models
+
+List of verified models that can be specified for the writer.
+
+- [Groq](https://console.groq.com/docs/models)
+  - LLaMA3 8b
+  - LLaMA3 70b
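The matching `SPECIFIC_API_MODEL` values are assumed to be Groq's published model IDs; check the Groq docs linked above before relying on them:

```
# LLaMA3 8b
SPECIFIC_API_MODEL=llama3-8b-8192

# LLaMA3 70b
SPECIFIC_API_MODEL=llama3-70b-8192
```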