Merge pull request #354 from miurla/refactor-workflow
Refactoring the agent's workflow
miurla authored Sep 30, 2024
2 parents 4908dce + ba6e64e commit 5df58cf
Showing 7 changed files with 102 additions and 412 deletions.
.env.local.example (6 changes: 0 additions & 6 deletions)
@@ -61,12 +61,6 @@ SEARXNG_SAFESEARCH=0 # Safe search setting: 0 (off), 1 (moderate), 2 (strict)
# OLLAMA_MODEL=[YOUR_OLLAMA_MODEL] # The main model to use. Currently compatible: qwen2.5
# OLLAMA_BASE_URL=[YOUR_OLLAMA_URL] # The base URL to use. e.g. http://localhost:11434

# Only writers can set a specific model. It must be compatible with the OpenAI API.
# USE_SPECIFIC_API_FOR_WRITER=true
# SPECIFIC_API_BASE=[YOUR_API_BASE] # e.g. https://api.groq.com/openai/v1
# SPECIFIC_API_KEY=[YOUR_API_KEY]
# SPECIFIC_API_MODEL=[YOUR_API_MODEL] # e.g. llama-3.1-70b-versatile

# enable the share feature
# If you enable this feature, separate account management implementation is required.
# ENABLE_SHARE=true
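With the writer-specific API variables removed, provider selection now depends only on the two Ollama variables kept above. A minimal sketch of that check, mirroring the condition used in `lib/actions/workflow.tsx` further down (the values in the comment are placeholders):

```ts
// Ollama is selected only when both variables are set; otherwise the
// default researcher path is used.
const useOllama = !!(process.env.OLLAMA_MODEL && process.env.OLLAMA_BASE_URL)

// For example, OLLAMA_MODEL=qwen2.5 and OLLAMA_BASE_URL=http://localhost:11434
// make useOllama true and route the request to researcherWithOllama.
```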
README.md (10 changes: 0 additions & 10 deletions)
@@ -30,8 +30,6 @@ An AI-powered search engine with a generative UI.
- Azure OpenAI Provider [](https://github.com/miurla/morphic/issues/13)
- Anthropic Provider [](https://github.com/miurla/morphic/pull/239)
- Ollama Provider [](https://github.com/miurla/morphic/issues/215#issuecomment-2381902347)
- Specify the model to generate answers
- Groq API support [](https://github.com/miurla/morphic/pull/58)
- Local Redis support
- SearXNG Search API support with customizable depth (basic or advanced)
- Configurable search depth (basic or advanced)
@@ -235,11 +233,3 @@ engines:
- Claude 3.5 Sonnet
- Ollama
- qwen2.5
### List of verified models that can be specified to writers:
- [Groq](https://console.groq.com/docs/models)
- LLaMA3.1 8b
- LLaMA3.1 70B
- LLaMA3 8b
- LLaMA3 70b
app/actions.tsx (9 changes: 3 additions & 6 deletions)
@@ -21,6 +21,8 @@ import { VideoSearchSection } from '@/components/video-search-section'
import { AnswerSection } from '@/components/answer-section'
import { workflow } from '@/lib/actions/workflow'

const MAX_MESSAGES = 6

async function submit(
formData?: FormData,
skip?: boolean,
@@ -48,13 +50,8 @@ async function submit(
return { role, content } as CoreMessage
})

const useSpecificAPI = process.env.USE_SPECIFIC_API_FOR_WRITER === 'true'
const useOllamaProvider = !!(
process.env.OLLAMA_MODEL && process.env.OLLAMA_BASE_URL
)
const maxMessages = useSpecificAPI ? 5 : useOllamaProvider ? 1 : 10
// Limit the number of messages to the maximum
messages.splice(0, Math.max(messages.length - maxMessages, 0))
messages.splice(0, Math.max(messages.length - MAX_MESSAGES, 0))
// Get the user input from the form data
const userInput = skip
? `{"action": "skip"}`
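As a quick illustration of the new fixed limit, here is a self-contained sketch of the trimming behaviour that `MAX_MESSAGES` drives. The helper name `trimMessages` is illustrative only; `submit` performs the `splice` inline as shown in the diff above.

```ts
import { CoreMessage } from 'ai'

const MAX_MESSAGES = 6

// Drop the oldest entries so that at most MAX_MESSAGES remain,
// mutating the array in place exactly like the splice call in submit().
function trimMessages(messages: CoreMessage[]): CoreMessage[] {
  messages.splice(0, Math.max(messages.length - MAX_MESSAGES, 0))
  return messages
}

// Example: an 8-message history keeps only its last 6 entries.
const history: CoreMessage[] = []
for (let i = 0; i < 8; i++) {
  if (i % 2 === 0) {
    history.push({ role: 'user', content: `message ${i}` })
  } else {
    history.push({ role: 'assistant', content: `message ${i}` })
  }
}
trimMessages(history)
console.log(history.length) // 6
console.log(history[0].content) // "message 2"
```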
lib/actions/workflow.tsx (243 changes: 26 additions & 217 deletions)
@@ -4,21 +4,17 @@ import React from 'react'
import { Spinner } from '@/components/ui/spinner'
import { Section } from '@/components/section'
import { FollowupPanel } from '@/components/followup-panel'
import { AnswerSection } from '@/components/answer-section'
import { ErrorCard } from '@/components/error-card'
import { transformToolMessages } from '@/lib/utils'
import {
querySuggestor,
inquire,
researcher,
taskManager,
ollamaResearcher
researcherWithOllama,
researcher
} from '@/lib/agents'
import { createStreamableValue, createStreamableUI } from 'ai/rsc'
import { CoreMessage, generateId, ToolResultPart } from 'ai'
import { writer } from '../agents/writer'
import { CoreMessage, generateId } from 'ai'

export async function defaultWorkflow(
export async function workflow(
uiState: {
uiStream: ReturnType<typeof createStreamableUI>
isCollapsed: ReturnType<typeof createStreamableValue>
@@ -29,11 +25,13 @@ export async function defaultWorkflow(
skip: boolean
) {
const { uiStream, isCollapsed, isGenerating } = uiState
// Show the spinner
const id = generateId()

// Display spinner
uiStream.append(<Spinner />)

let action = { object: { next: 'proceed' } }
// If the user skips the task, we proceed to the search
// If the user does not skip the task, run the task manager
if (!skip) action = (await taskManager(messages)) ?? action

if (action.object.next === 'inquire') {
@@ -55,220 +53,48 @@

isCollapsed.done(false)
isGenerating.done(false)
uiStream.done()
return
}

// Set the collapsed state to true
isCollapsed.done(true)

// Generate the answer
let answer = ''
let stopReason = ''
let toolOutputs: ToolResultPart[] = []
let errorOccurred = false

const streamText = createStreamableValue<string>()

// If ANTHROPIC_API_KEY is set, update the UI with the answer
// If not, update the UI with a div
if (process.env.ANTHROPIC_API_KEY) {
uiStream.update(
<AnswerSection result={streamText.value} hasHeader={false} />
)
} else {
uiStream.update(<div />)
}

// Determine the API usage based on environment variables
const useSpecificAPI = process.env.USE_SPECIFIC_API_FOR_WRITER === 'true'
const useOllamaProvider = !!(
process.env.OLLAMA_MODEL && process.env.OLLAMA_BASE_URL
)
const maxMessages = useSpecificAPI ? 5 : useOllamaProvider ? 1 : 10
// Limit the number of messages to the maximum
messages.splice(0, Math.max(messages.length - maxMessages, 0))

// If useSpecificAPI is enabled, only function calls will be made
// If not using a tool, this model generates the answer
while (
useSpecificAPI
? toolOutputs.length === 0 && answer.length === 0 && !errorOccurred
: (stopReason !== 'stop' || answer.length === 0) && !errorOccurred
) {
// Search the web and generate the answer
const { fullResponse, hasError, toolResponses, finishReason } =
await researcher(uiStream, streamText, messages)
stopReason = finishReason || ''
answer = fullResponse
toolOutputs = toolResponses
errorOccurred = hasError

if (toolOutputs.length > 0) {
toolOutputs.map(output => {
aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{
id: generateId(),
role: 'tool',
content: JSON.stringify(output.result),
name: output.toolName,
type: 'tool'
}
]
})
})
}
}

// If useSpecificAPI is enabled, generate the answer using the specific model
if (useSpecificAPI && answer.length === 0 && !errorOccurred) {
// Modify the messages to be used by the specific model
const modifiedMessages = transformToolMessages(messages)
const latestMessages = modifiedMessages.slice(maxMessages * -1)
const { response, hasError } = await writer(uiStream, latestMessages)
answer = response
errorOccurred = hasError
messages.push({
role: 'assistant',
content: answer
})
}

if (!errorOccurred) {
const useGoogleProvider = process.env.GOOGLE_GENERATIVE_AI_API_KEY
const useOllamaProvider = !!(
process.env.OLLAMA_MODEL && process.env.OLLAMA_BASE_URL
)
let processedMessages = messages
// If using Google provider, we need to modify the messages
if (useGoogleProvider) {
processedMessages = transformToolMessages(messages)
}
if (useOllamaProvider) {
processedMessages = [{ role: 'assistant', content: answer }]
}

streamText.done()
aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
{
id: generateId(),
role: 'assistant',
content: answer,
type: 'answer'
}
]
})

// Generate related queries
const relatedQueries = await querySuggestor(uiStream, processedMessages)
// Add follow-up panel
uiStream.append(
<Section title="Follow-up">
<FollowupPanel />
</Section>
)

aiState.done({
...aiState.get(),
messages: [
...aiState.get().messages,
{
id: generateId(),
role: 'assistant',
content: JSON.stringify(relatedQueries),
type: 'related'
},
{
id: generateId(),
role: 'assistant',
content: 'followup',
type: 'followup'
}
]
})
} else {
aiState.done(aiState.get())
streamText.done()
uiStream.append(
<ErrorCard
errorMessage={answer || 'An error occurred. Please try again.'}
/>
)
}

isGenerating.done(false)
uiStream.done()
}

export const ollamaWorkflow = async (
uiState: {
uiStream: ReturnType<typeof createStreamableUI>
isCollapsed: ReturnType<typeof createStreamableValue>
isGenerating: ReturnType<typeof createStreamableValue>
},
aiState: any,
messages: CoreMessage[],
skip: boolean
) => {
const { uiStream, isCollapsed, isGenerating } = uiState
let action = { object: { next: 'proceed' } }
// If the user skips the task, we proceed to the search
if (!skip) action = (await taskManager(messages)) ?? action

if (action.object.next === 'inquire') {
// Generate inquiry
const inquiry = await inquire(uiStream, messages)
isCollapsed.done(false)
isGenerating.done(false)
uiStream.done()
aiState.done({
...aiState.get(),
messages: [
...aiState.get().messages,
{
id: generateId(),
role: 'assistant',
content: `inquiry: ${inquiry?.question}`,
type: 'inquiry'
}
]
})
return
}

// Set the collapsed state to true
isCollapsed.done(true)

const { text, toolResults } = await ollamaResearcher(uiStream, messages)
const useOllama = process.env.OLLAMA_MODEL && process.env.OLLAMA_BASE_URL
// Select the appropriate researcher function based on the environment variables
const { text, toolResults } = useOllama
? await researcherWithOllama(uiStream, messages)
: await researcher(uiStream, messages)

aiState.update({
...aiState.get(),
messages: [
...aiState.get().messages,
...toolResults.map((toolResult: any) => ({
id: generateId(),
id,
role: 'tool',
content: JSON.stringify(toolResult.result),
name: toolResult.toolName,
type: 'tool'
})),
{
id: generateId(),
id,
role: 'assistant',
content: text,
type: 'answer'
}
]
})

const messagesWithAnswer: CoreMessage[] = [
...messages,
{
role: 'assistant',
content: text
}
]

// Generate related queries
const relatedQueries = await querySuggestor(uiStream, messages)
const relatedQueries = await querySuggestor(uiStream, messagesWithAnswer)
// Add follow-up panel
uiStream.append(
<Section title="Follow-up">
@@ -281,34 +107,17 @@ export const ollamaWorkflow = async (
messages: [
...aiState.get().messages,
{
id: generateId(),
id,
role: 'assistant',
content: JSON.stringify(relatedQueries),
type: 'related'
},
{
id: generateId(),
id,
role: 'assistant',
content: 'followup',
type: 'followup'
}
]
})
}

export async function workflow(
uiState: {
uiStream: ReturnType<typeof createStreamableUI>
isCollapsed: ReturnType<typeof createStreamableValue>
isGenerating: ReturnType<typeof createStreamableValue>
},
aiState: any,
messages: CoreMessage[],
skip: boolean
) {
if (process.env.OLLAMA_MODEL && process.env.OLLAMA_BASE_URL) {
return ollamaWorkflow(uiState, aiState, messages, skip)
}

return defaultWorkflow(uiState, aiState, messages, skip)
}
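For orientation, a rough sketch of how a caller such as `submit` in `app/actions.tsx` can drive the consolidated `workflow`. The streamable wiring and the `aiState` stub below are assumptions for illustration; the exact call site is not part of this diff.

```ts
import { createStreamableUI, createStreamableValue } from 'ai/rsc'
import { CoreMessage } from 'ai'
import { workflow } from '@/lib/actions/workflow'

// Hypothetical helper mirroring the signature the refactored workflow expects.
async function runWorkflow(aiState: any, messages: CoreMessage[], skip = false) {
  const uiStream = createStreamableUI()
  const isCollapsed = createStreamableValue(false)
  const isGenerating = createStreamableValue(true)

  // workflow() picks researcher vs. researcherWithOllama from the Ollama
  // env vars and streams sections into uiStream (see the diff above).
  await workflow({ uiStream, isCollapsed, isGenerating }, aiState, messages, skip)
}
```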