Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 9 additions & 2 deletions apps/mail/components/create/ai-chat.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -26,12 +26,12 @@ import VoiceChat from './voice';
const renderThread = (thread: { id: string; title: string; snippet: string }) => {
const [, setThreadId] = useQueryState('threadId');
const { data: getThread } = useThread(thread.id);
const [, setAiSidebarOpen] = useQueryState('aiSidebar');
// const [, setAiSidebarOpen] = useQueryState('aiSidebar');
const [, setIsFullScreen] = useQueryState('isFullScreen');

const handleClick = () => {
setThreadId(thread.id);
setAiSidebarOpen(null);
// setAiSidebarOpen(null);
setIsFullScreen(null);
};

Expand Down Expand Up @@ -275,6 +275,7 @@ export function AIChat({
messagesEndRef.current.scrollIntoView({ behavior: 'smooth' });
}
}, []);
const [aiSidebarOpen] = useQueryState('aiSidebar');

const editor = useComposeEditor({
placeholder: 'Ask Zero to do anything...',
Expand All @@ -298,6 +299,12 @@ export function AIChat({
scrollToBottom();
}, [messages, scrollToBottom]);

useEffect(() => {
if (aiSidebarOpen === 'true') {
editor.commands.focus();
}
}, [aiSidebarOpen, editor]);

return (
<div className={cn('flex h-full flex-col', isFullScreen ? 'mx-auto max-w-xl' : '')}>
<div className="no-scrollbar flex-1 overflow-y-auto" ref={messagesContainerRef}>
Expand Down
25 changes: 0 additions & 25 deletions apps/mail/components/mail/mail.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -433,11 +433,6 @@ export function MailLayout() {
}, [threadId, enableScope, disableScope]);
const [, setActiveReplyId] = useQueryState('activeReplyId');

const handleClose = useCallback(() => {
setThreadId(null);
setActiveReplyId(null);
}, [setThreadId]);

// Add mailto protocol handler registration
useEffect(() => {
// Register as a mailto protocol handler if browser supports it
Expand Down Expand Up @@ -626,26 +621,19 @@ export function MailLayout() {

function BulkSelectActions() {
const t = useTranslations();
const [errorQty, setErrorQty] = useState(0);
const [threadId, setThreadId] = useQueryState('threadId');
const [isLoading, setIsLoading] = useState(false);
const [isUnsub, setIsUnsub] = useState(false);
const [mail, setMail] = useMail();
const params = useParams<{ folder: string }>();
const folder = params?.folder ?? 'inbox';
const [{ refetch: refetchThreads }] = useThreads();
const { refetch: refetchStats } = useStats();
const trpc = useTRPC();
const { mutateAsync: markAsImportant } = useMutation(trpc.mail.markAsImportant.mutationOptions());
const { mutateAsync: bulkDeleteThread } = useMutation(trpc.mail.bulkDelete.mutationOptions());
const queryClient = useQueryClient();
const {
optimisticMarkAsRead,
optimisticToggleStar,
optimisticMoveThreadsTo,
optimisticDeleteThreads,
} = useOptimisticActions();
const [, setBackgroundQueue] = useAtom(backgroundQueueAtom);

const handleMassUnsubscribe = async () => {
setIsLoading(true);
Expand All @@ -659,7 +647,6 @@ function BulkSelectActions() {
if (firstEmail)
return handleUnsubscribe({ emailData: firstEmail }).catch((e) => {
toast.error(e.message ?? 'Unknown error while unsubscribing');
setErrorQty((eq) => eq++);
});
}
}),
Expand All @@ -678,18 +665,6 @@ function BulkSelectActions() {
);
};

const onMoveSuccess = useCallback(async () => {
if (threadId && mail.bulkSelected.includes(threadId)) setThreadId(null);
refetchThreads();
refetchStats();
await Promise.all(
mail.bulkSelected.map((threadId) =>
queryClient.invalidateQueries({ queryKey: trpc.mail.get.queryKey({ id: threadId }) }),
),
);
setMail({ ...mail, bulkSelected: [] });
}, [mail, setMail, refetchThreads, refetchStats, threadId, setThreadId]);

return (
<div className="flex items-center gap-2">
<button
Expand Down
16 changes: 1 addition & 15 deletions apps/mail/components/ui/ai-sidebar.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -290,26 +290,17 @@ export function useAISidebar() {
[setViewModeQuery],
);

// Function to set open state and save to localStorage
const setOpen = useCallback(
(openState: boolean) => {
// For closing, we need to handle state updates more carefully
if (!openState) {
// First remove from localStorage immediately
if (typeof window !== 'undefined') {
localStorage.removeItem('ai-sidebar-open');
}

// Use setTimeout to ensure the query update happens in the next tick
// This helps prevent the need for double-clicking
setTimeout(() => {
setOpenQuery(null).catch(console.error);
}, 0);
} else {
// For opening, we can use the normal flow
setOpenQuery('true').catch(console.error);

// Save to localStorage
if (typeof window !== 'undefined') {
localStorage.setItem('ai-sidebar-open', 'true');
}
Expand All @@ -318,13 +309,8 @@ export function useAISidebar() {
[setOpenQuery],
);

// Toggle open state
const toggleOpen = useCallback(() => {
const newState = !(open === 'true');
setOpen(newState);
}, [open, setOpen]);
const toggleOpen = useCallback(() => setOpen(open !== 'true'), [open, setOpen]);

// Sync with query parameters on mount or when they change
useEffect(() => {
if (viewModeQuery && viewModeQuery !== viewMode) {
setViewModeState(viewModeQuery as ViewMode);
Expand Down
1 change: 1 addition & 0 deletions apps/server/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
},
"dependencies": {
"@ai-sdk/google": "^1.2.18",
"@ai-sdk/groq": "1.2.9",
"@ai-sdk/openai": "^1.3.21",
"@ai-sdk/perplexity": "1.1.9",
"@ai-sdk/ui-utils": "1.2.11",
Expand Down
3 changes: 2 additions & 1 deletion apps/server/src/routes/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import { connection } from '../db/schema';
import { env } from 'cloudflare:workers';
import { openai } from '@ai-sdk/openai';
import { McpAgent } from 'agents/mcp';
import { groq } from '@ai-sdk/groq';
import { eq } from 'drizzle-orm';
import { createDb } from '../db';
import { z } from 'zod';
Expand Down Expand Up @@ -57,7 +58,7 @@ export class ZeroAgent extends AIChatAgent<typeof env> {
);

const result = streamText({
model: openai('gpt-4o'),
model: groq('meta-llama/llama-4-maverick-17b-128e-instruct'),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💡 Verification agent

🧩 Analysis chain

Model migration is implemented correctly, but it is incomplete.

The migration to Groq's model is implemented correctly in the ZeroAgent class. However, this creates an inconsistent state in the codebase.

This file still has OpenAI model usage at lines 131 and 445 in the buildGmailSearchQuery implementations. Consider:

  1. Complete the migration by updating all remaining OpenAI usages to Groq
  2. Document the rationale if different models should be used for different operations
  3. Extract the model name to a constant to avoid repetition
+const GROQ_MODEL = 'meta-llama/llama-4-maverick-17b-128e-instruct';

-          model: groq('meta-llama/llama-4-maverick-17b-128e-instruct'),
+          model: groq(GROQ_MODEL),

Verify all OpenAI model usages in the codebase to ensure consistent migration:


🏁 Script executed:

#!/bin/bash
# Search for remaining OpenAI model usage patterns
rg -A 2 -B 2 "openai\('.*'\)" apps/server/src/
rg -A 2 -B 2 "gpt-4o" apps/server/src/

Length of output: 2675


Complete Groq Model Migration Across the Codebase

The migration to Groq’s model in ZeroAgent is correct, but there are still OpenAI calls that need updating:

• apps/server/src/trpc/routes/ai/compose.ts
– model: openai('gpt-4o-mini')
– model: openai('gpt-4o')
• apps/server/src/routes/chat.ts
– model: openai('gpt-4o') in both generateText and generateObject calls

Recommendations:

  1. Extract the Groq model name to a constant:
    const GROQ_MODEL = 'meta-llama/llama-4-maverick-17b-128e-instruct';
  2. Replace all openai('…') usages with groq(GROQ_MODEL) — or with a different model where needed, but document any intentional divergence.
  3. Add/update a comment in each file explaining why Groq is used instead of OpenAI.

Example diff (adapt for each occurrence):

+ const GROQ_MODEL = 'meta-llama/llama-4-maverick-17b-128e-instruct';

-    model: openai('gpt-4o'),
+    model: groq(GROQ_MODEL),
🤖 Prompt for AI Agents
In apps/server/src/routes/chat.ts at line 61 and also at lines 131 and 445, the
code still uses OpenAI models in the buildGmailSearchQuery implementations,
causing inconsistency after migrating to Groq models. To fix this, extract the
Groq model name 'meta-llama/llama-4-maverick-17b-128e-instruct' into a constant
at the top of the file, replace all openai('gpt-4o') usages with
groq(GROQ_MODEL), and add a comment explaining why Groq is used instead of
OpenAI for clarity and consistency.

messages: processedMessages,
tools,
onFinish,
Expand Down
9 changes: 3 additions & 6 deletions apps/server/src/trpc/routes/ai/search.ts
Original file line number Diff line number Diff line change
@@ -1,17 +1,14 @@
import { type CoreMessage, generateText, tool, generateObject } from 'ai';
import { GmailSearchAssistantSystemPrompt } from '../../../lib/prompts';
import { activeDriverProcedure } from '../../trpc';
import type { gmail_v1 } from '@googleapis/gmail';
import { TRPCError } from '@trpc/server';
import { openai } from '@ai-sdk/openai';
import dedent from 'dedent';
import { groq } from '@ai-sdk/groq';
import { generateObject } from 'ai';
import { z } from 'zod';

export const generateSearchQuery = activeDriverProcedure
.input(z.object({ query: z.string() }))
.mutation(async ({ input }) => {
const result = await generateObject({
model: openai('gpt-4o'),
model: groq('meta-llama/llama-4-maverick-17b-128e-instruct'),
system: GmailSearchAssistantSystemPrompt(),
prompt: input.query,
schema: z.object({
Expand Down
15 changes: 15 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.