From 6f4f4e22f0b6858822fef3878c9704a23fc78450 Mon Sep 17 00:00:00 2001 From: Adam Gough <77861281+aadamgough@users.noreply.github.com> Date: Tue, 16 Dec 2025 16:08:56 -0800 Subject: [PATCH 01/15] fix(loop): increased max loop iterations to 1000 (#2413) --- .../panel/components/editor/hooks/use-subflow-editor.ts | 2 +- apps/sim/hooks/use-collaborative-workflow.ts | 2 +- apps/sim/stores/workflows/workflow/store.test.ts | 6 +++--- apps/sim/stores/workflows/workflow/store.ts | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/hooks/use-subflow-editor.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/hooks/use-subflow-editor.ts index 85bf822007..ac3e64e8c7 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/hooks/use-subflow-editor.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/hooks/use-subflow-editor.ts @@ -26,7 +26,7 @@ const SUBFLOW_CONFIG = { }, typeKey: 'loopType' as const, storeKey: 'loops' as const, - maxIterations: 100, + maxIterations: 1000, configKeys: { iterations: 'iterations' as const, items: 'forEachItems' as const, diff --git a/apps/sim/hooks/use-collaborative-workflow.ts b/apps/sim/hooks/use-collaborative-workflow.ts index 149965ec72..6a2cfcc4d3 100644 --- a/apps/sim/hooks/use-collaborative-workflow.ts +++ b/apps/sim/hooks/use-collaborative-workflow.ts @@ -1540,7 +1540,7 @@ export function useCollaborativeWorkflow() { const config = { id: nodeId, nodes: childNodes, - iterations: Math.max(1, Math.min(100, count)), // Clamp between 1-100 for loops + iterations: Math.max(1, Math.min(1000, count)), // Clamp between 1-1000 for loops loopType: currentLoopType, forEachItems: currentCollection, } diff --git a/apps/sim/stores/workflows/workflow/store.test.ts b/apps/sim/stores/workflows/workflow/store.test.ts index 21c13539c8..36ae556435 100644 --- a/apps/sim/stores/workflows/workflow/store.test.ts +++ b/apps/sim/stores/workflows/workflow/store.test.ts @@ -109,7 +109,7 @@ describe('workflow store', () => { expect(state.loops.loop1.forEachItems).toEqual(['item1', 'item2', 'item3']) }) - it('should clamp loop count between 1 and 100', () => { + it('should clamp loop count between 1 and 1000', () => { const { addBlock, updateLoopCount } = useWorkflowStore.getState() // Add a loop block @@ -126,9 +126,9 @@ describe('workflow store', () => { ) // Try to set count above max - updateLoopCount('loop1', 150) + updateLoopCount('loop1', 1500) let state = useWorkflowStore.getState() - expect(state.blocks.loop1?.data?.count).toBe(100) + expect(state.blocks.loop1?.data?.count).toBe(1000) // Try to set count below min updateLoopCount('loop1', 0) diff --git a/apps/sim/stores/workflows/workflow/store.ts b/apps/sim/stores/workflows/workflow/store.ts index 8b3bd73df8..5adcd6dd73 100644 --- a/apps/sim/stores/workflows/workflow/store.ts +++ b/apps/sim/stores/workflows/workflow/store.ts @@ -850,7 +850,7 @@ export const useWorkflowStore = create()( ...block, data: { ...block.data, - count: Math.max(1, Math.min(100, count)), // Clamp between 1-100 + count: Math.max(1, Math.min(1000, count)), // Clamp between 1-1000 }, }, } From fdbf8be79b461b8675fd4ba85c244eeef0fec7f7 Mon Sep 17 00:00:00 2001 From: Waleed Date: Tue, 16 Dec 2025 18:18:46 -0800 Subject: [PATCH 02/15] fix(logs-search): restored support for log search queries (#2417) --- apps/sim/app/api/logs/route.ts | 120 
+++++++++----- .../logs-toolbar/components/search/search.tsx | 154 +++++++----------- .../logs/hooks/use-search-state.ts | 53 +----- apps/sim/lib/logs/query-parser.ts | 8 +- apps/sim/lib/logs/search-suggestions.ts | 33 +--- 5 files changed, 156 insertions(+), 212 deletions(-) diff --git a/apps/sim/app/api/logs/route.ts b/apps/sim/app/api/logs/route.ts index 9651e9a286..333b359742 100644 --- a/apps/sim/app/api/logs/route.ts +++ b/apps/sim/app/api/logs/route.ts @@ -6,7 +6,22 @@ import { workflowDeploymentVersion, workflowExecutionLogs, } from '@sim/db/schema' -import { and, desc, eq, gte, inArray, isNotNull, isNull, lte, or, type SQL, sql } from 'drizzle-orm' +import { + and, + desc, + eq, + gt, + gte, + inArray, + isNotNull, + isNull, + lt, + lte, + ne, + or, + type SQL, + sql, +} from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { getSession } from '@/lib/auth' @@ -22,14 +37,19 @@ const QueryParamsSchema = z.object({ limit: z.coerce.number().optional().default(100), offset: z.coerce.number().optional().default(0), level: z.string().optional(), - workflowIds: z.string().optional(), // Comma-separated list of workflow IDs - folderIds: z.string().optional(), // Comma-separated list of folder IDs - triggers: z.string().optional(), // Comma-separated list of trigger types + workflowIds: z.string().optional(), + folderIds: z.string().optional(), + triggers: z.string().optional(), startDate: z.string().optional(), endDate: z.string().optional(), search: z.string().optional(), workflowName: z.string().optional(), folderName: z.string().optional(), + executionId: z.string().optional(), + costOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(), + costValue: z.coerce.number().optional(), + durationOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(), + durationValue: z.coerce.number().optional(), workspaceId: z.string(), }) @@ -49,7 +69,6 @@ export async function GET(request: NextRequest) { const { searchParams } = new URL(request.url) const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries())) - // Conditionally select columns based on detail level to optimize performance const selectColumns = params.details === 'full' ? 
{ @@ -63,9 +82,9 @@ export async function GET(request: NextRequest) { startedAt: workflowExecutionLogs.startedAt, endedAt: workflowExecutionLogs.endedAt, totalDurationMs: workflowExecutionLogs.totalDurationMs, - executionData: workflowExecutionLogs.executionData, // Large field - only in full mode + executionData: workflowExecutionLogs.executionData, cost: workflowExecutionLogs.cost, - files: workflowExecutionLogs.files, // Large field - only in full mode + files: workflowExecutionLogs.files, createdAt: workflowExecutionLogs.createdAt, workflowName: workflow.name, workflowDescription: workflow.description, @@ -82,7 +101,6 @@ export async function GET(request: NextRequest) { deploymentVersionName: workflowDeploymentVersion.name, } : { - // Basic mode - exclude large fields for better performance id: workflowExecutionLogs.id, workflowId: workflowExecutionLogs.workflowId, executionId: workflowExecutionLogs.executionId, @@ -93,9 +111,9 @@ export async function GET(request: NextRequest) { startedAt: workflowExecutionLogs.startedAt, endedAt: workflowExecutionLogs.endedAt, totalDurationMs: workflowExecutionLogs.totalDurationMs, - executionData: sql`NULL`, // Exclude large execution data in basic mode + executionData: sql`NULL`, cost: workflowExecutionLogs.cost, - files: sql`NULL`, // Exclude files in basic mode + files: sql`NULL`, createdAt: workflowExecutionLogs.createdAt, workflowName: workflow.name, workflowDescription: workflow.description, @@ -109,7 +127,7 @@ export async function GET(request: NextRequest) { pausedTotalPauseCount: pausedExecutions.totalPauseCount, pausedResumedCount: pausedExecutions.resumedCount, deploymentVersion: workflowDeploymentVersion.version, - deploymentVersionName: sql`NULL`, // Only needed in full mode for details panel + deploymentVersionName: sql`NULL`, } const baseQuery = db @@ -139,34 +157,28 @@ export async function GET(request: NextRequest) { ) ) - // Build additional conditions for the query let conditions: SQL | undefined - // Filter by level with support for derived statuses (running, pending) if (params.level && params.level !== 'all') { const levels = params.level.split(',').filter(Boolean) const levelConditions: SQL[] = [] for (const level of levels) { if (level === 'error') { - // Direct database field levelConditions.push(eq(workflowExecutionLogs.level, 'error')) } else if (level === 'info') { - // Completed info logs only (not running, not pending) const condition = and( eq(workflowExecutionLogs.level, 'info'), isNotNull(workflowExecutionLogs.endedAt) ) if (condition) levelConditions.push(condition) } else if (level === 'running') { - // Running logs: info level with no endedAt const condition = and( eq(workflowExecutionLogs.level, 'info'), isNull(workflowExecutionLogs.endedAt) ) if (condition) levelConditions.push(condition) } else if (level === 'pending') { - // Pending logs: info level with pause status indicators const condition = and( eq(workflowExecutionLogs.level, 'info'), or( @@ -189,7 +201,6 @@ export async function GET(request: NextRequest) { } } - // Filter by specific workflow IDs if (params.workflowIds) { const workflowIds = params.workflowIds.split(',').filter(Boolean) if (workflowIds.length > 0) { @@ -197,7 +208,6 @@ export async function GET(request: NextRequest) { } } - // Filter by folder IDs if (params.folderIds) { const folderIds = params.folderIds.split(',').filter(Boolean) if (folderIds.length > 0) { @@ -205,7 +215,6 @@ export async function GET(request: NextRequest) { } } - // Filter by triggers if (params.triggers) { const 
triggers = params.triggers.split(',').filter(Boolean) if (triggers.length > 0 && !triggers.includes('all')) { @@ -213,7 +222,6 @@ export async function GET(request: NextRequest) { } } - // Filter by date range if (params.startDate) { conditions = and( conditions, @@ -224,33 +232,79 @@ export async function GET(request: NextRequest) { conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate))) } - // Filter by search query if (params.search) { const searchTerm = `%${params.search}%` - // With message removed, restrict search to executionId only conditions = and(conditions, sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`) } - // Filter by workflow name (from advanced search input) if (params.workflowName) { const nameTerm = `%${params.workflowName}%` conditions = and(conditions, sql`${workflow.name} ILIKE ${nameTerm}`) } - // Filter by folder name (best-effort text match when present on workflows) if (params.folderName) { const folderTerm = `%${params.folderName}%` conditions = and(conditions, sql`${workflow.name} ILIKE ${folderTerm}`) } - // Execute the query using the optimized join + if (params.executionId) { + conditions = and(conditions, eq(workflowExecutionLogs.executionId, params.executionId)) + } + + if (params.costOperator && params.costValue !== undefined) { + const costField = sql`(${workflowExecutionLogs.cost}->>'total')::numeric` + switch (params.costOperator) { + case '=': + conditions = and(conditions, sql`${costField} = ${params.costValue}`) + break + case '>': + conditions = and(conditions, sql`${costField} > ${params.costValue}`) + break + case '<': + conditions = and(conditions, sql`${costField} < ${params.costValue}`) + break + case '>=': + conditions = and(conditions, sql`${costField} >= ${params.costValue}`) + break + case '<=': + conditions = and(conditions, sql`${costField} <= ${params.costValue}`) + break + case '!=': + conditions = and(conditions, sql`${costField} != ${params.costValue}`) + break + } + } + + if (params.durationOperator && params.durationValue !== undefined) { + const durationField = workflowExecutionLogs.totalDurationMs + switch (params.durationOperator) { + case '=': + conditions = and(conditions, eq(durationField, params.durationValue)) + break + case '>': + conditions = and(conditions, gt(durationField, params.durationValue)) + break + case '<': + conditions = and(conditions, lt(durationField, params.durationValue)) + break + case '>=': + conditions = and(conditions, gte(durationField, params.durationValue)) + break + case '<=': + conditions = and(conditions, lte(durationField, params.durationValue)) + break + case '!=': + conditions = and(conditions, ne(durationField, params.durationValue)) + break + } + } + const logs = await baseQuery .where(conditions) .orderBy(desc(workflowExecutionLogs.startedAt)) .limit(params.limit) .offset(params.offset) - // Get total count for pagination using the same join structure const countQuery = db .select({ count: sql`count(*)` }) .from(workflowExecutionLogs) @@ -279,13 +333,10 @@ export async function GET(request: NextRequest) { const count = countResult[0]?.count || 0 - // Block executions are now extracted from trace spans instead of separate table const blockExecutionsByExecution: Record = {} - // Create clean trace spans from block executions const createTraceSpans = (blockExecutions: any[]) => { return blockExecutions.map((block, index) => { - // For error blocks, include error information in the output let output = block.outputData if (block.status === 
'error' && block.errorMessage) { output = { @@ -314,7 +365,6 @@ export async function GET(request: NextRequest) { }) } - // Extract cost information from block executions const extractCostSummary = (blockExecutions: any[]) => { let totalCost = 0 let totalInputCost = 0 @@ -333,7 +383,6 @@ export async function GET(request: NextRequest) { totalPromptTokens += block.cost.tokens?.prompt || 0 totalCompletionTokens += block.cost.tokens?.completion || 0 - // Track per-model costs if (block.cost.model) { if (!models.has(block.cost.model)) { models.set(block.cost.model, { @@ -363,34 +412,29 @@ export async function GET(request: NextRequest) { prompt: totalPromptTokens, completion: totalCompletionTokens, }, - models: Object.fromEntries(models), // Convert Map to object for JSON serialization + models: Object.fromEntries(models), } } - // Transform to clean log format with workflow data included const enhancedLogs = logs.map((log) => { const blockExecutions = blockExecutionsByExecution[log.executionId] || [] - // Only process trace spans and detailed cost in full mode let traceSpans = [] let finalOutput: any let costSummary = (log.cost as any) || { total: 0 } if (params.details === 'full' && log.executionData) { - // Use stored trace spans if available, otherwise create from block executions const storedTraceSpans = (log.executionData as any)?.traceSpans traceSpans = storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0 ? storedTraceSpans : createTraceSpans(blockExecutions) - // Prefer stored cost JSON; otherwise synthesize from blocks costSummary = log.cost && Object.keys(log.cost as any).length > 0 ? (log.cost as any) : extractCostSummary(blockExecutions) - // Include finalOutput if present on executionData try { const fo = (log.executionData as any)?.finalOutput if (fo !== undefined) finalOutput = fo diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx index 9f14118fa0..27d23d7976 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/logs-toolbar/components/search/search.tsx @@ -2,11 +2,9 @@ import { useEffect, useMemo, useRef, useState } from 'react' import { Search, X } from 'lucide-react' -import { useParams } from 'next/navigation' -import { Button, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn' +import { Badge, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn' import { cn } from '@/lib/core/utils/cn' -import { createLogger } from '@/lib/logs/console/logger' -import { getIntegrationMetadata } from '@/lib/logs/get-trigger-options' +import { getTriggerOptions } from '@/lib/logs/get-trigger-options' import { type ParsedFilter, parseQuery } from '@/lib/logs/query-parser' import { type FolderData, @@ -18,7 +16,15 @@ import { useSearchState } from '@/app/workspace/[workspaceId]/logs/hooks/use-sea import { useFolderStore } from '@/stores/folders/store' import { useWorkflowRegistry } from '@/stores/workflows/registry/store' -const logger = createLogger('AutocompleteSearch') +function truncateFilterValue(field: string, value: string): string { + if ((field === 'executionId' || field === 'workflowId') && value.length > 12) { + return `...${value.slice(-6)}` + } + if (value.length > 20) { + return `${value.slice(0, 17)}...` + } + return value +} interface AutocompleteSearchProps { 
value: string @@ -35,11 +41,8 @@ export function AutocompleteSearch({ className, onOpenChange, }: AutocompleteSearchProps) { - const params = useParams() - const workspaceId = params.workspaceId as string const workflows = useWorkflowRegistry((state) => state.workflows) const folders = useFolderStore((state) => state.folders) - const [triggersData, setTriggersData] = useState([]) const workflowsData = useMemo(() => { return Object.values(workflows).map((w) => ({ @@ -56,32 +59,13 @@ export function AutocompleteSearch({ })) }, [folders]) - useEffect(() => { - if (!workspaceId) return - - const fetchTriggers = async () => { - try { - const response = await fetch(`/api/logs/triggers?workspaceId=${workspaceId}`) - if (!response.ok) return - - const data = await response.json() - const triggers: TriggerData[] = data.triggers.map((trigger: string) => { - const metadata = getIntegrationMetadata(trigger) - return { - value: trigger, - label: metadata.label, - color: metadata.color, - } - }) - - setTriggersData(triggers) - } catch (error) { - logger.error('Failed to fetch triggers:', error) - } - } - - fetchTriggers() - }, [workspaceId]) + const triggersData = useMemo(() => { + return getTriggerOptions().map((t) => ({ + value: t.value, + label: t.label, + color: t.color, + })) + }, []) const suggestionEngine = useMemo(() => { return new SearchSuggestions(workflowsData, foldersData, triggersData) @@ -103,7 +87,6 @@ export function AutocompleteSearch({ suggestions, sections, highlightedIndex, - highlightedBadgeIndex, inputRef, dropdownRef, handleInputChange, @@ -122,7 +105,6 @@ export function AutocompleteSearch({ const lastExternalValue = useRef(value) useEffect(() => { - // Only re-initialize if value changed externally (not from user typing) if (value !== lastExternalValue.current) { lastExternalValue.current = value const parsed = parseQuery(value) @@ -130,7 +112,6 @@ export function AutocompleteSearch({ } }, [value, initializeFromQuery]) - // Initial sync on mount useEffect(() => { if (value) { const parsed = parseQuery(value) @@ -189,40 +170,49 @@ export function AutocompleteSearch({
{/* Applied Filter Badges */} {appliedFilters.map((filter, index) => ( - + + ))} {/* Text Search Badge (if present) */} {hasTextSearch && ( - + + "{textSearch}" + + + )} {/* Input - only current typing */} @@ -261,9 +251,8 @@ export function AutocompleteSearch({ sideOffset={4} onOpenAutoFocus={(e) => e.preventDefault()} > -
+
{sections.length > 0 ? ( - // Multi-section layout
{/* Show all results (no header) */} {suggestions[0]?.category === 'show-all' && ( @@ -271,9 +260,9 @@ export function AutocompleteSearch({ key={suggestions[0].id} data-index={0} className={cn( - 'w-full px-3 py-1.5 text-left transition-colors focus:outline-none', - 'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]', - highlightedIndex === 0 && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]' + 'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]', + 'hover:bg-[var(--surface-9)]', + highlightedIndex === 0 && 'bg-[var(--surface-9)]' )} onMouseEnter={() => setHighlightedIndex(0)} onMouseDown={(e) => { @@ -287,7 +276,7 @@ export function AutocompleteSearch({ {sections.map((section) => (
-
+
{section.title}
{section.suggestions.map((suggestion) => { @@ -301,9 +290,9 @@ export function AutocompleteSearch({ key={suggestion.id} data-index={index} className={cn( - 'w-full px-3 py-1.5 text-left transition-colors focus:outline-none', - 'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]', - isHighlighted && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]' + 'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]', + 'hover:bg-[var(--surface-9)]', + isHighlighted && 'bg-[var(--surface-9)]' )} onMouseEnter={() => setHighlightedIndex(index)} onMouseDown={(e) => { @@ -312,19 +301,11 @@ export function AutocompleteSearch({ }} >
-
- {suggestion.category === 'trigger' && suggestion.color && ( -
- )} -
- {suggestion.label} -
+
+ {suggestion.label}
{suggestion.value !== suggestion.label && ( -
+
{suggestion.category === 'workflow' || suggestion.category === 'folder' ? `${suggestion.category}:` @@ -342,7 +323,7 @@ export function AutocompleteSearch({ // Single section layout
{suggestionType === 'filters' && ( -
+
SUGGESTED FILTERS
)} @@ -352,10 +333,9 @@ export function AutocompleteSearch({ key={suggestion.id} data-index={index} className={cn( - 'w-full px-3 py-1.5 text-left transition-colors focus:outline-none', - 'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]', - index === highlightedIndex && - 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]' + 'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]', + 'hover:bg-[var(--surface-9)]', + index === highlightedIndex && 'bg-[var(--surface-9)]' )} onMouseEnter={() => setHighlightedIndex(index)} onMouseDown={(e) => { @@ -364,17 +344,9 @@ export function AutocompleteSearch({ }} >
-
- {suggestion.category === 'trigger' && suggestion.color && ( -
- )} -
{suggestion.label}
-
+
{suggestion.label}
{suggestion.description && ( -
+
{suggestion.value}
)} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/hooks/use-search-state.ts b/apps/sim/app/workspace/[workspaceId]/logs/hooks/use-search-state.ts index 92c6e476c5..5d88372ea2 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/hooks/use-search-state.ts +++ b/apps/sim/app/workspace/[workspaceId]/logs/hooks/use-search-state.ts @@ -21,21 +21,15 @@ export function useSearchState({ const [currentInput, setCurrentInput] = useState('') const [textSearch, setTextSearch] = useState('') - // Dropdown state const [isOpen, setIsOpen] = useState(false) const [suggestions, setSuggestions] = useState([]) const [sections, setSections] = useState([]) const [highlightedIndex, setHighlightedIndex] = useState(-1) - // Badge interaction - const [highlightedBadgeIndex, setHighlightedBadgeIndex] = useState(null) - - // Refs const inputRef = useRef(null) const dropdownRef = useRef(null) const debounceRef = useRef(null) - // Update suggestions when input changes const updateSuggestions = useCallback( (input: string) => { const suggestionGroup = getSuggestions(input) @@ -55,13 +49,10 @@ export function useSearchState({ [getSuggestions] ) - // Handle input changes const handleInputChange = useCallback( (value: string) => { setCurrentInput(value) - setHighlightedBadgeIndex(null) // Clear badge highlight on any input - // Debounce suggestion updates if (debounceRef.current) { clearTimeout(debounceRef.current) } @@ -73,11 +64,9 @@ export function useSearchState({ [updateSuggestions, debounceMs] ) - // Handle suggestion selection const handleSuggestionSelect = useCallback( (suggestion: Suggestion) => { if (suggestion.category === 'show-all') { - // Treat as text search setTextSearch(suggestion.value) setCurrentInput('') setIsOpen(false) @@ -85,15 +74,12 @@ export function useSearchState({ return } - // Check if this is a filter-key suggestion (ends with ':') if (suggestion.category === 'filters' && suggestion.value.endsWith(':')) { - // Set input to the filter key and keep dropdown open for values setCurrentInput(suggestion.value) updateSuggestions(suggestion.value) return } - // For filter values, workflows, folders - add as a filter const newFilter: ParsedFilter = { field: suggestion.value.split(':')[0] as any, operator: '=', @@ -110,15 +96,12 @@ export function useSearchState({ setCurrentInput('') setTextSearch('') - // Notify parent onFiltersChange(updatedFilters, '') - // Focus back on input and reopen dropdown with empty suggestions if (inputRef.current) { inputRef.current.focus() } - // Show filter keys dropdown again after selection setTimeout(() => { updateSuggestions('') }, 50) @@ -126,12 +109,10 @@ export function useSearchState({ [appliedFilters, onFiltersChange, updateSuggestions] ) - // Remove a badge const removeBadge = useCallback( (index: number) => { const updatedFilters = appliedFilters.filter((_, i) => i !== index) setAppliedFilters(updatedFilters) - setHighlightedBadgeIndex(null) onFiltersChange(updatedFilters, textSearch) if (inputRef.current) { @@ -141,39 +122,22 @@ export function useSearchState({ [appliedFilters, textSearch, onFiltersChange] ) - // Handle keyboard navigation const handleKeyDown = useCallback( (event: React.KeyboardEvent) => { - // Backspace on empty input - badge deletion if (event.key === 'Backspace' && currentInput === '') { - event.preventDefault() - - if (highlightedBadgeIndex !== null) { - // Delete highlighted badge - removeBadge(highlightedBadgeIndex) - } else if (appliedFilters.length > 0) { - // Highlight last badge - 
setHighlightedBadgeIndex(appliedFilters.length - 1) + if (appliedFilters.length > 0) { + event.preventDefault() + removeBadge(appliedFilters.length - 1) } return } - // Clear badge highlight on any other key when not in dropdown navigation - if ( - highlightedBadgeIndex !== null && - !['ArrowDown', 'ArrowUp', 'Enter'].includes(event.key) - ) { - setHighlightedBadgeIndex(null) - } - - // Enter key if (event.key === 'Enter') { event.preventDefault() if (isOpen && highlightedIndex >= 0 && suggestions[highlightedIndex]) { handleSuggestionSelect(suggestions[highlightedIndex]) } else if (currentInput.trim()) { - // Submit current input as text search setTextSearch(currentInput.trim()) setCurrentInput('') setIsOpen(false) @@ -182,7 +146,6 @@ export function useSearchState({ return } - // Dropdown navigation if (!isOpen) return switch (event.key) { @@ -216,7 +179,6 @@ export function useSearchState({ }, [ currentInput, - highlightedBadgeIndex, appliedFilters, isOpen, highlightedIndex, @@ -227,12 +189,10 @@ export function useSearchState({ ] ) - // Handle focus const handleFocus = useCallback(() => { updateSuggestions(currentInput) }, [currentInput, updateSuggestions]) - // Handle blur const handleBlur = useCallback(() => { setTimeout(() => { setIsOpen(false) @@ -240,7 +200,6 @@ export function useSearchState({ }, 150) }, []) - // Clear all filters const clearAll = useCallback(() => { setAppliedFilters([]) setCurrentInput('') @@ -253,7 +212,6 @@ export function useSearchState({ } }, [onFiltersChange]) - // Initialize from external value (URL params, etc.) const initializeFromQuery = useCallback((query: string, filters: ParsedFilter[]) => { setAppliedFilters(filters) setTextSearch(query) @@ -261,7 +219,6 @@ export function useSearchState({ }, []) return { - // State appliedFilters, currentInput, textSearch, @@ -269,13 +226,10 @@ export function useSearchState({ suggestions, sections, highlightedIndex, - highlightedBadgeIndex, - // Refs inputRef, dropdownRef, - // Handlers handleInputChange, handleSuggestionSelect, handleKeyDown, @@ -285,7 +239,6 @@ export function useSearchState({ clearAll, initializeFromQuery, - // Setters for external control setHighlightedIndex, } } diff --git a/apps/sim/lib/logs/query-parser.ts b/apps/sim/lib/logs/query-parser.ts index 716ad34265..64f996b42c 100644 --- a/apps/sim/lib/logs/query-parser.ts +++ b/apps/sim/lib/logs/query-parser.ts @@ -23,6 +23,8 @@ const FILTER_FIELDS = { workflow: 'string', trigger: 'string', execution: 'string', + executionId: 'string', + workflowId: 'string', id: 'string', cost: 'number', duration: 'number', @@ -215,11 +217,13 @@ export function queryToApiParams(parsedQuery: ParsedQuery): Record 0) { suggestions.push({ id: 'filter-key-workflow', @@ -249,12 +232,10 @@ export class SearchSuggestions { : null } - // Trigger filter values (core + integrations) if (key === 'trigger') { const allTriggers = this.getAllTriggers() const suggestions = allTriggers .filter((t) => !partial || t.label.toLowerCase().includes(partial.toLowerCase())) - .slice(0, 15) // Show more since we have core + integrations .map((t) => ({ id: `filter-value-trigger-${t.value}`, value: `trigger:${t.value}`, @@ -273,11 +254,9 @@ export class SearchSuggestions { : null } - // Workflow filter values if (key === 'workflow') { const suggestions = this.workflowsData .filter((w) => !partial || w.name.toLowerCase().includes(partial.toLowerCase())) - .slice(0, 8) .map((w) => ({ id: `filter-value-workflow-${w.id}`, value: `workflow:"${w.name}"`, @@ -295,11 +274,9 @@ export class 
SearchSuggestions { : null } - // Folder filter values if (key === 'folder') { const suggestions = this.foldersData .filter((f) => !partial || f.name.toLowerCase().includes(partial.toLowerCase())) - .slice(0, 8) .map((f) => ({ id: `filter-value-folder-${f.id}`, value: `folder:"${f.name}"`, @@ -326,7 +303,6 @@ export class SearchSuggestions { const sections: Array<{ title: string; suggestions: Suggestion[] }> = [] const allSuggestions: Suggestion[] = [] - // Show all results option const showAllSuggestion: Suggestion = { id: 'show-all', value: query, @@ -335,7 +311,6 @@ export class SearchSuggestions { } allSuggestions.push(showAllSuggestion) - // Match filter values (e.g., "info" → "Status: Info") const matchingFilterValues = this.getMatchingFilterValues(query) if (matchingFilterValues.length > 0) { sections.push({ @@ -345,7 +320,6 @@ export class SearchSuggestions { allSuggestions.push(...matchingFilterValues) } - // Match triggers const matchingTriggers = this.getMatchingTriggers(query) if (matchingTriggers.length > 0) { sections.push({ @@ -355,7 +329,6 @@ export class SearchSuggestions { allSuggestions.push(...matchingTriggers) } - // Match workflows const matchingWorkflows = this.getMatchingWorkflows(query) if (matchingWorkflows.length > 0) { sections.push({ @@ -365,7 +338,6 @@ export class SearchSuggestions { allSuggestions.push(...matchingWorkflows) } - // Match folders const matchingFolders = this.getMatchingFolders(query) if (matchingFolders.length > 0) { sections.push({ @@ -375,7 +347,6 @@ export class SearchSuggestions { allSuggestions.push(...matchingFolders) } - // Add filter keys if no specific matches if ( matchingFilterValues.length === 0 && matchingTriggers.length === 0 && From 9861d3a0ac5b4616335621ae6672a5119e561cae Mon Sep 17 00:00:00 2001 From: Waleed Date: Tue, 16 Dec 2025 18:24:00 -0800 Subject: [PATCH 03/15] improvement(helm): added more to helm charts, remove instance selector for various cloud providers (#2412) * improvement(helm): added more to helm charts, remove instance selector for various cloud providers * ack PR comment --- helm/sim/examples/values-aws.yaml | 58 ++++++++++++------------ helm/sim/examples/values-azure.yaml | 61 ++++++++++++++------------ helm/sim/examples/values-gcp.yaml | 52 +++++++++++----------- helm/sim/templates/_helpers.tpl | 6 +++ helm/sim/templates/deployment-app.yaml | 2 +- helm/sim/values.schema.json | 12 ++--- helm/sim/values.yaml | 16 +++---- 7 files changed, 107 insertions(+), 100 deletions(-) diff --git a/helm/sim/examples/values-aws.yaml b/helm/sim/examples/values-aws.yaml index 3588074e20..8fb7e167ab 100644 --- a/helm/sim/examples/values-aws.yaml +++ b/helm/sim/examples/values-aws.yaml @@ -4,17 +4,17 @@ # Global configuration global: imageRegistry: "ghcr.io" - storageClass: "gp3" + storageClass: "gp2" # Use gp2 (default on EKS) or create gp3 StorageClass for better performance # Main application app: enabled: true replicaCount: 2 - - # Node selector for application pods (customize based on your EKS node labels) - nodeSelector: - kubernetes.io/arch: amd64 - node.kubernetes.io/instance-type: "t3.large" + + # Node selector for application pods + # Uncomment and customize based on your EKS node labels: + # nodeSelector: + # node.kubernetes.io/instance-type: "t3.large" resources: limits: @@ -28,8 +28,8 @@ app: env: NEXT_PUBLIC_APP_URL: "https://simstudio.acme.com" BETTER_AUTH_URL: "https://simstudio.acme.com" - SOCKET_SERVER_URL: "https://simstudio-ws.acme.com" - NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" + # 
SOCKET_SERVER_URL is auto-detected (uses internal service http://sim-realtime:3002) + NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" # Public WebSocket URL for browsers # Security settings (REQUIRED - replace with your own secure secrets) # Generate using: openssl rand -hex 32 @@ -52,11 +52,11 @@ app: realtime: enabled: true replicaCount: 2 - - # Node selector for realtime pods (customize based on your EKS node labels) - nodeSelector: - kubernetes.io/arch: amd64 - node.kubernetes.io/instance-type: "t3.medium" + + # Node selector for realtime pods + # Uncomment and customize based on your EKS node labels: + # nodeSelector: + # node.kubernetes.io/instance-type: "t3.medium" resources: limits: @@ -89,10 +89,11 @@ migrations: # PostgreSQL database postgresql: enabled: true - - # Node selector for database pods (recommended: memory-optimized EC2 instances) - nodeSelector: - node.kubernetes.io/instance-type: "r5.large" + + # Node selector for database pods + # Uncomment and customize (recommended: memory-optimized EC2 instances like r5.large): + # nodeSelector: + # node.kubernetes.io/instance-type: "r5.large" # Database authentication (REQUIRED - set secure credentials) auth: @@ -109,17 +110,17 @@ postgresql: memory: "2Gi" cpu: "1000m" - # Persistent storage using AWS EBS GP3 volumes + # Persistent storage using AWS EBS volumes persistence: enabled: true - storageClass: "gp3" + storageClass: "gp2" # Use gp2 (default) or create gp3 StorageClass size: 50Gi accessModes: - ReadWriteOnce - # SSL/TLS configuration + # SSL/TLS configuration (requires cert-manager to be installed) tls: - enabled: true + enabled: false # Set to true if cert-manager is installed certificatesSecret: postgres-tls-secret # PostgreSQL performance tuning for AWS infrastructure @@ -130,14 +131,15 @@ postgresql: minWalSize: "160MB" # Ollama AI models with GPU acceleration (AWS EC2 GPU instances) +# Set ollama.enabled: false if you don't need local AI models ollama: - enabled: true + enabled: false replicaCount: 1 - - # GPU node targeting (recommended: g4dn.xlarge or p3.2xlarge instances) - nodeSelector: - node.kubernetes.io/instance-type: "g4dn.xlarge" - kubernetes.io/arch: amd64 + + # GPU node targeting - uncomment and customize for GPU instances + # Recommended: g4dn.xlarge or p3.2xlarge instances + # nodeSelector: + # node.kubernetes.io/instance-type: "g4dn.xlarge" tolerations: - key: "nvidia.com/gpu" @@ -162,7 +164,7 @@ ollama: # High-performance storage for AI models persistence: enabled: true - storageClass: "gp3" + storageClass: "gp2" # Use gp2 (default) or create gp3 StorageClass size: 100Gi accessModes: - ReadWriteOnce diff --git a/helm/sim/examples/values-azure.yaml b/helm/sim/examples/values-azure.yaml index a888531bf3..1ae5a468b1 100644 --- a/helm/sim/examples/values-azure.yaml +++ b/helm/sim/examples/values-azure.yaml @@ -4,16 +4,19 @@ # Global configuration global: imageRegistry: "ghcr.io" - storageClass: "managed-csi-premium" + # Use "managed-csi-premium" for Premium SSD (requires Premium storage-capable VMs like Standard_DS*) + # Use "managed-csi" for Standard SSD (works with all VM types) + storageClass: "managed-csi" # Main application app: enabled: true - replicaCount: 1 - - # Node selector for application pods (customize based on your AKS node labels) - nodeSelector: - node-role: application + replicaCount: 2 + + # Node selector for application pods + # Uncomment and customize based on your AKS node labels: + # nodeSelector: + # agentpool: "application" resources: limits: @@ -26,8 +29,8 @@ app: env: 
NEXT_PUBLIC_APP_URL: "https://simstudio.acme.com" BETTER_AUTH_URL: "https://simstudio.acme.com" - SOCKET_SERVER_URL: "https://simstudio-ws.acme.com" - NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" + # SOCKET_SERVER_URL is auto-detected (uses internal service http://sim-realtime:3002) + NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" # Public WebSocket URL for browsers # Security settings (REQUIRED - replace with your own secure secrets) # Generate using: openssl rand -hex 32 @@ -46,11 +49,12 @@ app: # Realtime service realtime: enabled: true - replicaCount: 1 - - # Node selector for application pods (customize based on your AKS node labels) - nodeSelector: - node-role: application + replicaCount: 2 + + # Node selector for realtime pods + # Uncomment and customize based on your AKS node labels: + # nodeSelector: + # agentpool: "application" resources: limits: @@ -74,10 +78,11 @@ migrations: # PostgreSQL database postgresql: enabled: true - - # Node selector for database pods (recommended: memory-optimized VM sizes) - nodeSelector: - node-role: datalake + + # Node selector for database pods + # Uncomment and customize (recommended: memory-optimized VM sizes): + # nodeSelector: + # agentpool: "database" # Database authentication (REQUIRED - set secure credentials) auth: @@ -93,15 +98,15 @@ postgresql: memory: "1Gi" cpu: "500m" - # Persistent storage using Azure Premium SSD + # Persistent storage using Azure Managed Disk persistence: enabled: true - storageClass: "managed-csi-premium" + storageClass: "managed-csi" size: 10Gi - # SSL/TLS configuration (recommended for production) + # SSL/TLS configuration (requires cert-manager to be installed) tls: - enabled: true + enabled: false # Set to true if cert-manager is installed certificatesSecret: postgres-tls-secret # PostgreSQL performance tuning for Azure infrastructure @@ -112,13 +117,15 @@ postgresql: minWalSize: "80MB" # Ollama AI models with GPU acceleration (Azure NC-series VMs) +# Set ollama.enabled: false if you don't need local AI models ollama: - enabled: true + enabled: false replicaCount: 1 - - # GPU node targeting (recommended: NC6s_v3 or NC12s_v3 VMs) - nodeSelector: - accelerator: nvidia + + # GPU node targeting - uncomment and customize for GPU node pools + # Recommended: NC6s_v3 or NC12s_v3 VMs + # nodeSelector: + # agentpool: "gpu" tolerations: - key: "sku" @@ -139,7 +146,7 @@ ollama: memory: "4Gi" cpu: "1000m" - # High-performance storage for AI models + # High-performance storage for AI models (use managed-csi-premium for GPU workloads) persistence: enabled: true storageClass: "managed-csi-premium" diff --git a/helm/sim/examples/values-gcp.yaml b/helm/sim/examples/values-gcp.yaml index c8d5af9083..f0b5e66b58 100644 --- a/helm/sim/examples/values-gcp.yaml +++ b/helm/sim/examples/values-gcp.yaml @@ -10,11 +10,11 @@ global: app: enabled: true replicaCount: 2 - - # Node selector for application pods (customize based on your GKE node labels) - nodeSelector: - kubernetes.io/arch: amd64 - cloud.google.com/gke-nodepool: "default-pool" + + # Node selector for application pods + # Uncomment and customize based on your GKE node labels: + # nodeSelector: + # cloud.google.com/gke-nodepool: "default-pool" resources: limits: @@ -28,8 +28,8 @@ app: env: NEXT_PUBLIC_APP_URL: "https://simstudio.acme.com" BETTER_AUTH_URL: "https://simstudio.acme.com" - SOCKET_SERVER_URL: "https://simstudio-ws.acme.com" - NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" + # SOCKET_SERVER_URL is auto-detected (uses internal service 
http://sim-realtime:3002) + NEXT_PUBLIC_SOCKET_URL: "https://simstudio-ws.acme.com" # Public WebSocket URL for browsers # Security settings (REQUIRED - replace with your own secure secrets) # Generate using: openssl rand -hex 32 @@ -53,11 +53,11 @@ app: realtime: enabled: true replicaCount: 2 - - # Node selector for realtime pods (customize based on your GKE node labels) - nodeSelector: - kubernetes.io/arch: amd64 - cloud.google.com/gke-nodepool: "default-pool" + + # Node selector for realtime pods + # Uncomment and customize based on your GKE node labels: + # nodeSelector: + # cloud.google.com/gke-nodepool: "default-pool" resources: limits: @@ -90,11 +90,11 @@ migrations: # PostgreSQL database postgresql: enabled: true - - # Node selector for database pods (recommended: memory-optimized machine types) - nodeSelector: - cloud.google.com/gke-nodepool: "database-pool" - cloud.google.com/machine-family: "n2" + + # Node selector for database pods + # Uncomment and customize (recommended: memory-optimized machine types): + # nodeSelector: + # cloud.google.com/gke-nodepool: "database-pool" # Database authentication (REQUIRED - set secure credentials) auth: @@ -119,9 +119,9 @@ postgresql: accessModes: - ReadWriteOnce - # SSL/TLS configuration + # SSL/TLS configuration (requires cert-manager to be installed) tls: - enabled: true + enabled: false # Set to true if cert-manager is installed certificatesSecret: postgres-tls-secret # PostgreSQL performance tuning for GCP infrastructure @@ -132,14 +132,16 @@ postgresql: minWalSize: "160MB" # Ollama AI models with GPU acceleration (GCP GPU instances) +# Set ollama.enabled: false if you don't need local AI models ollama: - enabled: true + enabled: false replicaCount: 1 - - # GPU node targeting (recommended: T4 or V100 GPU instances) - nodeSelector: - cloud.google.com/gke-nodepool: "gpu-pool" - cloud.google.com/gke-accelerator: "nvidia-tesla-t4" + + # GPU node targeting - uncomment and customize for GPU node pools + # Recommended: T4 or V100 GPU instances + # nodeSelector: + # cloud.google.com/gke-nodepool: "gpu-pool" + # cloud.google.com/gke-accelerator: "nvidia-tesla-t4" tolerations: - key: "nvidia.com/gpu" diff --git a/helm/sim/templates/_helpers.tpl b/helm/sim/templates/_helpers.tpl index 134fe2b443..9966b14937 100644 --- a/helm/sim/templates/_helpers.tpl +++ b/helm/sim/templates/_helpers.tpl @@ -204,9 +204,15 @@ Validate required secrets and reject default placeholder values {{- if and .Values.postgresql.enabled (eq .Values.postgresql.auth.password "CHANGE-ME-SECURE-PASSWORD") }} {{- fail "postgresql.auth.password must not use the default placeholder value. Set a secure password for production" }} {{- end }} +{{- if and .Values.postgresql.enabled (not (regexMatch "^[a-zA-Z0-9._-]+$" .Values.postgresql.auth.password)) }} +{{- fail "postgresql.auth.password must only contain alphanumeric characters, hyphens, underscores, or periods to ensure DATABASE_URL compatibility. Generate with: openssl rand -base64 16 | tr -d '/+='" }} +{{- end }} {{- if and .Values.externalDatabase.enabled (not .Values.externalDatabase.password) }} {{- fail "externalDatabase.password is required when using external database" }} {{- end }} +{{- if and .Values.externalDatabase.enabled .Values.externalDatabase.password (not (regexMatch "^[a-zA-Z0-9._-]+$" .Values.externalDatabase.password)) }} +{{- fail "externalDatabase.password must only contain alphanumeric characters, hyphens, underscores, or periods to ensure DATABASE_URL compatibility." 
}} +{{- end }} {{- end }} {{/* diff --git a/helm/sim/templates/deployment-app.yaml b/helm/sim/templates/deployment-app.yaml index 564fa532d0..6433e82ea0 100644 --- a/helm/sim/templates/deployment-app.yaml +++ b/helm/sim/templates/deployment-app.yaml @@ -68,7 +68,7 @@ spec: - name: DATABASE_URL value: {{ include "sim.databaseUrl" . | quote }} - name: SOCKET_SERVER_URL - value: {{ .Values.app.env.SOCKET_SERVER_URL | default "http://localhost:3002" | quote }} + value: {{ include "sim.socketServerUrl" . | quote }} - name: OLLAMA_URL value: {{ include "sim.ollamaUrl" . | quote }} {{- range $key, $value := omit .Values.app.env "DATABASE_URL" "SOCKET_SERVER_URL" "OLLAMA_URL" }} diff --git a/helm/sim/values.schema.json b/helm/sim/values.schema.json index 37f4d4288b..6aa96f1c2b 100644 --- a/helm/sim/values.schema.json +++ b/helm/sim/values.schema.json @@ -185,8 +185,7 @@ }, "OLLAMA_URL": { "type": "string", - "format": "uri", - "description": "Ollama local LLM server URL" + "description": "Ollama local LLM server URL (leave empty if not using Ollama)" }, "ELEVENLABS_API_KEY": { "type": "string", @@ -238,18 +237,15 @@ }, "NEXT_PUBLIC_BRAND_LOGO_URL": { "type": "string", - "format": "uri", - "description": "Custom logo URL (must be a full URL, e.g., https://example.com/logo.png)" + "description": "Custom logo URL (leave empty for default)" }, "NEXT_PUBLIC_BRAND_FAVICON_URL": { "type": "string", - "format": "uri", - "description": "Custom favicon URL (must be a full URL, e.g., https://example.com/favicon.ico)" + "description": "Custom favicon URL (leave empty for default)" }, "NEXT_PUBLIC_CUSTOM_CSS_URL": { "type": "string", - "format": "uri", - "description": "Custom stylesheet URL (must be a full URL)" + "description": "Custom stylesheet URL (leave empty for none)" }, "NEXT_PUBLIC_SUPPORT_EMAIL": { "type": "string", diff --git a/helm/sim/values.yaml b/helm/sim/values.yaml index d0d51252a2..d588d06b76 100644 --- a/helm/sim/values.yaml +++ b/helm/sim/values.yaml @@ -52,8 +52,9 @@ app: # Application URLs NEXT_PUBLIC_APP_URL: "http://localhost:3000" BETTER_AUTH_URL: "http://localhost:3000" - SOCKET_SERVER_URL: "http://localhost:3002" - NEXT_PUBLIC_SOCKET_URL: "http://localhost:3002" + # SOCKET_SERVER_URL: Auto-detected when realtime.enabled=true (uses internal service) + # Only set this if using an external WebSocket service with realtime.enabled=false + NEXT_PUBLIC_SOCKET_URL: "http://localhost:3002" # Public WebSocket URL for browsers # Node environment NODE_ENV: "production" @@ -99,15 +100,8 @@ app: # Rate Limiting Configuration (per minute) RATE_LIMIT_WINDOW_MS: "60000" # Rate limit window duration (1 minute) - RATE_LIMIT_FREE_SYNC: "10" # Free tier sync API executions - RATE_LIMIT_PRO_SYNC: "25" # Pro tier sync API executions - RATE_LIMIT_TEAM_SYNC: "75" # Team tier sync API executions - RATE_LIMIT_ENTERPRISE_SYNC: "150" # Enterprise tier sync API executions - RATE_LIMIT_FREE_ASYNC: "50" # Free tier async API executions - RATE_LIMIT_PRO_ASYNC: "200" # Pro tier async API executions - RATE_LIMIT_TEAM_ASYNC: "500" # Team tier async API executions - RATE_LIMIT_ENTERPRISE_ASYNC: "1000" # Enterprise tier async API executions - MANUAL_EXECUTION_LIMIT: "999999" # Manual execution bypass value + RATE_LIMIT_FREE_SYNC: "10" # Sync API executions per minute + RATE_LIMIT_FREE_ASYNC: "50" # Async API executions per minute # UI Branding & Whitelabeling Configuration NEXT_PUBLIC_BRAND_NAME: "Sim" # Custom brand name From 27ea333974d5df2cc266655628b9b31b42952ce6 Mon Sep 17 00:00:00 2001 From: Waleed Date: Tue, 16 
Dec 2025 19:59:02 -0800 Subject: [PATCH 04/15] fix(chat): fix stale closure in workflow runner for chat (#2418) --- .../[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts index c8d5b62f21..557ea43524 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts @@ -655,6 +655,7 @@ export function useWorkflowExecution() { setExecutor, setPendingBlocks, setActiveBlocks, + workflows, ] ) From dcbeca1abe896dd5845dbba52d4ed0d833b4315d Mon Sep 17 00:00:00 2001 From: Waleed Date: Tue, 16 Dec 2025 20:47:58 -0800 Subject: [PATCH 05/15] fix(subflow): fix json stringification in subflow collections (#2419) * fix(subflow): fix json stringification in subflow collections * cleanup --- .../stores/workflows/workflow/store.test.ts | 185 +++++++----------- .../stores/workflows/workflow/utils.test.ts | 11 +- apps/sim/stores/workflows/workflow/utils.ts | 27 +-- 3 files changed, 77 insertions(+), 146 deletions(-) diff --git a/apps/sim/stores/workflows/workflow/store.test.ts b/apps/sim/stores/workflows/workflow/store.test.ts index 36ae556435..45b7bad00d 100644 --- a/apps/sim/stores/workflows/workflow/store.test.ts +++ b/apps/sim/stores/workflows/workflow/store.test.ts @@ -22,10 +22,9 @@ describe('workflow store', () => { }) describe('loop management', () => { - it('should regenerate loops when updateLoopCount is called', () => { + it.concurrent('should regenerate loops when updateLoopCount is called', () => { const { addBlock, updateLoopCount } = useWorkflowStore.getState() - // Add a loop block addBlock( 'loop1', 'loop', @@ -38,23 +37,19 @@ describe('workflow store', () => { } ) - // Update loop count updateLoopCount('loop1', 10) const state = useWorkflowStore.getState() - // Check that block data was updated expect(state.blocks.loop1?.data?.count).toBe(10) - // Check that loops were regenerated expect(state.loops.loop1).toBeDefined() expect(state.loops.loop1.iterations).toBe(10) }) - it('should regenerate loops when updateLoopType is called', () => { + it.concurrent('should regenerate loops when updateLoopType is called', () => { const { addBlock, updateLoopType } = useWorkflowStore.getState() - // Add a loop block addBlock( 'loop1', 'loop', @@ -67,24 +62,20 @@ describe('workflow store', () => { } ) - // Update loop type updateLoopType('loop1', 'forEach') const state = useWorkflowStore.getState() - // Check that block data was updated expect(state.blocks.loop1?.data?.loopType).toBe('forEach') - // Check that loops were regenerated with forEach items expect(state.loops.loop1).toBeDefined() expect(state.loops.loop1.loopType).toBe('forEach') - expect(state.loops.loop1.forEachItems).toEqual(['a', 'b', 'c']) + expect(state.loops.loop1.forEachItems).toBe('["a", "b", "c"]') }) - it('should regenerate loops when updateLoopCollection is called', () => { + it.concurrent('should regenerate loops when updateLoopCollection is called', () => { const { addBlock, updateLoopCollection } = useWorkflowStore.getState() - // Add a forEach loop block addBlock( 'loop1', 'loop', @@ -96,23 +87,19 @@ describe('workflow store', () => { } ) - // Update loop collection updateLoopCollection('loop1', '["item1", "item2", "item3"]') const state = useWorkflowStore.getState() - // Check that 
block data was updated expect(state.blocks.loop1?.data?.collection).toBe('["item1", "item2", "item3"]') - // Check that loops were regenerated with new items expect(state.loops.loop1).toBeDefined() - expect(state.loops.loop1.forEachItems).toEqual(['item1', 'item2', 'item3']) + expect(state.loops.loop1.forEachItems).toBe('["item1", "item2", "item3"]') }) - it('should clamp loop count between 1 and 1000', () => { + it.concurrent('should clamp loop count between 1 and 1000', () => { const { addBlock, updateLoopCount } = useWorkflowStore.getState() - // Add a loop block addBlock( 'loop1', 'loop', @@ -125,12 +112,10 @@ describe('workflow store', () => { } ) - // Try to set count above max updateLoopCount('loop1', 1500) let state = useWorkflowStore.getState() expect(state.blocks.loop1?.data?.count).toBe(1000) - // Try to set count below min updateLoopCount('loop1', 0) state = useWorkflowStore.getState() expect(state.blocks.loop1?.data?.count).toBe(1) @@ -138,10 +123,9 @@ describe('workflow store', () => { }) describe('parallel management', () => { - it('should regenerate parallels when updateParallelCount is called', () => { + it.concurrent('should regenerate parallels when updateParallelCount is called', () => { const { addBlock, updateParallelCount } = useWorkflowStore.getState() - // Add a parallel block addBlock( 'parallel1', 'parallel', @@ -153,23 +137,19 @@ describe('workflow store', () => { } ) - // Update parallel count updateParallelCount('parallel1', 5) const state = useWorkflowStore.getState() - // Check that block data was updated expect(state.blocks.parallel1?.data?.count).toBe(5) - // Check that parallels were regenerated expect(state.parallels.parallel1).toBeDefined() expect(state.parallels.parallel1.distribution).toBe('') }) - it('should regenerate parallels when updateParallelCollection is called', () => { + it.concurrent('should regenerate parallels when updateParallelCollection is called', () => { const { addBlock, updateParallelCollection } = useWorkflowStore.getState() - // Add a parallel block addBlock( 'parallel1', 'parallel', @@ -182,27 +162,22 @@ describe('workflow store', () => { } ) - // Update parallel collection updateParallelCollection('parallel1', '["item1", "item2", "item3"]') const state = useWorkflowStore.getState() - // Check that block data was updated expect(state.blocks.parallel1?.data?.collection).toBe('["item1", "item2", "item3"]') - // Check that parallels were regenerated expect(state.parallels.parallel1).toBeDefined() expect(state.parallels.parallel1.distribution).toBe('["item1", "item2", "item3"]') - // Verify that the parallel count matches the collection size const parsedDistribution = JSON.parse(state.parallels.parallel1.distribution as string) expect(parsedDistribution).toHaveLength(3) }) - it('should clamp parallel count between 1 and 20', () => { + it.concurrent('should clamp parallel count between 1 and 20', () => { const { addBlock, updateParallelCount } = useWorkflowStore.getState() - // Add a parallel block addBlock( 'parallel1', 'parallel', @@ -214,21 +189,18 @@ describe('workflow store', () => { } ) - // Try to set count above max updateParallelCount('parallel1', 100) let state = useWorkflowStore.getState() expect(state.blocks.parallel1?.data?.count).toBe(20) - // Try to set count below min updateParallelCount('parallel1', 0) state = useWorkflowStore.getState() expect(state.blocks.parallel1?.data?.count).toBe(1) }) - it('should regenerate parallels when updateParallelType is called', () => { + it.concurrent('should regenerate parallels when 
updateParallelType is called', () => { const { addBlock, updateParallelType } = useWorkflowStore.getState() - // Add a parallel block with default collection type addBlock( 'parallel1', 'parallel', @@ -241,50 +213,40 @@ describe('workflow store', () => { } ) - // Update parallel type to count updateParallelType('parallel1', 'count') const state = useWorkflowStore.getState() - // Check that block data was updated expect(state.blocks.parallel1?.data?.parallelType).toBe('count') - // Check that parallels were regenerated with new type expect(state.parallels.parallel1).toBeDefined() expect(state.parallels.parallel1.parallelType).toBe('count') }) }) describe('mode switching', () => { - it('should toggle advanced mode on a block', () => { + it.concurrent('should toggle advanced mode on a block', () => { const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState() - // Add an agent block addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 }) - // Initially should be in basic mode (advancedMode: false) let state = useWorkflowStore.getState() expect(state.blocks.agent1?.advancedMode).toBe(false) - // Toggle to advanced mode toggleBlockAdvancedMode('agent1') state = useWorkflowStore.getState() expect(state.blocks.agent1?.advancedMode).toBe(true) - // Toggle back to basic mode toggleBlockAdvancedMode('agent1') state = useWorkflowStore.getState() expect(state.blocks.agent1?.advancedMode).toBe(false) }) - it('should preserve systemPrompt and userPrompt when switching modes', () => { + it.concurrent('should preserve systemPrompt and userPrompt when switching modes', () => { const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState() const { setState: setSubBlockState } = useSubBlockStore - // Set up a mock active workflow useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' }) - // Add an agent block addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 }) - // Set initial values in basic mode setSubBlockState({ workflowValues: { 'test-workflow': { @@ -295,9 +257,7 @@ describe('workflow store', () => { }, }, }) - // Toggle to advanced mode toggleBlockAdvancedMode('agent1') - // Check that prompts are preserved in advanced mode let subBlockState = useSubBlockStore.getState() expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe( 'You are a helpful assistant' @@ -305,9 +265,7 @@ describe('workflow store', () => { expect(subBlockState.workflowValues['test-workflow'].agent1.userPrompt).toBe( 'Hello, how are you?' 
) - // Toggle back to basic mode toggleBlockAdvancedMode('agent1') - // Check that prompts are still preserved subBlockState = useSubBlockStore.getState() expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe( 'You are a helpful assistant' @@ -317,20 +275,16 @@ describe('workflow store', () => { ) }) - it('should preserve memories when switching from advanced to basic mode', () => { + it.concurrent('should preserve memories when switching from advanced to basic mode', () => { const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState() const { setState: setSubBlockState } = useSubBlockStore - // Set up a mock active workflow useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' }) - // Add an agent block in advanced mode addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 }) - // First toggle to advanced mode toggleBlockAdvancedMode('agent1') - // Set values including memories setSubBlockState({ workflowValues: { 'test-workflow': { @@ -346,10 +300,8 @@ describe('workflow store', () => { }, }) - // Toggle back to basic mode toggleBlockAdvancedMode('agent1') - // Check that prompts and memories are all preserved const subBlockState = useSubBlockStore.getState() expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe( 'You are a helpful assistant' @@ -363,52 +315,50 @@ describe('workflow store', () => { ]) }) - it('should handle mode switching when no subblock values exist', () => { + it.concurrent('should handle mode switching when no subblock values exist', () => { const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState() - // Set up a mock active workflow useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' }) - // Add an agent block addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 }) - // Toggle modes without any subblock values set expect(useWorkflowStore.getState().blocks.agent1?.advancedMode).toBe(false) expect(() => toggleBlockAdvancedMode('agent1')).not.toThrow() - // Verify the mode changed const state = useWorkflowStore.getState() expect(state.blocks.agent1?.advancedMode).toBe(true) }) - it('should not throw when toggling non-existent block', () => { + it.concurrent('should not throw when toggling non-existent block', () => { const { toggleBlockAdvancedMode } = useWorkflowStore.getState() - // Try to toggle a block that doesn't exist expect(() => toggleBlockAdvancedMode('non-existent')).not.toThrow() }) }) describe('addBlock with blockProperties', () => { - it('should create a block with default properties when no blockProperties provided', () => { - const { addBlock } = useWorkflowStore.getState() - - addBlock('agent1', 'agent', 'Test Agent', { x: 100, y: 200 }) - - const state = useWorkflowStore.getState() - const block = state.blocks.agent1 - - expect(block).toBeDefined() - expect(block.id).toBe('agent1') - expect(block.type).toBe('agent') - expect(block.name).toBe('Test Agent') - expect(block.position).toEqual({ x: 100, y: 200 }) - expect(block.enabled).toBe(true) - expect(block.horizontalHandles).toBe(true) - expect(block.height).toBe(0) - }) + it.concurrent( + 'should create a block with default properties when no blockProperties provided', + () => { + const { addBlock } = useWorkflowStore.getState() + + addBlock('agent1', 'agent', 'Test Agent', { x: 100, y: 200 }) + + const state = useWorkflowStore.getState() + const block = state.blocks.agent1 + + expect(block).toBeDefined() + expect(block.id).toBe('agent1') + expect(block.type).toBe('agent') + 
expect(block.name).toBe('Test Agent') + expect(block.position).toEqual({ x: 100, y: 200 }) + expect(block.enabled).toBe(true) + expect(block.horizontalHandles).toBe(true) + expect(block.height).toBe(0) + } + ) - it('should create a block with custom blockProperties for regular blocks', () => { + it.concurrent('should create a block with custom blockProperties for regular blocks', () => { const { addBlock } = useWorkflowStore.getState() addBlock( @@ -524,10 +474,8 @@ describe('workflow store', () => { it('should handle blockProperties with parent relationships', () => { const { addBlock } = useWorkflowStore.getState() - // First add a parent loop block addBlock('loop1', 'loop', 'Parent Loop', { x: 0, y: 0 }) - // Then add a child block with custom properties addBlock( 'agent1', 'agent', @@ -571,7 +519,7 @@ describe('workflow store', () => { addBlock('block3', 'trigger', 'Start', { x: 200, y: 0 }) }) - it('should have test blocks set up correctly', () => { + it.concurrent('should have test blocks set up correctly', () => { const state = useWorkflowStore.getState() expect(state.blocks.block1).toBeDefined() @@ -582,7 +530,7 @@ describe('workflow store', () => { expect(state.blocks.block3.name).toBe('Start') }) - it('should successfully rename a block when no conflicts exist', () => { + it.concurrent('should successfully rename a block when no conflicts exist', () => { const { updateBlockName } = useWorkflowStore.getState() const result = updateBlockName('block1', 'Data Processor') @@ -593,18 +541,21 @@ describe('workflow store', () => { expect(state.blocks.block1.name).toBe('Data Processor') }) - it('should allow renaming a block to a different case/spacing of its current name', () => { - const { updateBlockName } = useWorkflowStore.getState() + it.concurrent( + 'should allow renaming a block to a different case/spacing of its current name', + () => { + const { updateBlockName } = useWorkflowStore.getState() - const result = updateBlockName('block1', 'column ad') + const result = updateBlockName('block1', 'column ad') - expect(result.success).toBe(true) + expect(result.success).toBe(true) - const state = useWorkflowStore.getState() - expect(state.blocks.block1.name).toBe('column ad') - }) + const state = useWorkflowStore.getState() + expect(state.blocks.block1.name).toBe('column ad') + } + ) - it('should prevent renaming when another block has the same normalized name', () => { + it.concurrent('should prevent renaming when another block has the same normalized name', () => { const { updateBlockName } = useWorkflowStore.getState() const result = updateBlockName('block2', 'Column AD') @@ -615,29 +566,35 @@ describe('workflow store', () => { expect(state.blocks.block2.name).toBe('Employee Length') }) - it('should prevent renaming when another block has a name that normalizes to the same value', () => { - const { updateBlockName } = useWorkflowStore.getState() + it.concurrent( + 'should prevent renaming when another block has a name that normalizes to the same value', + () => { + const { updateBlockName } = useWorkflowStore.getState() - const result = updateBlockName('block2', 'columnad') + const result = updateBlockName('block2', 'columnad') - expect(result.success).toBe(false) + expect(result.success).toBe(false) - const state = useWorkflowStore.getState() - expect(state.blocks.block2.name).toBe('Employee Length') - }) + const state = useWorkflowStore.getState() + expect(state.blocks.block2.name).toBe('Employee Length') + } + ) - it('should prevent renaming when another block has a similar name 
with different spacing', () => { - const { updateBlockName } = useWorkflowStore.getState() + it.concurrent( + 'should prevent renaming when another block has a similar name with different spacing', + () => { + const { updateBlockName } = useWorkflowStore.getState() - const result = updateBlockName('block3', 'employee length') + const result = updateBlockName('block3', 'employee length') - expect(result.success).toBe(false) + expect(result.success).toBe(false) - const state = useWorkflowStore.getState() - expect(state.blocks.block3.name).toBe('Start') - }) + const state = useWorkflowStore.getState() + expect(state.blocks.block3.name).toBe('Start') + } + ) - it('should handle edge cases with empty or whitespace-only names', () => { + it.concurrent('should handle edge cases with empty or whitespace-only names', () => { const { updateBlockName } = useWorkflowStore.getState() const result1 = updateBlockName('block1', '') @@ -651,7 +608,7 @@ describe('workflow store', () => { expect(state.blocks.block2.name).toBe(' ') }) - it('should return false when trying to rename a non-existent block', () => { + it.concurrent('should return false when trying to rename a non-existent block', () => { const { updateBlockName } = useWorkflowStore.getState() const result = updateBlockName('nonexistent', 'New Name') diff --git a/apps/sim/stores/workflows/workflow/utils.test.ts b/apps/sim/stores/workflows/workflow/utils.test.ts index 01403f95d1..8094dc6f9f 100644 --- a/apps/sim/stores/workflows/workflow/utils.test.ts +++ b/apps/sim/stores/workflows/workflow/utils.test.ts @@ -3,7 +3,7 @@ import type { BlockState } from '@/stores/workflows/workflow/types' import { convertLoopBlockToLoop } from '@/stores/workflows/workflow/utils' describe('convertLoopBlockToLoop', () => { - it.concurrent('should parse JSON array string for forEach loops', () => { + it.concurrent('should keep JSON array string as-is for forEach loops', () => { const blocks: Record = { loop1: { id: 'loop1', @@ -25,11 +25,11 @@ describe('convertLoopBlockToLoop', () => { expect(result).toBeDefined() expect(result?.loopType).toBe('forEach') - expect(result?.forEachItems).toEqual(['item1', 'item2', 'item3']) + expect(result?.forEachItems).toBe('["item1", "item2", "item3"]') expect(result?.iterations).toBe(10) }) - it.concurrent('should parse JSON object string for forEach loops', () => { + it.concurrent('should keep JSON object string as-is for forEach loops', () => { const blocks: Record = { loop1: { id: 'loop1', @@ -51,7 +51,7 @@ describe('convertLoopBlockToLoop', () => { expect(result).toBeDefined() expect(result?.loopType).toBe('forEach') - expect(result?.forEachItems).toEqual({ key1: 'value1', key2: 'value2' }) + expect(result?.forEachItems).toBe('{"key1": "value1", "key2": "value2"}') }) it.concurrent('should keep string as-is if not valid JSON', () => { @@ -125,7 +125,6 @@ describe('convertLoopBlockToLoop', () => { expect(result).toBeDefined() expect(result?.loopType).toBe('for') expect(result?.iterations).toBe(5) - // For 'for' loops, the collection is still parsed in case it's later changed to forEach - expect(result?.forEachItems).toEqual(['should', 'not', 'matter']) + expect(result?.forEachItems).toBe('["should", "not", "matter"]') }) }) diff --git a/apps/sim/stores/workflows/workflow/utils.ts b/apps/sim/stores/workflows/workflow/utils.ts index 726cfe711d..a4b4a30193 100644 --- a/apps/sim/stores/workflows/workflow/utils.ts +++ b/apps/sim/stores/workflows/workflow/utils.ts @@ -25,28 +25,8 @@ export function convertLoopBlockToLoop( loopType, } - 
// Load ALL fields regardless of current loop type - // This allows switching between loop types without losing data - - // For for/forEach loops, read from collection (block data) and map to forEachItems (loops store) - let forEachItems: any = loopBlock.data?.collection || '' - if (typeof forEachItems === 'string' && forEachItems.trim()) { - const trimmed = forEachItems.trim() - // Try to parse if it looks like JSON - if (trimmed.startsWith('[') || trimmed.startsWith('{')) { - try { - forEachItems = JSON.parse(trimmed) - } catch { - // Keep as string if parsing fails - will be evaluated at runtime - } - } - } - loop.forEachItems = forEachItems - - // For while loops, use whileCondition + loop.forEachItems = loopBlock.data?.collection || '' loop.whileCondition = loopBlock.data?.whileCondition || '' - - // For do-while loops, use doWhileCondition loop.doWhileCondition = loopBlock.data?.doWhileCondition || '' return loop @@ -66,16 +46,13 @@ export function convertParallelBlockToParallel( const parallelBlock = blocks[parallelBlockId] if (!parallelBlock || parallelBlock.type !== 'parallel') return undefined - // Get the parallel type from block data, defaulting to 'count' for consistency const parallelType = parallelBlock.data?.parallelType || 'count' - // Validate parallelType against allowed values const validParallelTypes = ['collection', 'count'] as const const validatedParallelType = validParallelTypes.includes(parallelType as any) ? parallelType : 'collection' - // Only set distribution if it's a collection-based parallel const distribution = validatedParallelType === 'collection' ? parallelBlock.data?.collection || '' : '' @@ -139,7 +116,6 @@ export function findAllDescendantNodes( export function generateLoopBlocks(blocks: Record): Record { const loops: Record = {} - // Find all loop nodes Object.entries(blocks) .filter(([_, block]) => block.type === 'loop') .forEach(([id, block]) => { @@ -163,7 +139,6 @@ export function generateParallelBlocks( ): Record { const parallels: Record = {} - // Find all parallel nodes Object.entries(blocks) .filter(([_, block]) => block.type === 'parallel') .forEach(([id, block]) => { From b7228d57f7a47d1bfb78cf00a3becf54242215fa Mon Sep 17 00:00:00 2001 From: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com> Date: Tue, 16 Dec 2025 21:16:09 -0800 Subject: [PATCH 06/15] feat(service-now): added service now block (#2404) * feat(service-now): added service now block * fix: bun lock * improvement: fixed @trigger.dev/sdk imports and removal of sentry blocks * improvement: fixed @trigger.dev/sdk import * improvement: fixed @trigger.dev/sdk import * fix(servicenow): save accessTokenExpiresAt on initial OAuth account creation * docs(servicenow): add ServiceNow tool documentation and icon mapping * fixing bun lint issues * fixing username/password fields * fixing test file for refreshaccesstoken to support instance uri * removing basic auth and fixing undo-redo/store.ts * removed import set api code, changed CRUD operations to CRUD_record and added wand configuration to help users to generate JSON Arrays --------- Co-authored-by: priyanshu.solanki --- apps/docs/components/icons.tsx | 18 ++ apps/docs/components/ui/icon-mapping.ts | 2 + apps/docs/content/docs/en/tools/meta.json | 1 + .../docs/content/docs/en/tools/servicenow.mdx | 111 ++++++++ apps/sim/app/api/auth/oauth/utils.test.ts | 4 +- apps/sim/app/api/auth/oauth/utils.ts | 26 +- .../auth/oauth2/callback/servicenow/route.ts | 166 +++++++++++ .../api/auth/oauth2/servicenow/store/route.ts | 142 
++++++++++ .../api/auth/servicenow/authorize/route.ts | 264 ++++++++++++++++++ .../components/oauth-required-modal.tsx | 7 + apps/sim/blocks/blocks/servicenow.ts | 257 +++++++++++++++++ apps/sim/blocks/registry.ts | 2 + apps/sim/components/icons.tsx | 18 ++ apps/sim/hooks/queries/oauth-connections.ts | 7 + apps/sim/lib/core/config/env.ts | 2 + apps/sim/lib/oauth/oauth.ts | 51 +++- apps/sim/stores/undo-redo/store.ts | 3 +- apps/sim/tools/registry.ts | 10 + apps/sim/tools/servicenow/create_record.ts | 107 +++++++ apps/sim/tools/servicenow/delete_record.ts | 107 +++++++ apps/sim/tools/servicenow/index.ts | 11 + apps/sim/tools/servicenow/read_record.ts | 149 ++++++++++ apps/sim/tools/servicenow/types.ts | 80 ++++++ apps/sim/tools/servicenow/update_record.ts | 114 ++++++++ bun.lock | 84 +++--- 25 files changed, 1690 insertions(+), 53 deletions(-) create mode 100644 apps/docs/content/docs/en/tools/servicenow.mdx create mode 100644 apps/sim/app/api/auth/oauth2/callback/servicenow/route.ts create mode 100644 apps/sim/app/api/auth/oauth2/servicenow/store/route.ts create mode 100644 apps/sim/app/api/auth/servicenow/authorize/route.ts create mode 100644 apps/sim/blocks/blocks/servicenow.ts create mode 100644 apps/sim/tools/servicenow/create_record.ts create mode 100644 apps/sim/tools/servicenow/delete_record.ts create mode 100644 apps/sim/tools/servicenow/index.ts create mode 100644 apps/sim/tools/servicenow/read_record.ts create mode 100644 apps/sim/tools/servicenow/types.ts create mode 100644 apps/sim/tools/servicenow/update_record.ts diff --git a/apps/docs/components/icons.tsx b/apps/docs/components/icons.tsx index 12ead996f7..2e668f913e 100644 --- a/apps/docs/components/icons.tsx +++ b/apps/docs/components/icons.tsx @@ -3335,6 +3335,24 @@ export function SalesforceIcon(props: SVGProps) { ) } +export function ServiceNowIcon(props: SVGProps) { + return ( + + + + ) +} + export function ApolloIcon(props: SVGProps) { return ( = { webflow: WebflowIcon, pinecone: PineconeIcon, apollo: ApolloIcon, + servicenow: ServiceNowIcon, whatsapp: WhatsAppIcon, typeform: TypeformIcon, qdrant: QdrantIcon, diff --git a/apps/docs/content/docs/en/tools/meta.json b/apps/docs/content/docs/en/tools/meta.json index 7ca67d7a90..42771ff867 100644 --- a/apps/docs/content/docs/en/tools/meta.json +++ b/apps/docs/content/docs/en/tools/meta.json @@ -80,6 +80,7 @@ "sendgrid", "sentry", "serper", + "servicenow", "sftp", "sharepoint", "shopify", diff --git a/apps/docs/content/docs/en/tools/servicenow.mdx b/apps/docs/content/docs/en/tools/servicenow.mdx new file mode 100644 index 0000000000..affb455af2 --- /dev/null +++ b/apps/docs/content/docs/en/tools/servicenow.mdx @@ -0,0 +1,111 @@ +--- +title: ServiceNow +description: Create, read, update, delete, and bulk import ServiceNow records +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +## Usage Instructions + +Integrate ServiceNow into your workflow. Can create, read, update, and delete records in any ServiceNow table (incidents, tasks, users, etc.). Supports bulk import operations for data migration and ETL. 
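For orientation, the CRUD operations in this block map onto ServiceNow's REST Table API (`/api/now/table/<tableName>`). Below is a minimal sketch of the kind of request the Read operation ends up issuing; the instance URL, table, and query values are placeholders, and `accessToken` is assumed to hold a valid OAuth access token obtained through the ServiceNow credential:

```ts
// Placeholder values for illustration only
const instanceUrl = 'https://yourcompany.service-now.com'
const accessToken = '<oauth-access-token>' // assumed to come from the connected ServiceNow account
const query = 'active=true^priority=1'     // ServiceNow encoded query syntax

const url =
  `${instanceUrl}/api/now/table/incident` +
  `?sysparm_query=${encodeURIComponent(query)}` +
  `&sysparm_limit=10` +
  `&sysparm_fields=number,short_description,priority`

const res = await fetch(url, {
  headers: { Authorization: `Bearer ${accessToken}`, Accept: 'application/json' },
})
const { result } = await res.json() // `result` is an array of matching incident records
```

Create and Delete use the same endpoint family (`POST /api/now/table/<tableName>` and `DELETE /api/now/table/<tableName>/<sys_id>`), always authenticated with the OAuth bearer token.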
+ + + +## Tools + +### `servicenow_create_record` + +Create a new record in a ServiceNow table + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Yes | ServiceNow instance URL \(e.g., https://instance.service-now.com\) | +| `credential` | string | No | ServiceNow OAuth credential ID | +| `tableName` | string | Yes | Table name \(e.g., incident, task, sys_user\) | +| `fields` | json | Yes | Fields to set on the record \(JSON object\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `record` | json | Created ServiceNow record with sys_id and other fields | +| `metadata` | json | Operation metadata | + +### `servicenow_read_record` + +Read records from a ServiceNow table + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | No | ServiceNow instance URL \(auto-detected from OAuth if not provided\) | +| `credential` | string | No | ServiceNow OAuth credential ID | +| `tableName` | string | Yes | Table name | +| `sysId` | string | No | Specific record sys_id | +| `number` | string | No | Record number \(e.g., INC0010001\) | +| `query` | string | No | Encoded query string \(e.g., "active=true^priority=1"\) | +| `limit` | number | No | Maximum number of records to return | +| `fields` | string | No | Comma-separated list of fields to return | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `records` | array | Array of ServiceNow records | +| `metadata` | json | Operation metadata | + +### `servicenow_update_record` + +Update an existing record in a ServiceNow table + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | No | ServiceNow instance URL \(auto-detected from OAuth if not provided\) | +| `credential` | string | No | ServiceNow OAuth credential ID | +| `tableName` | string | Yes | Table name | +| `sysId` | string | Yes | Record sys_id to update | +| `fields` | json | Yes | Fields to update \(JSON object\) | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `record` | json | Updated ServiceNow record | +| `metadata` | json | Operation metadata | + +### `servicenow_delete_record` + +Delete a record from a ServiceNow table + +#### Input + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | No | ServiceNow instance URL \(auto-detected from OAuth if not provided\) | +| `credential` | string | No | ServiceNow OAuth credential ID | +| `tableName` | string | Yes | Table name | +| `sysId` | string | Yes | Record sys_id to delete | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `success` | boolean | Whether the deletion was successful | +| `metadata` | json | Operation metadata | + + + +## Notes + +- Category: `tools` +- Type: `servicenow` diff --git a/apps/sim/app/api/auth/oauth/utils.test.ts b/apps/sim/app/api/auth/oauth/utils.test.ts index af55886267..95b3894a6d 100644 --- a/apps/sim/app/api/auth/oauth/utils.test.ts +++ b/apps/sim/app/api/auth/oauth/utils.test.ts @@ -159,7 +159,7 @@ describe('OAuth Utils', () => { const result = await refreshTokenIfNeeded('request-id', mockCredential, 'credential-id') - expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token') + 
expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token', undefined) expect(mockDb.update).toHaveBeenCalled() expect(mockDb.set).toHaveBeenCalled() expect(result).toEqual({ accessToken: 'new-token', refreshed: true }) @@ -239,7 +239,7 @@ describe('OAuth Utils', () => { const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id') - expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token') + expect(mockRefreshOAuthToken).toHaveBeenCalledWith('google', 'refresh-token', undefined) expect(mockDb.update).toHaveBeenCalled() expect(mockDb.set).toHaveBeenCalled() expect(token).toBe('new-token') diff --git a/apps/sim/app/api/auth/oauth/utils.ts b/apps/sim/app/api/auth/oauth/utils.ts index da5e352461..66ea033fdb 100644 --- a/apps/sim/app/api/auth/oauth/utils.ts +++ b/apps/sim/app/api/auth/oauth/utils.ts @@ -18,6 +18,7 @@ interface AccountInsertData { updatedAt: Date refreshToken?: string idToken?: string + accessTokenExpiresAt?: Date } /** @@ -103,6 +104,7 @@ export async function getOAuthToken(userId: string, providerId: string): Promise accessToken: account.accessToken, refreshToken: account.refreshToken, accessTokenExpiresAt: account.accessTokenExpiresAt, + idToken: account.idToken, }) .from(account) .where(and(eq(account.userId, userId), eq(account.providerId, providerId))) @@ -130,7 +132,14 @@ export async function getOAuthToken(userId: string, providerId: string): Promise try { // Use the existing refreshOAuthToken function - const refreshResult = await refreshOAuthToken(providerId, credential.refreshToken!) + // For ServiceNow, pass the instance URL (stored in idToken) for the token endpoint + const instanceUrl = + providerId === 'servicenow' ? (credential.idToken ?? undefined) : undefined + const refreshResult = await refreshOAuthToken( + providerId, + credential.refreshToken!, + instanceUrl + ) if (!refreshResult) { logger.error(`Failed to refresh token for user ${userId}, provider ${providerId}`, { @@ -213,9 +222,13 @@ export async function refreshAccessTokenIfNeeded( if (shouldRefresh) { logger.info(`[${requestId}] Token expired, attempting to refresh for credential`) try { + // For ServiceNow, pass the instance URL (stored in idToken) for the token endpoint + const instanceUrl = + credential.providerId === 'servicenow' ? (credential.idToken ?? undefined) : undefined const refreshedToken = await refreshOAuthToken( credential.providerId, - credential.refreshToken! + credential.refreshToken!, + instanceUrl ) if (!refreshedToken) { @@ -287,7 +300,14 @@ export async function refreshTokenIfNeeded( } try { - const refreshResult = await refreshOAuthToken(credential.providerId, credential.refreshToken!) + // For ServiceNow, pass the instance URL (stored in idToken) for the token endpoint + const instanceUrl = + credential.providerId === 'servicenow' ? (credential.idToken ?? 
undefined) : undefined + const refreshResult = await refreshOAuthToken( + credential.providerId, + credential.refreshToken!, + instanceUrl + ) if (!refreshResult) { logger.error(`[${requestId}] Failed to refresh token for credential`) diff --git a/apps/sim/app/api/auth/oauth2/callback/servicenow/route.ts b/apps/sim/app/api/auth/oauth2/callback/servicenow/route.ts new file mode 100644 index 0000000000..0a84066f63 --- /dev/null +++ b/apps/sim/app/api/auth/oauth2/callback/servicenow/route.ts @@ -0,0 +1,166 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { getSession } from '@/lib/auth' +import { env } from '@/lib/core/config/env' +import { getBaseUrl } from '@/lib/core/utils/urls' +import { createLogger } from '@/lib/logs/console/logger' + +const logger = createLogger('ServiceNowCallback') + +export const dynamic = 'force-dynamic' + +export async function GET(request: NextRequest) { + const baseUrl = getBaseUrl() + + try { + const session = await getSession() + if (!session?.user?.id) { + return NextResponse.redirect(`${baseUrl}/workspace?error=unauthorized`) + } + + const { searchParams } = request.nextUrl + const code = searchParams.get('code') + const state = searchParams.get('state') + const error = searchParams.get('error') + const errorDescription = searchParams.get('error_description') + + // Handle OAuth errors from ServiceNow + if (error) { + logger.error('ServiceNow OAuth error:', { error, errorDescription }) + return NextResponse.redirect( + `${baseUrl}/workspace?error=servicenow_auth_error&message=${encodeURIComponent(errorDescription || error)}` + ) + } + + const storedState = request.cookies.get('servicenow_oauth_state')?.value + const storedInstanceUrl = request.cookies.get('servicenow_instance_url')?.value + + const clientId = env.SERVICENOW_CLIENT_ID + const clientSecret = env.SERVICENOW_CLIENT_SECRET + + if (!clientId || !clientSecret) { + logger.error('ServiceNow credentials not configured') + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_config_error`) + } + + // Validate state parameter + if (!state || state !== storedState) { + logger.error('State mismatch in ServiceNow OAuth callback') + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_state_mismatch`) + } + + // Validate authorization code + if (!code) { + logger.error('No code received from ServiceNow') + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_no_code`) + } + + // Validate instance URL + if (!storedInstanceUrl) { + logger.error('No instance URL stored') + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_no_instance`) + } + + const redirectUri = `${baseUrl}/api/auth/oauth2/callback/servicenow` + + // Exchange authorization code for access token + const tokenResponse = await fetch(`${storedInstanceUrl}/oauth_token.do`, { + method: 'POST', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + }, + body: new URLSearchParams({ + grant_type: 'authorization_code', + code: code, + redirect_uri: redirectUri, + client_id: clientId, + client_secret: clientSecret, + }).toString(), + }) + + if (!tokenResponse.ok) { + const errorText = await tokenResponse.text() + logger.error('Failed to exchange code for token:', { + status: tokenResponse.status, + body: errorText, + }) + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_token_error`) + } + + const tokenData = await tokenResponse.json() + const accessToken = tokenData.access_token + const refreshToken = tokenData.refresh_token + const 
expiresIn = tokenData.expires_in + // ServiceNow always grants 'useraccount' scope but returns empty string + const scope = tokenData.scope || 'useraccount' + + logger.info('ServiceNow token exchange successful:', { + hasAccessToken: !!accessToken, + hasRefreshToken: !!refreshToken, + expiresIn, + }) + + if (!accessToken) { + logger.error('No access token in response') + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_no_token`) + } + + // Redirect to store endpoint with token data in cookies + const storeUrl = new URL(`${baseUrl}/api/auth/oauth2/servicenow/store`) + + const response = NextResponse.redirect(storeUrl) + + // Store token data in secure cookies for the store endpoint + response.cookies.set('servicenow_pending_token', accessToken, { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60, // 1 minute + path: '/', + }) + + if (refreshToken) { + response.cookies.set('servicenow_pending_refresh_token', refreshToken, { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60, + path: '/', + }) + } + + response.cookies.set('servicenow_pending_instance', storedInstanceUrl, { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60, + path: '/', + }) + + response.cookies.set('servicenow_pending_scope', scope || '', { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60, + path: '/', + }) + + if (expiresIn) { + response.cookies.set('servicenow_pending_expires_in', expiresIn.toString(), { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60, + path: '/', + }) + } + + // Clean up OAuth state cookies + response.cookies.delete('servicenow_oauth_state') + response.cookies.delete('servicenow_instance_url') + + return response + } catch (error) { + logger.error('Error in ServiceNow OAuth callback:', error) + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_callback_error`) + } +} diff --git a/apps/sim/app/api/auth/oauth2/servicenow/store/route.ts b/apps/sim/app/api/auth/oauth2/servicenow/store/route.ts new file mode 100644 index 0000000000..9029af8c03 --- /dev/null +++ b/apps/sim/app/api/auth/oauth2/servicenow/store/route.ts @@ -0,0 +1,142 @@ +import { db } from '@sim/db' +import { account } from '@sim/db/schema' +import { and, eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { getSession } from '@/lib/auth' +import { getBaseUrl } from '@/lib/core/utils/urls' +import { createLogger } from '@/lib/logs/console/logger' +import { safeAccountInsert } from '@/app/api/auth/oauth/utils' + +const logger = createLogger('ServiceNowStore') + +export const dynamic = 'force-dynamic' + +export async function GET(request: NextRequest) { + const baseUrl = getBaseUrl() + + try { + const session = await getSession() + if (!session?.user?.id) { + logger.warn('Unauthorized attempt to store ServiceNow token') + return NextResponse.redirect(`${baseUrl}/workspace?error=unauthorized`) + } + + // Retrieve token data from cookies + const accessToken = request.cookies.get('servicenow_pending_token')?.value + const refreshToken = request.cookies.get('servicenow_pending_refresh_token')?.value + const instanceUrl = request.cookies.get('servicenow_pending_instance')?.value + const scope = request.cookies.get('servicenow_pending_scope')?.value + const expiresInStr = request.cookies.get('servicenow_pending_expires_in')?.value + 
+ if (!accessToken || !instanceUrl) { + logger.error('Missing token or instance URL in cookies') + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_missing_data`) + } + + // Validate the token by fetching user info from ServiceNow + const userResponse = await fetch( + `${instanceUrl}/api/now/table/sys_user?sysparm_query=user_name=${encodeURIComponent('javascript:gs.getUserName()')}&sysparm_limit=1`, + { + headers: { + Authorization: `Bearer ${accessToken}`, + Accept: 'application/json', + }, + } + ) + + // Alternative: Use the instance info endpoint instead + let accountIdentifier = instanceUrl + let userInfo: Record | null = null + + // Try to get current user info + try { + const whoamiResponse = await fetch(`${instanceUrl}/api/now/ui/user/current_user`, { + headers: { + Authorization: `Bearer ${accessToken}`, + Accept: 'application/json', + }, + }) + + if (whoamiResponse.ok) { + const whoamiData = await whoamiResponse.json() + userInfo = whoamiData.result + if (userInfo?.user_sys_id) { + accountIdentifier = userInfo.user_sys_id as string + } else if (userInfo?.user_name) { + accountIdentifier = userInfo.user_name as string + } + logger.info('Retrieved ServiceNow user info', { accountIdentifier }) + } + } catch (e) { + logger.warn('Could not retrieve ServiceNow user info, using instance URL as identifier') + } + + // Calculate expiration time + const now = new Date() + const expiresIn = expiresInStr ? Number.parseInt(expiresInStr, 10) : 3600 // Default to 1 hour + const accessTokenExpiresAt = new Date(now.getTime() + expiresIn * 1000) + + // Check for existing ServiceNow account for this user + const existing = await db.query.account.findFirst({ + where: and(eq(account.userId, session.user.id), eq(account.providerId, 'servicenow')), + }) + + // ServiceNow always grants 'useraccount' scope but returns empty string + const effectiveScope = scope?.trim() ? 
scope : 'useraccount' + + const accountData = { + accessToken: accessToken, + refreshToken: refreshToken || null, + accountId: accountIdentifier, + scope: effectiveScope, + updatedAt: now, + accessTokenExpiresAt: accessTokenExpiresAt, + idToken: instanceUrl, // Store instance URL in idToken for API calls + } + + if (existing) { + await db.update(account).set(accountData).where(eq(account.id, existing.id)) + logger.info('Updated existing ServiceNow account', { accountId: existing.id }) + } else { + await safeAccountInsert( + { + id: `servicenow_${session.user.id}_${Date.now()}`, + userId: session.user.id, + providerId: 'servicenow', + accountId: accountData.accountId, + accessToken: accountData.accessToken, + refreshToken: accountData.refreshToken || undefined, + accessTokenExpiresAt: accountData.accessTokenExpiresAt, + scope: accountData.scope, + idToken: accountData.idToken, + createdAt: now, + updatedAt: now, + }, + { provider: 'ServiceNow', identifier: instanceUrl } + ) + logger.info('Created new ServiceNow account') + } + + // Get return URL from cookie + const returnUrl = request.cookies.get('servicenow_return_url')?.value + + const redirectUrl = returnUrl || `${baseUrl}/workspace` + const finalUrl = new URL(redirectUrl) + finalUrl.searchParams.set('servicenow_connected', 'true') + + const response = NextResponse.redirect(finalUrl.toString()) + + // Clean up all ServiceNow cookies + response.cookies.delete('servicenow_pending_token') + response.cookies.delete('servicenow_pending_refresh_token') + response.cookies.delete('servicenow_pending_instance') + response.cookies.delete('servicenow_pending_scope') + response.cookies.delete('servicenow_pending_expires_in') + response.cookies.delete('servicenow_return_url') + + return response + } catch (error) { + logger.error('Error storing ServiceNow token:', error) + return NextResponse.redirect(`${baseUrl}/workspace?error=servicenow_store_error`) + } +} diff --git a/apps/sim/app/api/auth/servicenow/authorize/route.ts b/apps/sim/app/api/auth/servicenow/authorize/route.ts new file mode 100644 index 0000000000..a505ddd608 --- /dev/null +++ b/apps/sim/app/api/auth/servicenow/authorize/route.ts @@ -0,0 +1,264 @@ +import { type NextRequest, NextResponse } from 'next/server' +import { getSession } from '@/lib/auth' +import { env } from '@/lib/core/config/env' +import { getBaseUrl } from '@/lib/core/utils/urls' +import { createLogger } from '@/lib/logs/console/logger' + +const logger = createLogger('ServiceNowAuthorize') + +export const dynamic = 'force-dynamic' + +/** + * ServiceNow OAuth scopes + * useraccount - Default scope for user account access + * Note: ServiceNow always returns 'useraccount' in OAuth responses regardless of requested scopes. + * Table API permissions are configured at the OAuth application level in ServiceNow. 
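+ * For illustration, the authorization URL built below has the form (all values are placeholders):
+ *   https://yourcompany.service-now.com/oauth_auth.do?response_type=code&client_id=<client-id>&redirect_uri=<base-url>/api/auth/oauth2/callback/servicenow&state=<uuid>&scope=useraccount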
+ */ +const SERVICENOW_SCOPES = 'useraccount' + +/** + * Validates a ServiceNow instance URL format + */ +function isValidInstanceUrl(url: string): boolean { + try { + const parsed = new URL(url) + return ( + parsed.protocol === 'https:' && + (parsed.hostname.endsWith('.service-now.com') || parsed.hostname.endsWith('.servicenow.com')) + ) + } catch { + return false + } +} + +export async function GET(request: NextRequest) { + try { + const session = await getSession() + if (!session?.user?.id) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const clientId = env.SERVICENOW_CLIENT_ID + + if (!clientId) { + logger.error('SERVICENOW_CLIENT_ID not configured') + return NextResponse.json({ error: 'ServiceNow client ID not configured' }, { status: 500 }) + } + + const instanceUrl = request.nextUrl.searchParams.get('instanceUrl') + const returnUrl = request.nextUrl.searchParams.get('returnUrl') + + if (!instanceUrl) { + const returnUrlParam = returnUrl ? encodeURIComponent(returnUrl) : '' + return new NextResponse( + ` + + + Connect ServiceNow Instance + + + + + +
[inline HTML prompt page; markup lost in extraction — recoverable text: heading "Connect Your ServiceNow Instance", prompt "Enter your ServiceNow instance URL to continue", hint "Your instance URL looks like: https://yourcompany.service-now.com"]
+ + + +`, + { + headers: { + 'Content-Type': 'text/html; charset=utf-8', + 'Cache-Control': 'no-store, no-cache, must-revalidate', + }, + } + ) + } + + // Validate instance URL + if (!isValidInstanceUrl(instanceUrl)) { + logger.error('Invalid ServiceNow instance URL:', { instanceUrl }) + return NextResponse.json( + { + error: + 'Invalid ServiceNow instance URL. Must be a valid .service-now.com or .servicenow.com domain.', + }, + { status: 400 } + ) + } + + // Clean the instance URL + const parsedUrl = new URL(instanceUrl) + const cleanInstanceUrl = parsedUrl.origin + + const baseUrl = getBaseUrl() + const redirectUri = `${baseUrl}/api/auth/oauth2/callback/servicenow` + + const state = crypto.randomUUID() + + // ServiceNow OAuth authorization URL + const oauthUrl = + `${cleanInstanceUrl}/oauth_auth.do?` + + new URLSearchParams({ + response_type: 'code', + client_id: clientId, + redirect_uri: redirectUri, + state: state, + scope: SERVICENOW_SCOPES, + }).toString() + + logger.info('Initiating ServiceNow OAuth:', { + instanceUrl: cleanInstanceUrl, + requestedScopes: SERVICENOW_SCOPES, + redirectUri, + returnUrl: returnUrl || 'not specified', + }) + + const response = NextResponse.redirect(oauthUrl) + + // Store state and instance URL in cookies for validation in callback + response.cookies.set('servicenow_oauth_state', state, { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60 * 10, // 10 minutes + path: '/', + }) + + response.cookies.set('servicenow_instance_url', cleanInstanceUrl, { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60 * 10, + path: '/', + }) + + if (returnUrl) { + response.cookies.set('servicenow_return_url', returnUrl, { + httpOnly: true, + secure: process.env.NODE_ENV === 'production', + sameSite: 'lax', + maxAge: 60 * 10, + path: '/', + }) + } + + return response + } catch (error) { + logger.error('Error initiating ServiceNow authorization:', error) + return NextResponse.json({ error: 'Internal server error' }, { status: 500 }) + } +} diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx index 818defe02f..c03422dd77 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal.tsx @@ -347,6 +347,13 @@ export function OAuthRequiredModal({ return } + if (providerId === 'servicenow') { + // Pass the current URL so we can redirect back after OAuth + const returnUrl = encodeURIComponent(window.location.href) + window.location.href = `/api/auth/servicenow/authorize?returnUrl=${returnUrl}` + return + } + await client.oauth2.link({ providerId, callbackURL: window.location.href, diff --git a/apps/sim/blocks/blocks/servicenow.ts b/apps/sim/blocks/blocks/servicenow.ts new file mode 100644 index 0000000000..110323dc1f --- /dev/null +++ b/apps/sim/blocks/blocks/servicenow.ts @@ -0,0 +1,257 @@ +import { ServiceNowIcon } from '@/components/icons' +import type { BlockConfig } from 
'@/blocks/types' +import { AuthMode } from '@/blocks/types' +import type { ServiceNowResponse } from '@/tools/servicenow/types' + +export const ServiceNowBlock: BlockConfig = { + type: 'servicenow', + name: 'ServiceNow', + description: 'Create, read, update, delete, and bulk import ServiceNow records', + authMode: AuthMode.OAuth, + longDescription: + 'Integrate ServiceNow into your workflow. Can create, read, update, and delete records in any ServiceNow table (incidents, tasks, users, etc.). Supports bulk import operations for data migration and ETL.', + docsLink: 'https://docs.sim.ai/tools/servicenow', + category: 'tools', + bgColor: '#032D42', + icon: ServiceNowIcon, + subBlocks: [ + // Operation selector + { + id: 'operation', + title: 'Operation', + type: 'dropdown', + options: [ + { label: 'Create Record', id: 'create' }, + { label: 'Read Records', id: 'read' }, + { label: 'Update Record', id: 'update' }, + { label: 'Delete Record', id: 'delete' }, + ], + value: () => 'read', + }, + // Instance URL + { + id: 'instanceUrl', + title: 'Instance URL', + type: 'short-input', + placeholder: 'https://instance.service-now.com', + required: true, + description: 'Your ServiceNow instance URL', + }, + // OAuth Credential + { + id: 'credential', + title: 'ServiceNow Account', + type: 'oauth-input', + serviceId: 'servicenow', + requiredScopes: ['useraccount'], + placeholder: 'Select ServiceNow account', + required: true, + }, + // Table Name + { + id: 'tableName', + title: 'Table Name', + type: 'short-input', + placeholder: 'incident, task, sys_user, etc.', + required: true, + description: 'ServiceNow table name', + }, + // Create-specific: Fields + { + id: 'fields', + title: 'Fields (JSON)', + type: 'code', + language: 'json', + placeholder: '{\n "short_description": "Issue description",\n "priority": "1"\n}', + condition: { field: 'operation', value: 'create' }, + required: true, + wandConfig: { + enabled: true, + maintainHistory: true, + prompt: `You are an expert ServiceNow developer. Generate ServiceNow record field objects as JSON based on the user's request. + +### CONTEXT +ServiceNow records use specific field names depending on the table. 
Common tables and their key fields include: +- incident: short_description, description, priority (1-5), urgency (1-3), impact (1-3), caller_id, assignment_group, assigned_to, category, subcategory, state +- task: short_description, description, priority, assignment_group, assigned_to, state +- sys_user: user_name, first_name, last_name, email, active, department, title +- change_request: short_description, description, type, risk, impact, priority, assignment_group + +### RULES +- Output ONLY valid JSON object starting with { and ending with } +- Use correct ServiceNow field names for the target table +- Values should be strings unless the field specifically requires another type +- For reference fields (like caller_id, assigned_to), use sys_id values or display values +- Do not include sys_id in create operations (it's auto-generated) + +### EXAMPLE +User: "Create a high priority incident for network outage" +Output: {"short_description": "Network outage", "description": "Network connectivity issue affecting users", "priority": "1", "urgency": "1", "impact": "1", "category": "Network"}`, + generationType: 'json-object', + }, + }, + // Read-specific: Query options + { + id: 'sysId', + title: 'Record sys_id', + type: 'short-input', + placeholder: 'Specific record sys_id (optional)', + condition: { field: 'operation', value: 'read' }, + }, + { + id: 'number', + title: 'Record Number', + type: 'short-input', + placeholder: 'e.g., INC0010001 (optional)', + condition: { field: 'operation', value: 'read' }, + }, + { + id: 'query', + title: 'Query String', + type: 'short-input', + placeholder: 'active=true^priority=1', + condition: { field: 'operation', value: 'read' }, + description: 'ServiceNow encoded query string', + }, + { + id: 'limit', + title: 'Limit', + type: 'short-input', + placeholder: '10', + condition: { field: 'operation', value: 'read' }, + }, + { + id: 'fields', + title: 'Fields to Return', + type: 'short-input', + placeholder: 'number,short_description,priority', + condition: { field: 'operation', value: 'read' }, + description: 'Comma-separated list of fields', + }, + // Update-specific: sysId and fields + { + id: 'sysId', + title: 'Record sys_id', + type: 'short-input', + placeholder: 'Record sys_id to update', + condition: { field: 'operation', value: 'update' }, + required: true, + }, + { + id: 'fields', + title: 'Fields to Update (JSON)', + type: 'code', + language: 'json', + placeholder: '{\n "state": "2",\n "assigned_to": "user.sys_id"\n}', + condition: { field: 'operation', value: 'update' }, + required: true, + wandConfig: { + enabled: true, + maintainHistory: true, + prompt: `You are an expert ServiceNow developer. Generate ServiceNow record update field objects as JSON based on the user's request. + +### CONTEXT +ServiceNow records use specific field names depending on the table. 
Common update scenarios include: +- incident: state (1=New, 2=In Progress, 3=On Hold, 6=Resolved, 7=Closed), assigned_to, work_notes, close_notes, close_code +- task: state, assigned_to, work_notes, percent_complete +- change_request: state, risk, approval, work_notes + +### RULES +- Output ONLY valid JSON object starting with { and ending with } +- Include only the fields that need to be updated +- Use correct ServiceNow field names for the target table +- For state transitions, use the correct numeric state values +- work_notes and comments fields append to existing values + +### EXAMPLE +User: "Assign the incident to John and set to in progress" +Output: {"state": "2", "assigned_to": "john.doe", "work_notes": "Assigned and starting investigation"}`, + generationType: 'json-object', + }, + }, + // Delete-specific: sysId + { + id: 'sysId', + title: 'Record sys_id', + type: 'short-input', + placeholder: 'Record sys_id to delete', + condition: { field: 'operation', value: 'delete' }, + required: true, + }, + ], + tools: { + access: [ + 'servicenow_create_record', + 'servicenow_read_record', + 'servicenow_update_record', + 'servicenow_delete_record', + ], + config: { + tool: (params) => { + switch (params.operation) { + case 'create': + return 'servicenow_create_record' + case 'read': + return 'servicenow_read_record' + case 'update': + return 'servicenow_update_record' + case 'delete': + return 'servicenow_delete_record' + default: + throw new Error(`Invalid ServiceNow operation: ${params.operation}`) + } + }, + params: (params) => { + const { operation, fields, records, credential, ...rest } = params + + // Parse JSON fields if provided + let parsedFields: Record | undefined + if (fields && (operation === 'create' || operation === 'update')) { + try { + parsedFields = typeof fields === 'string' ? JSON.parse(fields) : fields + } catch (error) { + throw new Error( + `Invalid JSON in fields: ${error instanceof Error ? 
error.message : String(error)}` + ) + } + } + + // Validate OAuth credential + if (!credential) { + throw new Error('ServiceNow account credential is required') + } + + // Build params + const baseParams: Record = { + ...rest, + credential, + } + + if (operation === 'create' || operation === 'update') { + return { + ...baseParams, + fields: parsedFields, + } + } + return baseParams + }, + }, + }, + inputs: { + operation: { type: 'string', description: 'Operation to perform' }, + instanceUrl: { type: 'string', description: 'ServiceNow instance URL' }, + credential: { type: 'string', description: 'ServiceNow OAuth credential ID' }, + tableName: { type: 'string', description: 'Table name' }, + sysId: { type: 'string', description: 'Record sys_id' }, + number: { type: 'string', description: 'Record number' }, + query: { type: 'string', description: 'Query string' }, + limit: { type: 'number', description: 'Result limit' }, + fields: { type: 'json', description: 'Fields object or JSON string' }, + }, + outputs: { + record: { type: 'json', description: 'Single ServiceNow record' }, + records: { type: 'json', description: 'Array of ServiceNow records' }, + success: { type: 'boolean', description: 'Operation success status' }, + metadata: { type: 'json', description: 'Operation metadata' }, + }, +} diff --git a/apps/sim/blocks/registry.ts b/apps/sim/blocks/registry.ts index ca1f30e845..bd5b96f6bd 100644 --- a/apps/sim/blocks/registry.ts +++ b/apps/sim/blocks/registry.ts @@ -96,6 +96,7 @@ import { SearchBlock } from '@/blocks/blocks/search' import { SendGridBlock } from '@/blocks/blocks/sendgrid' import { SentryBlock } from '@/blocks/blocks/sentry' import { SerperBlock } from '@/blocks/blocks/serper' +import { ServiceNowBlock } from '@/blocks/blocks/servicenow' import { SftpBlock } from '@/blocks/blocks/sftp' import { SharepointBlock } from '@/blocks/blocks/sharepoint' import { ShopifyBlock } from '@/blocks/blocks/shopify' @@ -238,6 +239,7 @@ export const registry: Record = { search: SearchBlock, sendgrid: SendGridBlock, sentry: SentryBlock, + servicenow: ServiceNowBlock, serper: SerperBlock, sharepoint: SharepointBlock, shopify: ShopifyBlock, diff --git a/apps/sim/components/icons.tsx b/apps/sim/components/icons.tsx index 12ead996f7..2e668f913e 100644 --- a/apps/sim/components/icons.tsx +++ b/apps/sim/components/icons.tsx @@ -3335,6 +3335,24 @@ export function SalesforceIcon(props: SVGProps) { ) } +export function ServiceNowIcon(props: SVGProps) { + return ( + + + + ) +} + export function ApolloIcon(props: SVGProps) { return ( = { }, defaultService: 'shopify', }, + servicenow: { + id: 'servicenow', + name: 'ServiceNow', + icon: (props) => ServiceNowIcon(props), + services: { + servicenow: { + id: 'servicenow', + name: 'ServiceNow', + description: 'Manage incidents, tasks, and records in your ServiceNow instance.', + providerId: 'servicenow', + icon: (props) => ServiceNowIcon(props), + baseProviderIcon: (props) => ServiceNowIcon(props), + scopes: ['useraccount'], + }, + }, + defaultService: 'servicenow', + }, slack: { id: 'slack', name: 'Slack', @@ -1487,6 +1507,21 @@ function getProviderAuthConfig(provider: string): ProviderAuthConfig { supportsRefreshTokenRotation: false, } } + case 'servicenow': { + // ServiceNow OAuth - token endpoint is instance-specific + // This is a placeholder; actual token endpoint is set during authorization + const { clientId, clientSecret } = getCredentials( + env.SERVICENOW_CLIENT_ID, + env.SERVICENOW_CLIENT_SECRET + ) + return { + tokenEndpoint: '', // 
Instance-specific, set during authorization + clientId, + clientSecret, + useBasicAuth: false, + supportsRefreshTokenRotation: true, + } + } case 'zoom': { const { clientId, clientSecret } = getCredentials(env.ZOOM_CLIENT_ID, env.ZOOM_CLIENT_SECRET) return { @@ -1565,11 +1600,13 @@ function buildAuthRequest( * This is a server-side utility function to refresh OAuth tokens * @param providerId The provider ID (e.g., 'google-drive') * @param refreshToken The refresh token to use + * @param instanceUrl Optional instance URL for providers with instance-specific endpoints (e.g., ServiceNow) * @returns Object containing the new access token and expiration time in seconds, or null if refresh failed */ export async function refreshOAuthToken( providerId: string, - refreshToken: string + refreshToken: string, + instanceUrl?: string ): Promise<{ accessToken: string; expiresIn: number; refreshToken: string } | null> { try { // Get the provider from the providerId (e.g., 'google-drive' -> 'google') @@ -1578,11 +1615,21 @@ export async function refreshOAuthToken( // Get provider configuration const config = getProviderAuthConfig(provider) + // For ServiceNow, the token endpoint is instance-specific + let tokenEndpoint = config.tokenEndpoint + if (provider === 'servicenow') { + if (!instanceUrl) { + logger.error('ServiceNow token refresh requires instance URL') + return null + } + tokenEndpoint = `${instanceUrl.replace(/\/$/, '')}/oauth_token.do` + } + // Build authentication request const { headers, bodyParams } = buildAuthRequest(config, refreshToken) // Refresh the token - const response = await fetch(config.tokenEndpoint, { + const response = await fetch(tokenEndpoint, { method: 'POST', headers, body: new URLSearchParams(bodyParams).toString(), diff --git a/apps/sim/stores/undo-redo/store.ts b/apps/sim/stores/undo-redo/store.ts index b5575bc49a..af2867a734 100644 --- a/apps/sim/stores/undo-redo/store.ts +++ b/apps/sim/stores/undo-redo/store.ts @@ -45,8 +45,9 @@ function getStackKey(workflowId: string, userId: string): string { /** * Custom storage adapter for Zustand's persist middleware. - * We need this wrapper to gracefully handle 'QuotaExceededError' when localStorage is full. + * We need this wrapper to gracefully handle 'QuotaExceededError' when localStorage is full, * Without this, the default storage engine would throw and crash the application. + * and to properly handle SSR/Node.js environments. 
*/ const safeStorageAdapter = { getItem: (name: string): string | null => { diff --git a/apps/sim/tools/registry.ts b/apps/sim/tools/registry.ts index ed28b15e1b..f6830d4621 100644 --- a/apps/sim/tools/registry.ts +++ b/apps/sim/tools/registry.ts @@ -959,6 +959,12 @@ import { updateProjectTool, } from '@/tools/sentry' import { serperSearchTool } from '@/tools/serper' +import { + servicenowCreateRecordTool, + servicenowDeleteRecordTool, + servicenowReadRecordTool, + servicenowUpdateRecordTool, +} from '@/tools/servicenow' import { sftpDeleteTool, sftpDownloadTool, @@ -1520,6 +1526,10 @@ export const tools: Record = { github_repo_info: githubRepoInfoTool, github_latest_commit: githubLatestCommitTool, serper_search: serperSearchTool, + servicenow_create_record: servicenowCreateRecordTool, + servicenow_read_record: servicenowReadRecordTool, + servicenow_update_record: servicenowUpdateRecordTool, + servicenow_delete_record: servicenowDeleteRecordTool, tavily_search: tavilySearchTool, tavily_extract: tavilyExtractTool, tavily_crawl: tavilyCrawlTool, diff --git a/apps/sim/tools/servicenow/create_record.ts b/apps/sim/tools/servicenow/create_record.ts new file mode 100644 index 0000000000..a8ee81e072 --- /dev/null +++ b/apps/sim/tools/servicenow/create_record.ts @@ -0,0 +1,107 @@ +import { createLogger } from '@/lib/logs/console/logger' +import type { ServiceNowCreateParams, ServiceNowCreateResponse } from '@/tools/servicenow/types' +import type { ToolConfig } from '@/tools/types' + +const logger = createLogger('ServiceNowCreateRecordTool') + +export const createRecordTool: ToolConfig = { + id: 'servicenow_create_record', + name: 'Create ServiceNow Record', + description: 'Create a new record in a ServiceNow table', + version: '1.0.0', + + oauth: { + required: true, + provider: 'servicenow', + }, + + params: { + instanceUrl: { + type: 'string', + required: true, + visibility: 'user-only', + description: 'ServiceNow instance URL (e.g., https://instance.service-now.com)', + }, + credential: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'ServiceNow OAuth credential ID', + }, + tableName: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Table name (e.g., incident, task, sys_user)', + }, + fields: { + type: 'json', + required: true, + visibility: 'user-or-llm', + description: 'Fields to set on the record (JSON object)', + }, + }, + + request: { + url: (params) => { + // Use instanceUrl if provided, otherwise fall back to idToken (stored instance URL from OAuth) + const baseUrl = (params.instanceUrl || params.idToken || '').replace(/\/$/, '') + if (!baseUrl) { + throw new Error('ServiceNow instance URL is required') + } + return `${baseUrl}/api/now/table/${params.tableName}` + }, + method: 'POST', + headers: (params) => { + if (!params.accessToken) { + throw new Error('OAuth access token is required') + } + return { + Authorization: `Bearer ${params.accessToken}`, + 'Content-Type': 'application/json', + Accept: 'application/json', + } + }, + body: (params) => { + if (!params.fields || typeof params.fields !== 'object') { + throw new Error('Fields must be a JSON object') + } + return params.fields + }, + }, + + transformResponse: async (response: Response) => { + try { + const data = await response.json() + + if (!response.ok) { + const error = data.error || data + throw new Error(typeof error === 'string' ? 
error : error.message || JSON.stringify(error)) + } + + return { + success: true, + output: { + record: data.result, + metadata: { + recordCount: 1, + }, + }, + } + } catch (error) { + logger.error('ServiceNow create record - Error processing response:', { error }) + throw error + } + }, + + outputs: { + record: { + type: 'json', + description: 'Created ServiceNow record with sys_id and other fields', + }, + metadata: { + type: 'json', + description: 'Operation metadata', + }, + }, +} diff --git a/apps/sim/tools/servicenow/delete_record.ts b/apps/sim/tools/servicenow/delete_record.ts new file mode 100644 index 0000000000..25021dbca8 --- /dev/null +++ b/apps/sim/tools/servicenow/delete_record.ts @@ -0,0 +1,107 @@ +import { createLogger } from '@/lib/logs/console/logger' +import type { ServiceNowDeleteParams, ServiceNowDeleteResponse } from '@/tools/servicenow/types' +import type { ToolConfig } from '@/tools/types' + +const logger = createLogger('ServiceNowDeleteRecordTool') + +export const deleteRecordTool: ToolConfig = { + id: 'servicenow_delete_record', + name: 'Delete ServiceNow Record', + description: 'Delete a record from a ServiceNow table', + version: '1.0.0', + + oauth: { + required: true, + provider: 'servicenow', + }, + + params: { + instanceUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: 'ServiceNow instance URL (auto-detected from OAuth if not provided)', + }, + credential: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'ServiceNow OAuth credential ID', + }, + tableName: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Table name', + }, + sysId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Record sys_id to delete', + }, + }, + + request: { + url: (params) => { + // Use instanceUrl if provided, otherwise fall back to idToken (stored instance URL from OAuth) + const baseUrl = (params.instanceUrl || params.idToken || '').replace(/\/$/, '') + if (!baseUrl) { + throw new Error('ServiceNow instance URL is required') + } + return `${baseUrl}/api/now/table/${params.tableName}/${params.sysId}` + }, + method: 'DELETE', + headers: (params) => { + if (!params.accessToken) { + throw new Error('OAuth access token is required') + } + return { + Authorization: `Bearer ${params.accessToken}`, + Accept: 'application/json', + } + }, + }, + + transformResponse: async (response: Response, params?: ServiceNowDeleteParams) => { + try { + if (!response.ok) { + let errorData: any + try { + errorData = await response.json() + } catch { + errorData = { status: response.status, statusText: response.statusText } + } + throw new Error( + typeof errorData === 'string' + ? 
errorData + : errorData.error?.message || JSON.stringify(errorData) + ) + } + + return { + success: true, + output: { + success: true, + metadata: { + deletedSysId: params?.sysId || '', + }, + }, + } + } catch (error) { + logger.error('ServiceNow delete record - Error processing response:', { error }) + throw error + } + }, + + outputs: { + success: { + type: 'boolean', + description: 'Whether the deletion was successful', + }, + metadata: { + type: 'json', + description: 'Operation metadata', + }, + }, +} diff --git a/apps/sim/tools/servicenow/index.ts b/apps/sim/tools/servicenow/index.ts new file mode 100644 index 0000000000..905b22d8a8 --- /dev/null +++ b/apps/sim/tools/servicenow/index.ts @@ -0,0 +1,11 @@ +import { createRecordTool } from '@/tools/servicenow/create_record' +import { deleteRecordTool } from '@/tools/servicenow/delete_record' +import { readRecordTool } from '@/tools/servicenow/read_record' +import { updateRecordTool } from '@/tools/servicenow/update_record' + +export { + createRecordTool as servicenowCreateRecordTool, + readRecordTool as servicenowReadRecordTool, + updateRecordTool as servicenowUpdateRecordTool, + deleteRecordTool as servicenowDeleteRecordTool, +} diff --git a/apps/sim/tools/servicenow/read_record.ts b/apps/sim/tools/servicenow/read_record.ts new file mode 100644 index 0000000000..93b81c06bd --- /dev/null +++ b/apps/sim/tools/servicenow/read_record.ts @@ -0,0 +1,149 @@ +import { createLogger } from '@/lib/logs/console/logger' +import type { ServiceNowReadParams, ServiceNowReadResponse } from '@/tools/servicenow/types' +import type { ToolConfig } from '@/tools/types' + +const logger = createLogger('ServiceNowReadRecordTool') + +export const readRecordTool: ToolConfig = { + id: 'servicenow_read_record', + name: 'Read ServiceNow Records', + description: 'Read records from a ServiceNow table', + version: '1.0.0', + + oauth: { + required: true, + provider: 'servicenow', + }, + + params: { + instanceUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: 'ServiceNow instance URL (auto-detected from OAuth if not provided)', + }, + credential: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'ServiceNow OAuth credential ID', + }, + tableName: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Table name', + }, + sysId: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Specific record sys_id', + }, + number: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Record number (e.g., INC0010001)', + }, + query: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Encoded query string (e.g., "active=true^priority=1")', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-only', + description: 'Maximum number of records to return', + }, + fields: { + type: 'string', + required: false, + visibility: 'user-only', + description: 'Comma-separated list of fields to return', + }, + }, + + request: { + url: (params) => { + // Use instanceUrl if provided, otherwise fall back to idToken (stored instance URL from OAuth) + const baseUrl = (params.instanceUrl || params.idToken || '').replace(/\/$/, '') + if (!baseUrl) { + throw new Error('ServiceNow instance URL is required') + } + let url = `${baseUrl}/api/now/table/${params.tableName}` + + const queryParams = new URLSearchParams() + + if (params.sysId) { + url = `${url}/${params.sysId}` + } else if (params.number) { + 
queryParams.append('number', params.number) + } + + if (params.query) { + queryParams.append('sysparm_query', params.query) + } + + if (params.limit) { + queryParams.append('sysparm_limit', params.limit.toString()) + } + + if (params.fields) { + queryParams.append('sysparm_fields', params.fields) + } + + const queryString = queryParams.toString() + return queryString ? `${url}?${queryString}` : url + }, + method: 'GET', + headers: (params) => { + if (!params.accessToken) { + throw new Error('OAuth access token is required') + } + return { + Authorization: `Bearer ${params.accessToken}`, + Accept: 'application/json', + } + }, + }, + + transformResponse: async (response: Response) => { + try { + const data = await response.json() + + if (!response.ok) { + const error = data.error || data + throw new Error(typeof error === 'string' ? error : error.message || JSON.stringify(error)) + } + + const records = Array.isArray(data.result) ? data.result : [data.result] + + return { + success: true, + output: { + records, + metadata: { + recordCount: records.length, + }, + }, + } + } catch (error) { + logger.error('ServiceNow read record - Error processing response:', { error }) + throw error + } + }, + + outputs: { + records: { + type: 'array', + description: 'Array of ServiceNow records', + }, + metadata: { + type: 'json', + description: 'Operation metadata', + }, + }, +} diff --git a/apps/sim/tools/servicenow/types.ts b/apps/sim/tools/servicenow/types.ts new file mode 100644 index 0000000000..07a6c073ea --- /dev/null +++ b/apps/sim/tools/servicenow/types.ts @@ -0,0 +1,80 @@ +import type { ToolResponse } from '@/tools/types' + +export interface ServiceNowRecord { + sys_id: string + number?: string + [key: string]: any +} + +export interface ServiceNowBaseParams { + instanceUrl?: string + tableName: string + // OAuth fields (injected by the system when using OAuth) + credential?: string + accessToken?: string + idToken?: string // Stores the instance URL from OAuth +} + +export interface ServiceNowCreateParams extends ServiceNowBaseParams { + fields: Record +} + +export interface ServiceNowCreateResponse extends ToolResponse { + output: { + record: ServiceNowRecord + metadata: { + recordCount: 1 + } + } +} + +export interface ServiceNowReadParams extends ServiceNowBaseParams { + sysId?: string + number?: string + query?: string + limit?: number + fields?: string +} + +export interface ServiceNowReadResponse extends ToolResponse { + output: { + records: ServiceNowRecord[] + metadata: { + recordCount: number + } + } +} + +export interface ServiceNowUpdateParams extends ServiceNowBaseParams { + sysId: string + fields: Record +} + +export interface ServiceNowUpdateResponse extends ToolResponse { + output: { + record: ServiceNowRecord + metadata: { + recordCount: 1 + updatedFields: string[] + } + } +} + +export interface ServiceNowDeleteParams extends ServiceNowBaseParams { + sysId: string +} + +export interface ServiceNowDeleteResponse extends ToolResponse { + output: { + success: boolean + metadata: { + deletedSysId: string + } + } +} + +export type ServiceNowResponse = + | ServiceNowCreateResponse + | ServiceNowReadResponse + | ServiceNowUpdateResponse + | ServiceNowDeleteResponse diff --git a/apps/sim/tools/servicenow/update_record.ts b/apps/sim/tools/servicenow/update_record.ts new file mode 100644 index 0000000000..629468e7d0 --- /dev/null +++ b/apps/sim/tools/servicenow/update_record.ts @@ -0,0 +1,114 @@ +import { createLogger } from '@/lib/logs/console/logger' +import type { ServiceNowUpdateParams, 
ServiceNowUpdateResponse } from '@/tools/servicenow/types' +import type { ToolConfig } from '@/tools/types' + +const logger = createLogger('ServiceNowUpdateRecordTool') + +export const updateRecordTool: ToolConfig = { + id: 'servicenow_update_record', + name: 'Update ServiceNow Record', + description: 'Update an existing record in a ServiceNow table', + version: '1.0.0', + + oauth: { + required: true, + provider: 'servicenow', + }, + + params: { + instanceUrl: { + type: 'string', + required: false, + visibility: 'user-only', + description: 'ServiceNow instance URL (auto-detected from OAuth if not provided)', + }, + credential: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'ServiceNow OAuth credential ID', + }, + tableName: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Table name', + }, + sysId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Record sys_id to update', + }, + fields: { + type: 'json', + required: true, + visibility: 'user-or-llm', + description: 'Fields to update (JSON object)', + }, + }, + + request: { + url: (params) => { + // Use instanceUrl if provided, otherwise fall back to idToken (stored instance URL from OAuth) + const baseUrl = (params.instanceUrl || params.idToken || '').replace(/\/$/, '') + if (!baseUrl) { + throw new Error('ServiceNow instance URL is required') + } + return `${baseUrl}/api/now/table/${params.tableName}/${params.sysId}` + }, + method: 'PATCH', + headers: (params) => { + if (!params.accessToken) { + throw new Error('OAuth access token is required') + } + return { + Authorization: `Bearer ${params.accessToken}`, + 'Content-Type': 'application/json', + Accept: 'application/json', + } + }, + body: (params) => { + if (!params.fields || typeof params.fields !== 'object') { + throw new Error('Fields must be a JSON object') + } + return params.fields + }, + }, + + transformResponse: async (response: Response, params?: ServiceNowUpdateParams) => { + try { + const data = await response.json() + + if (!response.ok) { + const error = data.error || data + throw new Error(typeof error === 'string' ? error : error.message || JSON.stringify(error)) + } + + return { + success: true, + output: { + record: data.result, + metadata: { + recordCount: 1, + updatedFields: params ? 
Object.keys(params.fields || {}) : [], + }, + }, + } + } catch (error) { + logger.error('ServiceNow update record - Error processing response:', { error }) + throw error + } + }, + + outputs: { + record: { + type: 'json', + description: 'Updated ServiceNow record', + }, + metadata: { + type: 'json', + description: 'Operation metadata', + }, + }, +} diff --git a/bun.lock b/bun.lock index 41edf429b3..c5863930ca 100644 --- a/bun.lock +++ b/bun.lock @@ -266,19 +266,19 @@ "sharp", ], "overrides": { + "react": "19.2.1", + "react-dom": "19.2.1", + "next": "16.1.0-canary.21", "@next/env": "16.1.0-canary.21", "drizzle-orm": "^0.44.5", - "next": "16.1.0-canary.21", "postgres": "^3.4.5", - "react": "19.2.1", - "react-dom": "19.2.1", }, "packages": { "@adobe/css-tools": ["@adobe/css-tools@4.4.4", "", {}, "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg=="], "@ai-sdk/anthropic": ["@ai-sdk/anthropic@2.0.56", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-XHJKu0Yvfu9SPzRfsAFESa+9T7f2YJY6TxykKMfRsAwpeWAiX/Gbx5J5uM15AzYC3Rw8tVP3oH+j7jEivENirQ=="], - "@ai-sdk/azure": ["@ai-sdk/azure@2.0.88", "", { "dependencies": { "@ai-sdk/openai": "2.0.86", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-OMAXXZV7GiFz8qpCpzhaesTfiuiXU92WZWdvtr+K8rjfTNGm9sJWUuSLZ29z5aAeLUSRlwDMUlK4lYr8/1IewQ=="], + "@ai-sdk/azure": ["@ai-sdk/azure@2.0.89", "", { "dependencies": { "@ai-sdk/openai": "2.0.87", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-ELwVkqvvBVDtDH5DtAFhp4tltIdCWVZMwtwodc8v9y0XJyGJiCNdx1Dl9dwS/VzgJpjcj/u2pGs6vTjzBA+M9Q=="], "@ai-sdk/cerebras": ["@ai-sdk/cerebras@1.0.33", "", { "dependencies": { "@ai-sdk/openai-compatible": "1.0.29", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-2gSSS/7kunIwMdC4td5oWsUAzoLw84ccGpz6wQbxVnrb1iWnrEnKa5tRBduaP6IXpzLWsu8wME3+dQhZy+gT7w=="], @@ -286,15 +286,15 @@ "@ai-sdk/gateway": ["@ai-sdk/gateway@2.0.21", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "@vercel/oidc": "3.0.5" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-BwV7DU/lAm3Xn6iyyvZdWgVxgLu3SNXzl5y57gMvkW4nGhAOV5269IrJzQwGt03bb107sa6H6uJwWxc77zXoGA=="], - "@ai-sdk/google": ["@ai-sdk/google@2.0.46", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-8PK6u4sGE/kXebd7ZkTp+0aya4kNqzoqpS5m7cHY2NfTK6fhPc6GNvE+MZIZIoHQTp5ed86wGBdeBPpFaaUtyg=="], + "@ai-sdk/google": ["@ai-sdk/google@2.0.47", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-grIlvzh+jzMoKNOnn5Xe/8fdYiJOs0ThMVetsGzqflvMkUNF3B83t5i0kf4XqiM8MwTJ8gkdOA4VeQOZKR7TkA=="], - "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.91", "", { "dependencies": { "@ai-sdk/anthropic": "2.0.56", "@ai-sdk/google": "2.0.46", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-SonFMMdSIlos0fjBFBff7rcZQx+q3WP4CpXdz7+YEIEWItnR/k9f5MqRCXMZilfyzcpz5wFxa7Sqlnapv3oqsA=="], + "@ai-sdk/google-vertex": ["@ai-sdk/google-vertex@3.0.92", "", 
{ "dependencies": { "@ai-sdk/anthropic": "2.0.56", "@ai-sdk/google": "2.0.47", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "google-auth-library": "^10.5.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-L1hqen0UdslZEkuZZhZR8rC6RrTlMyZbtbd3wSoXGnpJiJ0SGSsUc2RFBz6YtbVhZo9GeFPtrnzD8zqIsOBtVQ=="], "@ai-sdk/groq": ["@ai-sdk/groq@2.0.33", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-FWGl7xNr88NBveao3y9EcVWYUt9ABPrwLFY7pIutSNgaTf32vgvyhREobaMrLU4Scr5G/2tlNqOPZ5wkYMaZig=="], "@ai-sdk/mistral": ["@ai-sdk/mistral@2.0.26", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-jxDB++4WI1wEx5ONNBI+VbkmYJOYIuS8UQY13/83UGRaiW7oB/WHiH4ETe6KzbKpQPB3XruwTJQjUMsMfKyTXA=="], - "@ai-sdk/openai": ["@ai-sdk/openai@2.0.86", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-obsLIOyA93lbQiSt1rvBItoVQp1U2RDPs0bNG0JYhm6Gku8Dg/0Cm8e4NUWT5p5PN10/doKSb3SMSKCixwIAKA=="], + "@ai-sdk/openai": ["@ai-sdk/openai@2.0.87", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-qywHMz8Kd+y/cluanX63SqFV/J8gLq596+W8K/MgdNroEnSabRIeikEP1/K0wwuKtSI7/KaLlVUnt1N5E3889Q=="], "@ai-sdk/openai-compatible": ["@ai-sdk/openai-compatible@1.0.29", "", { "dependencies": { "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-cZUppWzxjfpNaH1oVZ6U8yDLKKsdGbC9X0Pex8cG9CXhKWSoVLLnW1rKr6tu9jDISK5okjBIW/O1ZzfnbUrtEw=="], @@ -1080,49 +1080,49 @@ "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.27", "", {}, "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA=="], - "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.53.4", "", { "os": "android", "cpu": "arm" }, "sha512-PWU3Y92H4DD0bOqorEPp1Y0tbzwAurFmIYpjcObv5axGVOtcTlB0b2UKMd2echo08MgN7jO8WQZSSysvfisFSQ=="], + "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.53.5", "", { "os": "android", "cpu": "arm" }, "sha512-iDGS/h7D8t7tvZ1t6+WPK04KD0MwzLZrG0se1hzBjSi5fyxlsiggoJHwh18PCFNn7tG43OWb6pdZ6Y+rMlmyNQ=="], - "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.53.4", "", { "os": "android", "cpu": "arm64" }, "sha512-Gw0/DuVm3rGsqhMGYkSOXXIx20cC3kTlivZeuaGt4gEgILivykNyBWxeUV5Cf2tDA2nPLah26vq3emlRrWVbng=="], + "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.53.5", "", { "os": "android", "cpu": "arm64" }, "sha512-wrSAViWvZHBMMlWk6EJhvg8/rjxzyEhEdgfMMjREHEq11EtJ6IP6yfcCH57YAEca2Oe3FNCE9DSTgU70EIGmVw=="], - "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.53.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-+w06QvXsgzKwdVg5qRLZpTHh1bigHZIqoIUPtiqh05ZiJVUQ6ymOxaPkXTvRPRLH88575ZCRSRM3PwIoNma01Q=="], + "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.53.5", "", { "os": "darwin", "cpu": "arm64" }, "sha512-S87zZPBmRO6u1YXQLwpveZm4JfPpAa6oHBX7/ghSiGH3rz/KDgAu1rKdGutV+WUI6tKDMbaBJomhnT30Y2t4VQ=="], - "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.53.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-EB4Na9G2GsrRNRNFPuxfwvDRDUwQEzJPpiK1vo2zMVhEeufZ1k7J1bKnT0JYDfnPC7RNZ2H5YNQhW6/p2QKATw=="], + "@rollup/rollup-darwin-x64": 
["@rollup/rollup-darwin-x64@4.53.5", "", { "os": "darwin", "cpu": "x64" }, "sha512-YTbnsAaHo6VrAczISxgpTva8EkfQus0VPEVJCEaboHtZRIb6h6j0BNxRBOwnDciFTZLDPW5r+ZBmhL/+YpTZgA=="], - "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.53.4", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-bldA8XEqPcs6OYdknoTMaGhjytnwQ0NClSPpWpmufOuGPN5dDmvIa32FygC2gneKK4A1oSx86V1l55hyUWUYFQ=="], + "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.53.5", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-1T8eY2J8rKJWzaznV7zedfdhD1BqVs1iqILhmHDq/bqCUZsrMt+j8VCTHhP0vdfbHK3e1IQ7VYx3jlKqwlf+vw=="], - "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.53.4", "", { "os": "freebsd", "cpu": "x64" }, "sha512-3T8GPjH6mixCd0YPn0bXtcuSXi1Lj+15Ujw2CEb7dd24j9thcKscCf88IV7n76WaAdorOzAgSSbuVRg4C8V8Qw=="], + "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.53.5", "", { "os": "freebsd", "cpu": "x64" }, "sha512-sHTiuXyBJApxRn+VFMaw1U+Qsz4kcNlxQ742snICYPrY+DDL8/ZbaC4DVIB7vgZmp3jiDaKA0WpBdP0aqPJoBQ=="], - "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.53.4", "", { "os": "linux", "cpu": "arm" }, "sha512-UPMMNeC4LXW7ZSHxeP3Edv09aLsFUMaD1TSVW6n1CWMECnUIJMFFB7+XC2lZTdPtvB36tYC0cJWc86mzSsaviw=="], + "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.53.5", "", { "os": "linux", "cpu": "arm" }, "sha512-dV3T9MyAf0w8zPVLVBptVlzaXxka6xg1f16VAQmjg+4KMSTWDvhimI/Y6mp8oHwNrmnmVl9XxJ/w/mO4uIQONA=="], - "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.53.4", "", { "os": "linux", "cpu": "arm" }, "sha512-H8uwlV0otHs5Q7WAMSoyvjV9DJPiy5nJ/xnHolY0QptLPjaSsuX7tw+SPIfiYH6cnVx3fe4EWFafo6gH6ekZKA=="], + "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.53.5", "", { "os": "linux", "cpu": "arm" }, "sha512-wIGYC1x/hyjP+KAu9+ewDI+fi5XSNiUi9Bvg6KGAh2TsNMA3tSEs+Sh6jJ/r4BV/bx/CyWu2ue9kDnIdRyafcQ=="], - "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.53.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-BLRwSRwICXz0TXkbIbqJ1ibK+/dSBpTJqDClF61GWIrxTXZWQE78ROeIhgl5MjVs4B4gSLPCFeD4xML9vbzvCQ=="], + "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.53.5", "", { "os": "linux", "cpu": "arm64" }, "sha512-Y+qVA0D9d0y2FRNiG9oM3Hut/DgODZbU9I8pLLPwAsU0tUKZ49cyV1tzmB/qRbSzGvY8lpgGkJuMyuhH7Ma+Vg=="], - "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.53.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-6bySEjOTbmVcPJAywjpGLckK793A0TJWSbIa0sVwtVGfe/Nz6gOWHOwkshUIAp9j7wg2WKcA4Snu7Y1nUZyQew=="], + "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.53.5", "", { "os": "linux", "cpu": "arm64" }, "sha512-juaC4bEgJsyFVfqhtGLz8mbopaWD+WeSOYr5E16y+1of6KQjc0BpwZLuxkClqY1i8sco+MdyoXPNiCkQou09+g=="], - "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.53.4", "", { "os": "linux", "cpu": "none" }, "sha512-U0ow3bXYJZ5MIbchVusxEycBw7bO6C2u5UvD31i5IMTrnt2p4Fh4ZbHSdc/31TScIJQYHwxbj05BpevB3201ug=="], + "@rollup/rollup-linux-loong64-gnu": ["@rollup/rollup-linux-loong64-gnu@4.53.5", "", { "os": "linux", "cpu": "none" }, "sha512-rIEC0hZ17A42iXtHX+EPJVL/CakHo+tT7W0pbzdAGuWOt2jxDFh7A/lRhsNHBcqL4T36+UiAgwO8pbmn3dE8wA=="], - "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.53.4", "", { "os": "linux", "cpu": "ppc64" }, "sha512-iujDk07ZNwGLVn0YIWM80SFN039bHZHCdCCuX9nyx3Jsa2d9V/0Y32F+YadzwbvDxhSeVo9zefkoPnXEImnM5w=="], + "@rollup/rollup-linux-ppc64-gnu": ["@rollup/rollup-linux-ppc64-gnu@4.53.5", "", { "os": "linux", "cpu": 
"ppc64" }, "sha512-T7l409NhUE552RcAOcmJHj3xyZ2h7vMWzcwQI0hvn5tqHh3oSoclf9WgTl+0QqffWFG8MEVZZP1/OBglKZx52Q=="], - "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.53.4", "", { "os": "linux", "cpu": "none" }, "sha512-MUtAktiOUSu+AXBpx1fkuG/Bi5rhlorGs3lw5QeJ2X3ziEGAq7vFNdWVde6XGaVqi0LGSvugwjoxSNJfHFTC0g=="], + "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.53.5", "", { "os": "linux", "cpu": "none" }, "sha512-7OK5/GhxbnrMcxIFoYfhV/TkknarkYC1hqUw1wU2xUN3TVRLNT5FmBv4KkheSG2xZ6IEbRAhTooTV2+R5Tk0lQ=="], - "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.53.4", "", { "os": "linux", "cpu": "none" }, "sha512-btm35eAbDfPtcFEgaXCI5l3c2WXyzwiE8pArhd66SDtoLWmgK5/M7CUxmUglkwtniPzwvWioBKKl6IXLbPf2sQ=="], + "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.53.5", "", { "os": "linux", "cpu": "none" }, "sha512-GwuDBE/PsXaTa76lO5eLJTyr2k8QkPipAyOrs4V/KJufHCZBJ495VCGJol35grx9xryk4V+2zd3Ri+3v7NPh+w=="], - "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.53.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-uJlhKE9ccUTCUlK+HUz/80cVtx2RayadC5ldDrrDUFaJK0SNb8/cCmC9RhBhIWuZ71Nqj4Uoa9+xljKWRogdhA=="], + "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.53.5", "", { "os": "linux", "cpu": "s390x" }, "sha512-IAE1Ziyr1qNfnmiQLHBURAD+eh/zH1pIeJjeShleII7Vj8kyEm2PF77o+lf3WTHDpNJcu4IXJxNO0Zluro8bOw=="], - "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.53.4", "", { "os": "linux", "cpu": "x64" }, "sha512-jjEMkzvASQBbzzlzf4os7nzSBd/cvPrpqXCUOqoeCh1dQ4BP3RZCJk8XBeik4MUln3m+8LeTJcY54C/u8wb3DQ=="], + "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.53.5", "", { "os": "linux", "cpu": "x64" }, "sha512-Pg6E+oP7GvZ4XwgRJBuSXZjcqpIW3yCBhK4BcsANvb47qMvAbCjR6E+1a/U2WXz1JJxp9/4Dno3/iSJLcm5auw=="], - "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.53.4", "", { "os": "linux", "cpu": "x64" }, "sha512-lu90KG06NNH19shC5rBPkrh6mrTpq5kviFylPBXQVpdEu0yzb0mDgyxLr6XdcGdBIQTH/UAhDJnL+APZTBu1aQ=="], + "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.53.5", "", { "os": "linux", "cpu": "x64" }, "sha512-txGtluxDKTxaMDzUduGP0wdfng24y1rygUMnmlUJ88fzCCULCLn7oE5kb2+tRB+MWq1QDZT6ObT5RrR8HFRKqg=="], - "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.53.4", "", { "os": "none", "cpu": "arm64" }, "sha512-dFDcmLwsUzhAm/dn0+dMOQZoONVYBtgik0VuY/d5IJUUb787L3Ko/ibvTvddqhb3RaB7vFEozYevHN4ox22R/w=="], + "@rollup/rollup-openharmony-arm64": ["@rollup/rollup-openharmony-arm64@4.53.5", "", { "os": "none", "cpu": "arm64" }, "sha512-3DFiLPnTxiOQV993fMc+KO8zXHTcIjgaInrqlG8zDp1TlhYl6WgrOHuJkJQ6M8zHEcntSJsUp1XFZSY8C1DYbg=="], - "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.53.4", "", { "os": "win32", "cpu": "arm64" }, "sha512-WvUpUAWmUxZKtRnQWpRKnLW2DEO8HB/l8z6oFFMNuHndMzFTJEXzaYJ5ZAmzNw0L21QQJZsUQFt2oPf3ykAD/w=="], + "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.53.5", "", { "os": "win32", "cpu": "arm64" }, "sha512-nggc/wPpNTgjGg75hu+Q/3i32R00Lq1B6N1DO7MCU340MRKL3WZJMjA9U4K4gzy3dkZPXm9E1Nc81FItBVGRlA=="], - "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.53.4", "", { "os": "win32", "cpu": "ia32" }, "sha512-JGbeF2/FDU0x2OLySw/jgvkwWUo05BSiJK0dtuI4LyuXbz3wKiC1xHhLB1Tqm5VU6ZZDmAorj45r/IgWNWku5g=="], + "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.53.5", "", { "os": "win32", "cpu": "ia32" }, 
"sha512-U/54pTbdQpPLBdEzCT6NBCFAfSZMvmjr0twhnD9f4EIvlm9wy3jjQ38yQj1AGznrNO65EWQMgm/QUjuIVrYF9w=="], - "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.53.4", "", { "os": "win32", "cpu": "x64" }, "sha512-zuuC7AyxLWLubP+mlUwEyR8M1ixW1ERNPHJfXm8x7eQNP4Pzkd7hS3qBuKBR70VRiQ04Kw8FNfRMF5TNxuZq2g=="], + "@rollup/rollup-win32-x64-gnu": ["@rollup/rollup-win32-x64-gnu@4.53.5", "", { "os": "win32", "cpu": "x64" }, "sha512-2NqKgZSuLH9SXBBV2dWNRCZmocgSOx8OJSdpRaEcRlIfX8YrKxUT6z0F1NpvDVhOsl190UFTRh2F2WDWWCYp3A=="], - "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.53.4", "", { "os": "win32", "cpu": "x64" }, "sha512-Sbx45u/Lbb5RyptSbX7/3deP+/lzEmZ0BTSHxwxN/IMOZDZf8S0AGo0hJD5n/LQssxb5Z3B4og4P2X6Dd8acCA=="], + "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.53.5", "", { "os": "win32", "cpu": "x64" }, "sha512-JRpZUhCfhZ4keB5v0fe02gQJy05GqboPOaxvjugW04RLSYYoB/9t2lx2u/tMs/Na/1NXfY8QYjgRljRpN+MjTQ=="], "@s2-dev/streamstore": ["@s2-dev/streamstore@0.17.3", "", { "dependencies": { "@protobuf-ts/runtime": "^2.11.1" }, "peerDependencies": { "typescript": "^5.9.3" } }, "sha512-UeXL5+MgZQfNkbhCgEDVm7PrV5B3bxh6Zp4C5pUzQQwaoA+iGh2QiiIptRZynWgayzRv4vh0PYfnKpTzJEXegQ=="], @@ -1522,7 +1522,7 @@ "agentkeepalive": ["agentkeepalive@4.6.0", "", { "dependencies": { "humanize-ms": "^1.2.1" } }, "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ=="], - "ai": ["ai@5.0.113", "", { "dependencies": { "@ai-sdk/gateway": "2.0.21", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-26vivpSO/mzZj0k1Si2IpsFspp26ttQICHRySQiMrtWcRd5mnJMX2a8sG28vmZ38C+JUn1cWmfZrsLMxkSMw9g=="], + "ai": ["ai@5.0.114", "", { "dependencies": { "@ai-sdk/gateway": "2.0.21", "@ai-sdk/provider": "2.0.0", "@ai-sdk/provider-utils": "3.0.19", "@opentelemetry/api": "1.9.0" }, "peerDependencies": { "zod": "^3.25.76 || ^4.1.8" } }, "sha512-q/lxcJA6avYn/TXTaE41VX6p9lN245mDU9bIGuPpfk6WxDMvmMoUKUIS0/aXAPYN3UmkUn/r9rvq/8C98RoCWw=="], "ajv": ["ajv@6.12.6", "", { "dependencies": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" } }, "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g=="], @@ -1560,7 +1560,7 @@ "ast-types": ["ast-types@0.13.4", "", { "dependencies": { "tslib": "^2.0.1" } }, "sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w=="], - "ast-v8-to-istanbul": ["ast-v8-to-istanbul@0.3.8", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.31", "estree-walker": "^3.0.3", "js-tokens": "^9.0.1" } }, "sha512-szgSZqUxI5T8mLKvS7WTjF9is+MVbOeLADU73IseOcrqhxr/VAvy6wfoVE39KnKzA7JRhjF5eUagNlHwvZPlKQ=="], + "ast-v8-to-istanbul": ["ast-v8-to-istanbul@0.3.9", "", { "dependencies": { "@jridgewell/trace-mapping": "^0.3.31", "estree-walker": "^3.0.3", "js-tokens": "^9.0.1" } }, "sha512-dSC6tJeOJxbZrPzPbv5mMd6CMiQ1ugaVXXPRad2fXUSsy1kstFn9XQWemV9VW7Y7kpxgQ/4WMoZfwdH8XSU48w=="], "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], @@ -1598,7 +1598,7 @@ "base64id": ["base64id@2.0.0", "", {}, "sha512-lGe34o6EHj9y3Kts9R4ZYs/Gr+6N7MCaMlIFA3F1R2O5/m7K06AxfSeO5530PEERE6/WyEg3lsuyw4GHlPZHog=="], - "baseline-browser-mapping": ["baseline-browser-mapping@2.9.7", "", { "bin": { "baseline-browser-mapping": 
"dist/cli.js" } }, "sha512-k9xFKplee6KIio3IDbwj+uaCLpqzOwakOgmqzPezM0sFJlFKcg30vk2wOiAJtkTSfx0SSQDSe8q+mWA/fSH5Zg=="], + "baseline-browser-mapping": ["baseline-browser-mapping@2.9.8", "", { "bin": { "baseline-browser-mapping": "dist/cli.js" } }, "sha512-Y1fOuNDowLfgKOypdc9SPABfoWXuZHBOyCS4cD52IeZBhr4Md6CLLs6atcxVrzRmQ06E7hSlm5bHHApPKR/byA=="], "basic-auth": ["basic-auth@2.0.1", "", { "dependencies": { "safe-buffer": "5.1.2" } }, "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg=="], @@ -1652,8 +1652,6 @@ "buildcheck": ["buildcheck@0.0.7", "", {}, "sha512-lHblz4ahamxpTmnsk+MNTRWsjYKv965MwOrSJyeD588rR3Jcu7swE+0wN5F+PbL5cjgu/9ObkhfzEPuofEMwLA=="], - "bun-types": ["bun-types@1.3.4", "", { "dependencies": { "@types/node": "*" } }, "sha512-5ua817+BZPZOlNaRgGBpZJOSAQ9RQ17pkwPD0yR7CfJg+r8DgIILByFifDTa+IPDDxzf5VNhtNlcKqFzDgJvlQ=="], - "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], "c12": ["c12@3.1.0", "", { "dependencies": { "chokidar": "^4.0.3", "confbox": "^0.2.2", "defu": "^6.1.4", "dotenv": "^16.6.1", "exsolve": "^1.0.7", "giget": "^2.0.0", "jiti": "^2.4.2", "ohash": "^2.0.11", "pathe": "^2.0.3", "perfect-debounce": "^1.0.0", "pkg-types": "^2.2.0", "rc9": "^2.1.2" }, "peerDependencies": { "magicast": "^0.3.5" }, "optionalPeers": ["magicast"] }, "sha512-uWoS8OU1MEIsOv8p/5a82c3H31LsWVR5qiyXVfBNOzfffjUWtPnhAb4BYI2uG2HfGmZmFjCtui5XNWaps+iFuw=="], @@ -2056,7 +2054,7 @@ "fast-content-type-parse": ["fast-content-type-parse@2.0.1", "", {}, "sha512-nGqtvLrj5w0naR6tDPfB4cUmYCqouzyQiz6C5y/LtcDllJdrcc6WaWW6iXyIIOErTa/XRybj28aasdn4LkVk6Q=="], - "fast-copy": ["fast-copy@4.0.1", "", {}, "sha512-+uUOQlhsaswsizHFmEFAQhB3lSiQ+lisxl50N6ZP0wywlZeWsIESxSi9ftPEps8UGfiBzyYP7x27zA674WUvXw=="], + "fast-copy": ["fast-copy@4.0.2", "", {}, "sha512-ybA6PDXIXOXivLJK/z9e+Otk7ve13I4ckBvGO5I2RRmBU1gMHLVDJYEuJYhGwez7YNlYji2M2DvVU+a9mSFDlw=="], "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="], @@ -3022,7 +3020,7 @@ "rimraf": ["rimraf@5.0.10", "", { "dependencies": { "glob": "^10.3.7" }, "bin": { "rimraf": "dist/esm/bin.mjs" } }, "sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ=="], - "rollup": ["rollup@4.53.4", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.53.4", "@rollup/rollup-android-arm64": "4.53.4", "@rollup/rollup-darwin-arm64": "4.53.4", "@rollup/rollup-darwin-x64": "4.53.4", "@rollup/rollup-freebsd-arm64": "4.53.4", "@rollup/rollup-freebsd-x64": "4.53.4", "@rollup/rollup-linux-arm-gnueabihf": "4.53.4", "@rollup/rollup-linux-arm-musleabihf": "4.53.4", "@rollup/rollup-linux-arm64-gnu": "4.53.4", "@rollup/rollup-linux-arm64-musl": "4.53.4", "@rollup/rollup-linux-loong64-gnu": "4.53.4", "@rollup/rollup-linux-ppc64-gnu": "4.53.4", "@rollup/rollup-linux-riscv64-gnu": "4.53.4", "@rollup/rollup-linux-riscv64-musl": "4.53.4", "@rollup/rollup-linux-s390x-gnu": "4.53.4", "@rollup/rollup-linux-x64-gnu": "4.53.4", "@rollup/rollup-linux-x64-musl": "4.53.4", "@rollup/rollup-openharmony-arm64": "4.53.4", "@rollup/rollup-win32-arm64-msvc": "4.53.4", "@rollup/rollup-win32-ia32-msvc": "4.53.4", "@rollup/rollup-win32-x64-gnu": "4.53.4", "@rollup/rollup-win32-x64-msvc": "4.53.4", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, 
"sha512-YpXaaArg0MvrnJpvduEDYIp7uGOqKXbH9NsHGQ6SxKCOsNAjZF018MmxefFUulVP2KLtiGw1UvZbr+/ekjvlDg=="], + "rollup": ["rollup@4.53.5", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.53.5", "@rollup/rollup-android-arm64": "4.53.5", "@rollup/rollup-darwin-arm64": "4.53.5", "@rollup/rollup-darwin-x64": "4.53.5", "@rollup/rollup-freebsd-arm64": "4.53.5", "@rollup/rollup-freebsd-x64": "4.53.5", "@rollup/rollup-linux-arm-gnueabihf": "4.53.5", "@rollup/rollup-linux-arm-musleabihf": "4.53.5", "@rollup/rollup-linux-arm64-gnu": "4.53.5", "@rollup/rollup-linux-arm64-musl": "4.53.5", "@rollup/rollup-linux-loong64-gnu": "4.53.5", "@rollup/rollup-linux-ppc64-gnu": "4.53.5", "@rollup/rollup-linux-riscv64-gnu": "4.53.5", "@rollup/rollup-linux-riscv64-musl": "4.53.5", "@rollup/rollup-linux-s390x-gnu": "4.53.5", "@rollup/rollup-linux-x64-gnu": "4.53.5", "@rollup/rollup-linux-x64-musl": "4.53.5", "@rollup/rollup-openharmony-arm64": "4.53.5", "@rollup/rollup-win32-arm64-msvc": "4.53.5", "@rollup/rollup-win32-ia32-msvc": "4.53.5", "@rollup/rollup-win32-x64-gnu": "4.53.5", "@rollup/rollup-win32-x64-msvc": "4.53.5", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-iTNAbFSlRpcHeeWu73ywU/8KuU/LZmNCSxp6fjQkJBD3ivUb8tpDrXhIxEzA05HlYMEwmtaUnb3RP+YNv162OQ=="], "rou3": ["rou3@0.5.1", "", {}, "sha512-OXMmJ3zRk2xeXFGfA3K+EOPHC5u7RDFG7lIOx0X1pdnhUkI8MdVrbV+sNsD80ElpUZ+MRHdyxPnFthq9VHs8uQ=="], @@ -3372,7 +3370,7 @@ "unpipe": ["unpipe@1.0.0", "", {}, "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="], - "update-browserslist-db": ["update-browserslist-db@1.2.2", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-E85pfNzMQ9jpKkA7+TJAi4TJN+tBCuWh5rUcS/sv6cFi+1q9LYDwDI5dpUL0u/73EElyQ8d3TEaeW4sPedBqYA=="], + "update-browserslist-db": ["update-browserslist-db@1.2.3", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w=="], "uri-js": ["uri-js@4.4.1", "", { "dependencies": { "punycode": "^2.1.0" } }, "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg=="], @@ -3554,9 +3552,9 @@ "@better-auth/sso/jose": ["jose@6.1.3", "", {}, "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ=="], - "@better-auth/sso/zod": ["zod@4.2.0", "", {}, "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw=="], + "@better-auth/sso/zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], - "@better-auth/stripe/zod": ["zod@4.2.0", "", {}, "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw=="], + "@better-auth/stripe/zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], "@browserbasehq/sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="], @@ -3776,7 +3774,7 @@ "better-auth/jose": ["jose@6.1.3", "", {}, 
"sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ=="], - "better-auth/zod": ["zod@4.2.0", "", {}, "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw=="], + "better-auth/zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], "bl/buffer": ["buffer@5.7.1", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ=="], @@ -3784,8 +3782,6 @@ "body-parser/iconv-lite": ["iconv-lite@0.7.1", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-2Tth85cXwGFHfvRgZWszZSvdo+0Xsqmw8k8ZwxScfcBneNUraK+dxRxRm24nszx80Y0TVio8kKLt5sLE7ZCLlw=="], - "bun-types/@types/node": ["@types/node@24.2.1", "", { "dependencies": { "undici-types": "~7.10.0" } }, "sha512-DRh5K+ka5eJic8CjH7td8QpYEV6Zo10gfRkjHCO3weqZHWDtAaSTFtl4+VMqOJ4N5jcuhZ9/l+yy8rVgw7BQeQ=="], - "c12/chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="], "c12/confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], @@ -3850,7 +3846,7 @@ "fumadocs-mdx/js-yaml": ["js-yaml@4.1.1", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA=="], - "fumadocs-mdx/zod": ["zod@4.2.0", "", {}, "sha512-Bd5fw9wlIhtqCCxotZgdTOMwGm1a0u75wARVEY9HMs1X17trvA/lMi4+MGK5EUfYkXVTbX8UDiDKW4OgzHVUZw=="], + "fumadocs-mdx/zod": ["zod@4.2.1", "", {}, "sha512-0wZ1IRqGGhMP76gLqz8EyfBXKk0J2qo2+H3fi4mcUP/KtTocoX08nmIAHl1Z2kJIZbZee8KOpBCSNPRgauucjw=="], "fumadocs-ui/@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.4", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA=="], @@ -4240,8 +4236,6 @@ "accepts/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], - "bun-types/@types/node/undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="], - "c12/chokidar/readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="], "chrome-launcher/@types/node/undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="], From de330d80f531d4e3b01c192527d46f882dc2c582 Mon Sep 17 00:00:00 2001 From: Vikhyath Mondreti Date: Tue, 16 Dec 2025 21:23:18 -0800 Subject: [PATCH 07/15] improvement(mcp): restructure mcp tools caching/fetching info to improve UX (#2416) * feat(mcp): improve cache practice * restructure mcps fetching, caching, UX indicators * fix schema * styling improvements * fix tooltips and render issue * fix loading sequence + add redis --------- Co-authored-by: waleed --- .../app/api/mcp/servers/[id]/refresh/route.ts | 35 +- apps/sim/app/api/mcp/servers/[id]/route.ts | 21 +- 
apps/sim/app/api/mcp/servers/route.ts | 10 +- apps/sim/app/api/mcp/tools/stored/route.ts | 103 + .../tool-input/components/mcp-tools-list.tsx | 82 +- .../components/tool-input/tool-input.tsx | 113 +- .../server-list-item/server-list-item.tsx | 31 +- .../settings-modal/components/mcp/mcp.tsx | 178 +- .../settings-modal/settings-modal.tsx | 23 +- .../w/components/sidebar/sidebar.tsx | 14 +- apps/sim/executor/execution/block-executor.ts | 62 + .../executor/handlers/agent/agent-handler.ts | 70 +- apps/sim/hooks/queries/mcp.ts | 118 +- apps/sim/hooks/use-mcp-server-test.ts | 25 +- apps/sim/lib/mcp/client.ts | 8 +- apps/sim/lib/mcp/service.ts | 320 +- apps/sim/lib/mcp/storage/adapter.ts | 14 + apps/sim/lib/mcp/storage/factory.ts | 53 + apps/sim/lib/mcp/storage/index.ts | 4 + apps/sim/lib/mcp/storage/memory-cache.ts | 103 + apps/sim/lib/mcp/storage/redis-cache.ts | 96 + apps/sim/lib/mcp/tool-validation.ts | 129 + apps/sim/lib/mcp/types.ts | 10 +- apps/sim/lib/mcp/utils.ts | 3 +- apps/sim/providers/utils.ts | 4 +- apps/sim/stores/settings-modal/store.ts | 51 + apps/sim/tools/index.ts | 12 +- .../db/migrations/0123_windy_lockheed.sql | 1 + .../db/migrations/meta/0123_snapshot.json | 7722 +++++++++++++++++ packages/db/migrations/meta/_journal.json | 7 + packages/db/schema.ts | 2 + 31 files changed, 9087 insertions(+), 337 deletions(-) create mode 100644 apps/sim/app/api/mcp/tools/stored/route.ts create mode 100644 apps/sim/lib/mcp/storage/adapter.ts create mode 100644 apps/sim/lib/mcp/storage/factory.ts create mode 100644 apps/sim/lib/mcp/storage/index.ts create mode 100644 apps/sim/lib/mcp/storage/memory-cache.ts create mode 100644 apps/sim/lib/mcp/storage/redis-cache.ts create mode 100644 apps/sim/lib/mcp/tool-validation.ts create mode 100644 apps/sim/stores/settings-modal/store.ts create mode 100644 packages/db/migrations/0123_windy_lockheed.sql create mode 100644 packages/db/migrations/meta/0123_snapshot.json diff --git a/apps/sim/app/api/mcp/servers/[id]/refresh/route.ts b/apps/sim/app/api/mcp/servers/[id]/refresh/route.ts index 8f3b2cad80..ba58b0ba7a 100644 --- a/apps/sim/app/api/mcp/servers/[id]/refresh/route.ts +++ b/apps/sim/app/api/mcp/servers/[id]/refresh/route.ts @@ -5,6 +5,7 @@ import type { NextRequest } from 'next/server' import { createLogger } from '@/lib/logs/console/logger' import { withMcpAuth } from '@/lib/mcp/middleware' import { mcpService } from '@/lib/mcp/service' +import type { McpServerStatusConfig } from '@/lib/mcp/types' import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils' const logger = createLogger('McpServerRefreshAPI') @@ -50,6 +51,12 @@ export const POST = withMcpAuth<{ id: string }>('read')( let toolCount = 0 let lastError: string | null = null + const currentStatusConfig: McpServerStatusConfig = + (server.statusConfig as McpServerStatusConfig | null) ?? { + consecutiveFailures: 0, + lastSuccessfulDiscovery: null, + } + try { const tools = await mcpService.discoverServerTools(userId, serverId, workspaceId) connectionStatus = 'connected' @@ -63,20 +70,40 @@ export const POST = withMcpAuth<{ id: string }>('read')( logger.warn(`[${requestId}] Failed to connect to server ${serverId}:`, error) } + const now = new Date() + const newStatusConfig = + connectionStatus === 'connected' + ? 
{ consecutiveFailures: 0, lastSuccessfulDiscovery: now.toISOString() } + : { + consecutiveFailures: currentStatusConfig.consecutiveFailures + 1, + lastSuccessfulDiscovery: currentStatusConfig.lastSuccessfulDiscovery, + } + const [refreshedServer] = await db .update(mcpServers) .set({ - lastToolsRefresh: new Date(), + lastToolsRefresh: now, connectionStatus, lastError, - lastConnected: connectionStatus === 'connected' ? new Date() : server.lastConnected, + lastConnected: connectionStatus === 'connected' ? now : server.lastConnected, toolCount, - updatedAt: new Date(), + statusConfig: newStatusConfig, + updatedAt: now, }) .where(eq(mcpServers.id, serverId)) .returning() - logger.info(`[${requestId}] Successfully refreshed MCP server: ${serverId}`) + if (connectionStatus === 'connected') { + logger.info( + `[${requestId}] Successfully refreshed MCP server: ${serverId} (${toolCount} tools)` + ) + await mcpService.clearCache(workspaceId) + } else { + logger.warn( + `[${requestId}] Refresh completed for MCP server ${serverId} but connection failed: ${lastError}` + ) + } + return createMcpSuccessResponse({ status: connectionStatus, toolCount, diff --git a/apps/sim/app/api/mcp/servers/[id]/route.ts b/apps/sim/app/api/mcp/servers/[id]/route.ts index d5e70f71c7..40c35fdb73 100644 --- a/apps/sim/app/api/mcp/servers/[id]/route.ts +++ b/apps/sim/app/api/mcp/servers/[id]/route.ts @@ -48,6 +48,19 @@ export const PATCH = withMcpAuth<{ id: string }>('write')( // Remove workspaceId from body to prevent it from being updated const { workspaceId: _, ...updateData } = body + // Get the current server to check if URL is changing + const [currentServer] = await db + .select({ url: mcpServers.url }) + .from(mcpServers) + .where( + and( + eq(mcpServers.id, serverId), + eq(mcpServers.workspaceId, workspaceId), + isNull(mcpServers.deletedAt) + ) + ) + .limit(1) + const [updatedServer] = await db .update(mcpServers) .set({ @@ -71,8 +84,12 @@ export const PATCH = withMcpAuth<{ id: string }>('write')( ) } - // Clear MCP service cache after update - mcpService.clearCache(workspaceId) + // Only clear cache if URL changed (requires re-discovery) + const urlChanged = body.url && currentServer?.url !== body.url + if (urlChanged) { + await mcpService.clearCache(workspaceId) + logger.info(`[${requestId}] Cleared cache due to URL change`) + } logger.info(`[${requestId}] Successfully updated MCP server: ${serverId}`) return createMcpSuccessResponse({ server: updatedServer }) diff --git a/apps/sim/app/api/mcp/servers/route.ts b/apps/sim/app/api/mcp/servers/route.ts index 183c5e434d..8dc3db4dc9 100644 --- a/apps/sim/app/api/mcp/servers/route.ts +++ b/apps/sim/app/api/mcp/servers/route.ts @@ -117,12 +117,14 @@ export const POST = withMcpAuth('write')( timeout: body.timeout || 30000, retries: body.retries || 3, enabled: body.enabled !== false, + connectionStatus: 'connected', + lastConnected: new Date(), updatedAt: new Date(), deletedAt: null, }) .where(eq(mcpServers.id, serverId)) - mcpService.clearCache(workspaceId) + await mcpService.clearCache(workspaceId) logger.info( `[${requestId}] Successfully updated MCP server: ${body.name} (ID: ${serverId})` @@ -145,12 +147,14 @@ export const POST = withMcpAuth('write')( timeout: body.timeout || 30000, retries: body.retries || 3, enabled: body.enabled !== false, + connectionStatus: 'connected', + lastConnected: new Date(), createdAt: new Date(), updatedAt: new Date(), }) .returning() - mcpService.clearCache(workspaceId) + await mcpService.clearCache(workspaceId) logger.info( 
`[${requestId}] Successfully registered MCP server: ${body.name} (ID: ${serverId})` @@ -212,7 +216,7 @@ export const DELETE = withMcpAuth('admin')( ) } - mcpService.clearCache(workspaceId) + await mcpService.clearCache(workspaceId) logger.info(`[${requestId}] Successfully deleted MCP server: ${serverId}`) return createMcpSuccessResponse({ message: `Server ${serverId} deleted successfully` }) diff --git a/apps/sim/app/api/mcp/tools/stored/route.ts b/apps/sim/app/api/mcp/tools/stored/route.ts new file mode 100644 index 0000000000..b3906954aa --- /dev/null +++ b/apps/sim/app/api/mcp/tools/stored/route.ts @@ -0,0 +1,103 @@ +import { db } from '@sim/db' +import { workflow, workflowBlocks } from '@sim/db/schema' +import { eq } from 'drizzle-orm' +import type { NextRequest } from 'next/server' +import { createLogger } from '@/lib/logs/console/logger' +import { withMcpAuth } from '@/lib/mcp/middleware' +import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils' + +const logger = createLogger('McpStoredToolsAPI') + +export const dynamic = 'force-dynamic' + +interface StoredMcpTool { + workflowId: string + workflowName: string + serverId: string + serverUrl?: string + toolName: string + schema?: Record +} + +/** + * GET - Get all stored MCP tools from workflows in the workspace + * + * Scans all workflows in the workspace and extracts MCP tools that have been + * added to agent blocks. Returns the stored state of each tool for comparison + * against current server state. + */ +export const GET = withMcpAuth('read')( + async (request: NextRequest, { userId, workspaceId, requestId }) => { + try { + logger.info(`[${requestId}] Fetching stored MCP tools for workspace ${workspaceId}`) + + // Get all workflows in workspace + const workflows = await db + .select({ + id: workflow.id, + name: workflow.name, + }) + .from(workflow) + .where(eq(workflow.workspaceId, workspaceId)) + + const workflowMap = new Map(workflows.map((w) => [w.id, w.name])) + const workflowIds = workflows.map((w) => w.id) + + if (workflowIds.length === 0) { + return createMcpSuccessResponse({ tools: [] }) + } + + // Get all agent blocks from these workflows + const agentBlocks = await db + .select({ + workflowId: workflowBlocks.workflowId, + subBlocks: workflowBlocks.subBlocks, + }) + .from(workflowBlocks) + .where(eq(workflowBlocks.type, 'agent')) + + const storedTools: StoredMcpTool[] = [] + + for (const block of agentBlocks) { + if (!workflowMap.has(block.workflowId)) continue + + const subBlocks = block.subBlocks as Record | null + if (!subBlocks) continue + + const toolsSubBlock = subBlocks.tools as Record | undefined + const toolsValue = toolsSubBlock?.value + + if (!toolsValue || !Array.isArray(toolsValue)) continue + + for (const tool of toolsValue) { + if (tool.type !== 'mcp') continue + + const params = tool.params as Record | undefined + if (!params?.serverId || !params?.toolName) continue + + storedTools.push({ + workflowId: block.workflowId, + workflowName: workflowMap.get(block.workflowId) || 'Untitled', + serverId: params.serverId as string, + serverUrl: params.serverUrl as string | undefined, + toolName: params.toolName as string, + schema: tool.schema as Record | undefined, + }) + } + } + + logger.info( + `[${requestId}] Found ${storedTools.length} stored MCP tools across ${workflows.length} workflows` + ) + + return createMcpSuccessResponse({ tools: storedTools }) + } catch (error) { + logger.error(`[${requestId}] Error fetching stored MCP tools:`, error) + return createMcpErrorResponse( + error 
instanceof Error ? error : new Error('Failed to fetch stored MCP tools'), + 'Failed to fetch stored MCP tools', + 500 + ) + } + } +) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/components/mcp-tools-list.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/components/mcp-tools-list.tsx index d3ffbe78e8..dbaa41d32b 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/components/mcp-tools-list.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/components/mcp-tools-list.tsx @@ -18,12 +18,18 @@ interface McpTool { inputSchema?: any } +interface McpServer { + id: string + url?: string +} + interface StoredTool { type: 'mcp' title: string toolId: string params: { serverId: string + serverUrl?: string toolName: string serverName: string } @@ -34,6 +40,7 @@ interface StoredTool { interface McpToolsListProps { mcpTools: McpTool[] + mcpServers?: McpServer[] searchQuery: string customFilter: (name: string, query: string) => number onToolSelect: (tool: StoredTool) => void @@ -45,6 +52,7 @@ interface McpToolsListProps { */ export function McpToolsList({ mcpTools, + mcpServers = [], searchQuery, customFilter, onToolSelect, @@ -59,44 +67,48 @@ export function McpToolsList({ return ( <> MCP Tools - {filteredTools.map((mcpTool) => ( - { - if (disabled) return + {filteredTools.map((mcpTool) => { + const server = mcpServers.find((s) => s.id === mcpTool.serverId) + return ( + { + if (disabled) return - const newTool: StoredTool = { - type: 'mcp', - title: mcpTool.name, - toolId: mcpTool.id, - params: { - serverId: mcpTool.serverId, - toolName: mcpTool.name, - serverName: mcpTool.serverName, - }, - isExpanded: true, - usageControl: 'auto', - schema: { - ...mcpTool.inputSchema, - description: mcpTool.description, - }, - } + const newTool: StoredTool = { + type: 'mcp', + title: mcpTool.name, + toolId: mcpTool.id, + params: { + serverId: mcpTool.serverId, + serverUrl: server?.url, + toolName: mcpTool.name, + serverName: mcpTool.serverName, + }, + isExpanded: true, + usageControl: 'auto', + schema: { + ...mcpTool.inputSchema, + description: mcpTool.description, + }, + } - onToolSelect(newTool) - }} - > -
- -
- - {mcpTool.name} - - - ))} +
+ +
+ + {mcpTool.name} + + + ) + })} ) } diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx index d3f9534d99..882a968391 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/tool-input/tool-input.tsx @@ -4,6 +4,7 @@ import { useQuery } from '@tanstack/react-query' import { Loader2, PlusIcon, WrenchIcon, XIcon } from 'lucide-react' import { useParams } from 'next/navigation' import { + Badge, Combobox, Popover, PopoverContent, @@ -12,6 +13,7 @@ import { PopoverSearch, PopoverSection, PopoverTrigger, + Tooltip, } from '@/components/emcn' import { McpIcon } from '@/components/icons' import { Switch } from '@/components/ui/switch' @@ -55,9 +57,11 @@ import { type CustomTool as CustomToolDefinition, useCustomTools, } from '@/hooks/queries/custom-tools' +import { useMcpServers } from '@/hooks/queries/mcp' import { useWorkflows } from '@/hooks/queries/workflows' import { useMcpTools } from '@/hooks/use-mcp-tools' import { getProviderFromModel, supportsToolUsageControl } from '@/providers/utils' +import { useSettingsModalStore } from '@/stores/settings-modal/store' import { useSubBlockStore } from '@/stores/workflows/subblock/store' import { formatParameterLabel, @@ -802,6 +806,66 @@ export function ToolInput({ refreshTools, } = useMcpTools(workspaceId) + const { data: mcpServers = [], isLoading: mcpServersLoading } = useMcpServers(workspaceId) + const openSettingsModal = useSettingsModalStore((state) => state.openModal) + const mcpDataLoading = mcpLoading || mcpServersLoading + + /** + * Returns issue info for an MCP tool using shared validation logic. 
+ */ + const getMcpToolIssue = useCallback( + (tool: StoredTool) => { + if (tool.type !== 'mcp') return null + + const { getMcpToolIssue: validateTool } = require('@/lib/mcp/tool-validation') + + return validateTool( + { + serverId: tool.params?.serverId as string, + serverUrl: tool.params?.serverUrl as string | undefined, + toolName: tool.params?.toolName as string, + schema: tool.schema, + }, + mcpServers.map((s) => ({ + id: s.id, + url: s.url, + connectionStatus: s.connectionStatus, + lastError: s.lastError, + })), + mcpTools.map((t) => ({ + serverId: t.serverId, + name: t.name, + inputSchema: t.inputSchema, + })) + ) + }, + [mcpTools, mcpServers] + ) + + const isMcpToolUnavailable = useCallback( + (tool: StoredTool): boolean => { + const { isToolUnavailable } = require('@/lib/mcp/tool-validation') + return isToolUnavailable(getMcpToolIssue(tool)) + }, + [getMcpToolIssue] + ) + + const hasMcpToolIssue = useCallback( + (tool: StoredTool): boolean => { + return getMcpToolIssue(tool) !== null + }, + [getMcpToolIssue] + ) + + // Filter out MCP tools from unavailable servers for the dropdown + const availableMcpTools = useMemo(() => { + return mcpTools.filter((mcpTool) => { + const server = mcpServers.find((s) => s.id === mcpTool.serverId) + // Only include tools from connected servers + return server && server.connectionStatus === 'connected' + }) + }, [mcpTools, mcpServers]) + // Reset search query when popover opens useEffect(() => { if (open) { @@ -1849,9 +1913,10 @@ export function ToolInput({ ) })()} - {/* Display MCP tools */} + {/* Display MCP tools (only from available servers) */} {isCustomTool ? customToolTitle : tool.title} + {isMcpTool && + !mcpDataLoading && + (() => { + const issue = getMcpToolIssue(tool) + if (!issue) return null + const { getIssueBadgeLabel } = require('@/lib/mcp/tool-validation') + const serverId = tool.params?.serverId + return ( +
{ + e.stopPropagation() + e.preventDefault() + openSettingsModal({ section: 'mcp', mcpServerId: serverId }) + }} + > + + + + {getIssueBadgeLabel(issue)} + + + + + {issue.message} · Click to open settings + + + +
+ ) + })()}
- {supportsToolControl && ( + {supportsToolControl && !(isMcpTool && isMcpToolUnavailable(tool)) && ( @@ -2386,9 +2488,10 @@ export function ToolInput({ ) })()} - {/* Display MCP tools */} + {/* Display MCP tools (only from available servers) */} "Streamable-HTTP"). - */ export function formatTransportLabel(transport: string): string { return transport .split('-') @@ -14,10 +11,10 @@ export function formatTransportLabel(transport: string): string { .join('-') } -/** - * Formats tools count and names for display. - */ -function formatToolsLabel(tools: any[]): string { +function formatToolsLabel(tools: any[], connectionStatus?: string): string { + if (connectionStatus === 'error') { + return 'Unable to connect' + } const count = tools.length const plural = count !== 1 ? 's' : '' const names = count > 0 ? `: ${tools.map((t) => t.name).join(', ')}` : '' @@ -29,35 +26,41 @@ interface ServerListItemProps { tools: any[] isDeleting: boolean isLoadingTools?: boolean + isRefreshing?: boolean onRemove: () => void onViewDetails: () => void } -/** - * Renders a single MCP server list item with details and delete actions. - */ export function ServerListItem({ server, tools, isDeleting, isLoadingTools = false, + isRefreshing = false, onRemove, onViewDetails, }: ServerListItemProps) { const transportLabel = formatTransportLabel(server.transport || 'http') - const toolsLabel = formatToolsLabel(tools) + const toolsLabel = formatToolsLabel(tools, server.connectionStatus) + const isError = server.connectionStatus === 'error' return (
- + {server.name || 'Unnamed Server'} ({transportLabel})
-

- {isLoadingTools && tools.length === 0 ? 'Loading...' : toolsLabel} +

+ {isRefreshing + ? 'Refreshing...' + : isLoadingTools && tools.length === 0 + ? 'Loading...' + : toolsLabel}

diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx index 92a9b2c332..cf08e08aaa 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/components/mcp/mcp.tsx @@ -1,9 +1,10 @@ 'use client' -import { useCallback, useMemo, useRef, useState } from 'react' +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' import { Plus, Search } from 'lucide-react' import { useParams } from 'next/navigation' import { + Badge, Button, Input as EmcnInput, Modal, @@ -14,6 +15,7 @@ import { } from '@/components/emcn' import { Input } from '@/components/ui' import { createLogger } from '@/lib/logs/console/logger' +import { getIssueBadgeLabel, getMcpToolIssue, type McpToolIssue } from '@/lib/mcp/tool-validation' import { checkEnvVarTrigger } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/env-var-dropdown' import { useCreateMcpServer, @@ -21,6 +23,7 @@ import { useMcpServers, useMcpToolsQuery, useRefreshMcpServer, + useStoredMcpTools, } from '@/hooks/queries/mcp' import { useMcpServerTest } from '@/hooks/use-mcp-server-test' import type { InputFieldType, McpServerFormData, McpServerTestResult } from './components' @@ -44,6 +47,9 @@ interface McpServer { name?: string transport?: string url?: string + connectionStatus?: 'connected' | 'disconnected' | 'error' + lastError?: string | null + lastConnected?: string } const logger = createLogger('McpSettings') @@ -69,11 +75,15 @@ function getTestButtonLabel( return 'Test Connection' } +interface MCPProps { + initialServerId?: string | null +} + /** * MCP Settings component for managing Model Context Protocol servers. * Handles server CRUD operations, connection testing, and environment variable integration. 
*/ -export function MCP() { +export function MCP({ initialServerId }: MCPProps) { const params = useParams() const workspaceId = params.workspaceId as string @@ -88,6 +98,7 @@ export function MCP() { isLoading: toolsLoading, isFetching: toolsFetching, } = useMcpToolsQuery(workspaceId) + const { data: storedTools = [] } = useStoredMcpTools(workspaceId) const createServerMutation = useCreateMcpServer() const deleteServerMutation = useDeleteMcpServer() const refreshServerMutation = useRefreshMcpServer() @@ -106,7 +117,9 @@ export function MCP() { const [serverToDelete, setServerToDelete] = useState<{ id: string; name: string } | null>(null) const [selectedServerId, setSelectedServerId] = useState(null) - const [refreshStatus, setRefreshStatus] = useState<'idle' | 'refreshing' | 'refreshed'>('idle') + const [refreshingServers, setRefreshingServers] = useState< + Record + >({}) const [showEnvVars, setShowEnvVars] = useState(false) const [envSearchTerm, setEnvSearchTerm] = useState('') @@ -114,10 +127,16 @@ export function MCP() { const [activeInputField, setActiveInputField] = useState(null) const [activeHeaderIndex, setActiveHeaderIndex] = useState(null) - // Scroll position state for formatted text overlays const [urlScrollLeft, setUrlScrollLeft] = useState(0) const [headerScrollLeft, setHeaderScrollLeft] = useState>({}) + // Auto-select server when initialServerId is provided + useEffect(() => { + if (initialServerId && servers.some((s) => s.id === initialServerId)) { + setSelectedServerId(initialServerId) + } + }, [initialServerId, servers]) + /** * Resets environment variable dropdown state. */ @@ -237,6 +256,7 @@ export function MCP() { /** * Adds a new MCP server after validating and testing the connection. + * Only creates the server if connection test succeeds. */ const handleAddServer = useCallback(async () => { if (!formData.name.trim()) return @@ -253,12 +273,12 @@ export function MCP() { workspaceId, } - if (!testResult) { - const result = await testConnection(serverConfig) - if (!result.success) return - } + const connectionResult = await testConnection(serverConfig) - if (testResult && !testResult.success) return + if (!connectionResult.success) { + logger.error('Connection test failed, server not added:', connectionResult.error) + return + } await createServerMutation.mutateAsync({ workspaceId, @@ -279,15 +299,7 @@ export function MCP() { } finally { setIsAddingServer(false) } - }, [ - formData, - testResult, - testConnection, - createServerMutation, - workspaceId, - headersToRecord, - resetForm, - ]) + }, [formData, testConnection, createServerMutation, workspaceId, headersToRecord, resetForm]) /** * Opens the delete confirmation dialog for an MCP server. @@ -297,9 +309,6 @@ export function MCP() { setShowDeleteDialog(true) }, []) - /** - * Confirms and executes the server deletion. 
- */ const confirmDeleteServer = useCallback(async () => { if (!serverToDelete) return @@ -399,14 +408,24 @@ export function MCP() { const handleRefreshServer = useCallback( async (serverId: string) => { try { - setRefreshStatus('refreshing') + setRefreshingServers((prev) => ({ ...prev, [serverId]: 'refreshing' })) await refreshServerMutation.mutateAsync({ workspaceId, serverId }) logger.info(`Refreshed MCP server: ${serverId}`) - setRefreshStatus('refreshed') - setTimeout(() => setRefreshStatus('idle'), 2000) + setRefreshingServers((prev) => ({ ...prev, [serverId]: 'refreshed' })) + setTimeout(() => { + setRefreshingServers((prev) => { + const newState = { ...prev } + delete newState[serverId] + return newState + }) + }, 2000) } catch (error) { logger.error('Failed to refresh MCP server:', error) - setRefreshStatus('idle') + setRefreshingServers((prev) => { + const newState = { ...prev } + delete newState[serverId] + return newState + }) } }, [refreshServerMutation, workspaceId] @@ -432,6 +451,53 @@ export function MCP() { const isSubmitDisabled = serversLoading || isAddingServer || !isFormValid const testButtonLabel = getTestButtonLabel(testResult, isTestingConnection) + /** + * Gets issues for stored tools that reference a specific server tool. + * Returns issues from all workflows that have stored this tool. + */ + const getStoredToolIssues = useCallback( + (serverId: string, toolName: string): { issue: McpToolIssue; workflowName: string }[] => { + const relevantStoredTools = storedTools.filter( + (st) => st.serverId === serverId && st.toolName === toolName + ) + + const serverStates = servers.map((s) => ({ + id: s.id, + url: s.url, + connectionStatus: s.connectionStatus, + lastError: s.lastError || undefined, + })) + + const discoveredTools = mcpToolsData.map((t) => ({ + serverId: t.serverId, + name: t.name, + inputSchema: t.inputSchema, + })) + + const issues: { issue: McpToolIssue; workflowName: string }[] = [] + + for (const storedTool of relevantStoredTools) { + const issue = getMcpToolIssue( + { + serverId: storedTool.serverId, + serverUrl: storedTool.serverUrl, + toolName: storedTool.toolName, + schema: storedTool.schema, + }, + serverStates, + discoveredTools + ) + + if (issue) { + issues.push({ issue, workflowName: storedTool.workflowName }) + } + } + + return issues + }, + [storedTools, servers, mcpToolsData] + ) + if (selectedServer) { const { server, tools } = selectedServer const transportLabel = formatTransportLabel(server.transport || 'http') @@ -463,6 +529,15 @@ export function MCP() {
       )}
+      {server.connectionStatus === 'error' && (
+        Status
+        {server.lastError || 'Unable to connect'}
+      )}
+
       Tools ({tools.length})
@@ -471,21 +546,37 @@ export function MCP() {
             No tools available
           ) : (
-            {tools.map((tool) => (
-              {tool.name}
-              {tool.description && (
-                {tool.description}
-              )}
-            ))}
+            {tools.map((tool) => {
+              const issues = getStoredToolIssues(server.id, tool.name)
+              return (
+                {tool.name}
+                {issues.length > 0 && (
+                  {getIssueBadgeLabel(issues[0].issue)}
+                )}
+                {tool.description && (
+                  {tool.description}
+                )}
+              )
+            })}
           )}
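The stale/unavailable badge above is driven by the shared helpers added in apps/sim/lib/mcp/tool-validation.ts later in this patch. A minimal sketch of that check with inline sample data (server and tool names here are illustrative only):

```ts
import {
  type DiscoveredTool,
  getIssueBadgeLabel,
  getMcpToolIssue,
  type ServerState,
  type StoredMcpTool,
} from '@/lib/mcp/tool-validation'

// Illustrative data: a workflow still references a tool the server no longer exposes.
const stored: StoredMcpTool = { serverId: 'srv_1', toolName: 'search_docs' }
const servers: ServerState[] = [{ id: 'srv_1', connectionStatus: 'connected' }]
const discovered: DiscoveredTool[] = [{ serverId: 'srv_1', name: 'search_issues' }]

const issue = getMcpToolIssue(stored, servers, discovered)
if (issue) {
  // -> 'tool_not_found' rendered with the 'unavailable' badge label
  console.log(issue.type, getIssueBadgeLabel(issue))
}
```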
@@ -496,11 +587,11 @@ export function MCP() { @@ -672,6 +763,7 @@ export function MCP() { tools={tools} isDeleting={deletingServers.has(server.id)} isLoadingTools={isLoadingTools} + isRefreshing={refreshingServers[server.id] === 'refreshing'} onRemove={() => handleRemoveServer(server.id, server.name || 'this server')} onViewDetails={() => handleViewDetails(server.id)} /> diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/settings-modal.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/settings-modal.tsx index f0634c7f4c..b318006e96 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/settings-modal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/settings-modal/settings-modal.tsx @@ -46,6 +46,7 @@ import { generalSettingsKeys, useGeneralSettings } from '@/hooks/queries/general import { organizationKeys, useOrganizations } from '@/hooks/queries/organization' import { ssoKeys, useSSOProviders } from '@/hooks/queries/sso' import { subscriptionKeys, useSubscriptionData } from '@/hooks/queries/subscription' +import { useSettingsModalStore } from '@/stores/settings-modal/store' const isBillingEnabled = isTruthy(getEnv('NEXT_PUBLIC_BILLING_ENABLED')) const isSSOEnabled = isTruthy(getEnv('NEXT_PUBLIC_SSO_ENABLED')) @@ -134,6 +135,8 @@ const allNavigationItems: NavigationItem[] = [ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) { const [activeSection, setActiveSection] = useState('general') + const { initialSection, mcpServerId, clearInitialState } = useSettingsModalStore() + const [pendingMcpServerId, setPendingMcpServerId] = useState(null) const { data: session } = useSession() const queryClient = useQueryClient() const { data: organizationsData } = useOrganizations() @@ -247,6 +250,24 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) { // React Query hook automatically loads and syncs settings useGeneralSettings() + // Apply initial section from store when modal opens + useEffect(() => { + if (open && initialSection) { + setActiveSection(initialSection) + if (mcpServerId) { + setPendingMcpServerId(mcpServerId) + } + clearInitialState() + } + }, [open, initialSection, mcpServerId, clearInitialState]) + + // Clear pending server ID when section changes away from MCP + useEffect(() => { + if (activeSection !== 'mcp') { + setPendingMcpServerId(null) + } + }, [activeSection]) + useEffect(() => { const handleOpenSettings = (event: CustomEvent<{ tab: SettingsSection }>) => { setActiveSection(event.detail.tab) @@ -436,7 +457,7 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) { {isBillingEnabled && activeSection === 'team' && } {activeSection === 'sso' && } {activeSection === 'copilot' && } - {activeSection === 'mcp' && } + {activeSection === 'mcp' && } {activeSection === 'custom-tools' && } diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/sidebar.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/sidebar.tsx index 96298d7677..3077d6c39a 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/sidebar.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/sidebar.tsx @@ -32,6 +32,7 @@ import { } from '@/app/workspace/[workspaceId]/w/hooks' import { useFolderStore } from '@/stores/folders/store' import { useSearchModalStore } from '@/stores/search-modal/store' +import { useSettingsModalStore } from 
'@/stores/settings-modal/store' import { MIN_SIDEBAR_WIDTH, useSidebarStore } from '@/stores/sidebar/store' const logger = createLogger('Sidebar') @@ -88,7 +89,11 @@ export function Sidebar() { const [isWorkspaceMenuOpen, setIsWorkspaceMenuOpen] = useState(false) const [isHelpModalOpen, setIsHelpModalOpen] = useState(false) - const [isSettingsModalOpen, setIsSettingsModalOpen] = useState(false) + const { + isOpen: isSettingsModalOpen, + openModal: openSettingsModal, + closeModal: closeSettingsModal, + } = useSettingsModalStore() /** Listens for external events to open help modal */ useEffect(() => { @@ -219,7 +224,7 @@ export function Sidebar() { id: 'settings', label: 'Settings', icon: Settings, - onClick: () => setIsSettingsModalOpen(true), + onClick: () => openSettingsModal(), }, ], [workspaceId] @@ -654,7 +659,10 @@ export function Sidebar() { {/* Footer Navigation Modals */} - + (open ? openSettingsModal() : closeSettingsModal())} + /> {/* Hidden file input for workspace import */} + ): Promise> { + const tools = inputs.tools + if (!Array.isArray(tools) || tools.length === 0) return inputs + + const mcpTools = tools.filter((t: any) => t.type === 'mcp') + if (mcpTools.length === 0) return inputs + + const serverIds = [ + ...new Set(mcpTools.map((t: any) => t.params?.serverId).filter(Boolean)), + ] as string[] + if (serverIds.length === 0) return inputs + + const availableServerIds = new Set() + if (ctx.workspaceId && serverIds.length > 0) { + try { + const servers = await db + .select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus }) + .from(mcpServers) + .where( + and( + eq(mcpServers.workspaceId, ctx.workspaceId), + inArray(mcpServers.id, serverIds), + isNull(mcpServers.deletedAt) + ) + ) + + for (const server of servers) { + if (server.connectionStatus === 'connected') { + availableServerIds.add(server.id) + } + } + } catch (error) { + logger.warn('Failed to check MCP server availability for logging:', error) + return inputs + } + } + + const filteredTools = tools.filter((tool: any) => { + if (tool.type !== 'mcp') return true + const serverId = tool.params?.serverId + if (!serverId) return false + return availableServerIds.has(serverId) + }) + + return { ...inputs, tools: filteredTools } + } + private preparePauseResumeSelfReference( ctx: ExecutionContext, node: DAGNode, diff --git a/apps/sim/executor/handlers/agent/agent-handler.ts b/apps/sim/executor/handlers/agent/agent-handler.ts index 2aeaff3035..20d3bd903d 100644 --- a/apps/sim/executor/handlers/agent/agent-handler.ts +++ b/apps/sim/executor/handlers/agent/agent-handler.ts @@ -1,3 +1,6 @@ +import { db } from '@sim/db' +import { mcpServers } from '@sim/db/schema' +import { and, eq, inArray, isNull } from 'drizzle-orm' import { createLogger } from '@/lib/logs/console/logger' import { createMcpToolId } from '@/lib/mcp/utils' import { getAllBlocks } from '@/blocks' @@ -35,19 +38,23 @@ export class AgentBlockHandler implements BlockHandler { block: SerializedBlock, inputs: AgentInputs ): Promise { - const responseFormat = this.parseResponseFormat(inputs.responseFormat) - const model = inputs.model || AGENT.DEFAULT_MODEL + // Filter out unavailable MCP tools early so they don't appear in logs/inputs + const filteredTools = await this.filterUnavailableMcpTools(ctx, inputs.tools || []) + const filteredInputs = { ...inputs, tools: filteredTools } + + const responseFormat = this.parseResponseFormat(filteredInputs.responseFormat) + const model = filteredInputs.model || AGENT.DEFAULT_MODEL const providerId = 
getProviderFromModel(model) - const formattedTools = await this.formatTools(ctx, inputs.tools || []) + const formattedTools = await this.formatTools(ctx, filteredInputs.tools || []) const streamingConfig = this.getStreamingConfig(ctx, block) - const messages = await this.buildMessages(ctx, inputs, block.id) + const messages = await this.buildMessages(ctx, filteredInputs, block.id) const providerRequest = this.buildProviderRequest({ ctx, providerId, model, messages, - inputs, + inputs: filteredInputs, formattedTools, responseFormat, streaming: streamingConfig.shouldUseStreaming ?? false, @@ -58,10 +65,10 @@ export class AgentBlockHandler implements BlockHandler { providerRequest, block, responseFormat, - inputs + filteredInputs ) - await this.persistResponseToMemory(ctx, inputs, result, block.id) + await this.persistResponseToMemory(ctx, filteredInputs, result, block.id) return result } @@ -115,6 +122,53 @@ export class AgentBlockHandler implements BlockHandler { return undefined } + private async filterUnavailableMcpTools( + ctx: ExecutionContext, + tools: ToolInput[] + ): Promise { + if (!Array.isArray(tools) || tools.length === 0) return tools + + const mcpTools = tools.filter((t) => t.type === 'mcp') + if (mcpTools.length === 0) return tools + + const serverIds = [...new Set(mcpTools.map((t) => t.params?.serverId).filter(Boolean))] + if (serverIds.length === 0) return tools + + const availableServerIds = new Set() + if (ctx.workspaceId && serverIds.length > 0) { + try { + const servers = await db + .select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus }) + .from(mcpServers) + .where( + and( + eq(mcpServers.workspaceId, ctx.workspaceId), + inArray(mcpServers.id, serverIds), + isNull(mcpServers.deletedAt) + ) + ) + + for (const server of servers) { + if (server.connectionStatus === 'connected') { + availableServerIds.add(server.id) + } + } + } catch (error) { + logger.warn('Failed to check MCP server availability, including all tools:', error) + for (const serverId of serverIds) { + availableServerIds.add(serverId) + } + } + } + + return tools.filter((tool) => { + if (tool.type !== 'mcp') return true + const serverId = tool.params?.serverId + if (!serverId) return false + return availableServerIds.has(serverId) + }) + } + private async formatTools(ctx: ExecutionContext, inputTools: ToolInput[]): Promise { if (!Array.isArray(inputTools)) return [] @@ -304,6 +358,7 @@ export class AgentBlockHandler implements BlockHandler { /** * Process MCP tools using cached schemas from build time. + * Note: Unavailable tools are already filtered by filterUnavailableMcpTools. 
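The filtering itself reduces to a pure predicate once the set of reachable servers is known; a minimal standalone sketch, with the database lookup replaced by a precomputed set and the tool shape trimmed to the fields the filter actually reads (both simplifications are assumptions for illustration):

```ts
// Simplified stand-in for ToolInput: only the fields the filter inspects.
interface ToolInputLike {
  type: string
  params?: { serverId?: string }
}

// Keep non-MCP tools as-is; keep MCP tools only when their server is connected.
function filterByAvailableServers(
  tools: ToolInputLike[],
  availableServerIds: Set<string>
): ToolInputLike[] {
  return tools.filter((tool) => {
    if (tool.type !== 'mcp') return true
    const serverId = tool.params?.serverId
    if (!serverId) return false
    return availableServerIds.has(serverId)
  })
}

// Example: srv_up is connected, srv_down is not.
const kept = filterByAvailableServers(
  [
    { type: 'custom' },
    { type: 'mcp', params: { serverId: 'srv_up' } },
    { type: 'mcp', params: { serverId: 'srv_down' } },
  ],
  new Set(['srv_up'])
)
// kept -> the custom tool plus the srv_up MCP tool
```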
*/ private async processMcpToolsBatched( ctx: ExecutionContext, @@ -312,7 +367,6 @@ export class AgentBlockHandler implements BlockHandler { if (mcpTools.length === 0) return [] const results: any[] = [] - const toolsWithSchema: ToolInput[] = [] const toolsNeedingDiscovery: ToolInput[] = [] diff --git a/apps/sim/hooks/queries/mcp.ts b/apps/sim/hooks/queries/mcp.ts index e13cb89eea..95365fb61a 100644 --- a/apps/sim/hooks/queries/mcp.ts +++ b/apps/sim/hooks/queries/mcp.ts @@ -1,20 +1,17 @@ import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query' import { createLogger } from '@/lib/logs/console/logger' +import type { McpServerStatusConfig } from '@/lib/mcp/types' const logger = createLogger('McpQueries') -/** - * Query key factories for MCP-related queries - */ +export type { McpServerStatusConfig } + export const mcpKeys = { all: ['mcp'] as const, servers: (workspaceId: string) => [...mcpKeys.all, 'servers', workspaceId] as const, tools: (workspaceId: string) => [...mcpKeys.all, 'tools', workspaceId] as const, } -/** - * MCP Server Types - */ export interface McpServer { id: string workspaceId: string @@ -25,9 +22,11 @@ export interface McpServer { headers?: Record enabled: boolean connectionStatus?: 'connected' | 'disconnected' | 'error' - lastError?: string + lastError?: string | null + statusConfig?: McpServerStatusConfig toolCount?: number lastToolsRefresh?: string + lastConnected?: string createdAt: string updatedAt: string deletedAt?: string @@ -86,8 +85,13 @@ export function useMcpServers(workspaceId: string) { /** * Fetch MCP tools for a workspace */ -async function fetchMcpTools(workspaceId: string): Promise { - const response = await fetch(`/api/mcp/tools/discover?workspaceId=${workspaceId}`) +async function fetchMcpTools(workspaceId: string, forceRefresh = false): Promise { + const params = new URLSearchParams({ workspaceId }) + if (forceRefresh) { + params.set('refresh', 'true') + } + + const response = await fetch(`/api/mcp/tools/discover?${params.toString()}`) // Treat 404 as "no tools available" - return empty array if (response.status === 404) { @@ -159,14 +163,43 @@ export function useCreateMcpServer() { return { ...serverData, id: serverId, - connectionStatus: 'disconnected' as const, + connectionStatus: 'connected' as const, serverId, updated: wasUpdated, } }, - onSuccess: (_data, variables) => { + onSuccess: async (data, variables) => { + const freshTools = await fetchMcpTools(variables.workspaceId, true) + + const previousServers = queryClient.getQueryData( + mcpKeys.servers(variables.workspaceId) + ) + if (previousServers) { + const newServer: McpServer = { + id: data.id, + workspaceId: variables.workspaceId, + name: variables.config.name, + transport: variables.config.transport, + url: variables.config.url, + timeout: variables.config.timeout || 30000, + headers: variables.config.headers, + enabled: variables.config.enabled, + connectionStatus: 'connected', + createdAt: new Date().toISOString(), + updatedAt: new Date().toISOString(), + } + + const serverExists = previousServers.some((s) => s.id === data.id) + queryClient.setQueryData( + mcpKeys.servers(variables.workspaceId), + serverExists + ? previousServers.map((s) => (s.id === data.id ? 
{ ...s, ...newServer } : s)) + : [...previousServers, newServer] + ) + } + + queryClient.setQueryData(mcpKeys.tools(variables.workspaceId), freshTools) queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) }) - queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) }) }, }) } @@ -213,7 +246,7 @@ export function useDeleteMcpServer() { interface UpdateMcpServerParams { workspaceId: string serverId: string - updates: Partial + updates: Partial } export function useUpdateMcpServer() { @@ -221,8 +254,20 @@ export function useUpdateMcpServer() { return useMutation({ mutationFn: async ({ workspaceId, serverId, updates }: UpdateMcpServerParams) => { + const response = await fetch(`/api/mcp/servers/${serverId}?workspaceId=${workspaceId}`, { + method: 'PATCH', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify(updates), + }) + + const data = await response.json() + + if (!response.ok) { + throw new Error(data.error || 'Failed to update MCP server') + } + logger.info(`Updated MCP server: ${serverId} in workspace: ${workspaceId}`) - return { serverId, updates } + return data.data?.server }, onMutate: async ({ workspaceId, serverId, updates }) => { await queryClient.cancelQueries({ queryKey: mcpKeys.servers(workspaceId) }) @@ -249,6 +294,7 @@ export function useUpdateMcpServer() { }, onSettled: (_data, _error, variables) => { queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) }) + queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) }) }, }) } @@ -292,9 +338,10 @@ export function useRefreshMcpServer() { logger.info(`Refreshed MCP server: ${serverId}`) return data.data }, - onSuccess: (_data, variables) => { + onSuccess: async (_data, variables) => { queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) }) - queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) }) + const freshTools = await fetchMcpTools(variables.workspaceId, true) + queryClient.setQueryData(mcpKeys.tools(variables.workspaceId), freshTools) }, }) } @@ -349,3 +396,42 @@ export function useTestMcpServer() { }, }) } + +/** + * Stored MCP tool from workflow state + */ +export interface StoredMcpTool { + workflowId: string + workflowName: string + serverId: string + serverUrl?: string + toolName: string + schema?: Record +} + +/** + * Fetch stored MCP tools from all workflows in the workspace + */ +async function fetchStoredMcpTools(workspaceId: string): Promise { + const response = await fetch(`/api/mcp/tools/stored?workspaceId=${workspaceId}`) + + if (!response.ok) { + const data = await response.json().catch(() => ({})) + throw new Error(data.error || 'Failed to fetch stored MCP tools') + } + + const data = await response.json() + return data.data?.tools || [] +} + +/** + * Hook to fetch stored MCP tools from all workflows + */ +export function useStoredMcpTools(workspaceId: string) { + return useQuery({ + queryKey: [...mcpKeys.all, workspaceId, 'stored'], + queryFn: () => fetchStoredMcpTools(workspaceId), + enabled: !!workspaceId, + staleTime: 60 * 1000, // 1 minute - workflows don't change frequently + }) +} diff --git a/apps/sim/hooks/use-mcp-server-test.ts b/apps/sim/hooks/use-mcp-server-test.ts index bae93ee7a1..72f0190ae5 100644 --- a/apps/sim/hooks/use-mcp-server-test.ts +++ b/apps/sim/hooks/use-mcp-server-test.ts @@ -34,14 +34,19 @@ export function useMcpServerTest() { const [isTestingConnection, setIsTestingConnection] = useState(false) const 
testConnection = useCallback( - async (config: McpServerTestConfig): Promise => { + async ( + config: McpServerTestConfig, + options?: { silent?: boolean } + ): Promise => { + const { silent = false } = options || {} + if (!config.name || !config.transport || !config.workspaceId) { const result: McpServerTestResult = { success: false, message: 'Missing required configuration', error: 'Please provide server name, transport method, and workspace ID', } - setTestResult(result) + if (!silent) setTestResult(result) return result } @@ -51,12 +56,14 @@ export function useMcpServerTest() { message: 'Missing server URL', error: 'Please provide a server URL for HTTP/SSE transport', } - setTestResult(result) + if (!silent) setTestResult(result) return result } - setIsTestingConnection(true) - setTestResult(null) + if (!silent) { + setIsTestingConnection(true) + setTestResult(null) + } try { const cleanConfig = { @@ -88,14 +95,14 @@ export function useMcpServerTest() { error: result.data.error, warnings: result.data.warnings, } - setTestResult(testResult) + if (!silent) setTestResult(testResult) logger.error('MCP server test failed:', result.data.error) return testResult } throw new Error(result.error || 'Connection test failed') } - setTestResult(result.data || result) + if (!silent) setTestResult(result.data || result) logger.info(`MCP server test ${result.data?.success ? 'passed' : 'failed'}:`, config.name) return result.data || result } catch (error) { @@ -105,11 +112,11 @@ export function useMcpServerTest() { message: 'Connection failed', error: errorMessage, } - setTestResult(result) + if (!silent) setTestResult(result) logger.error('MCP server test failed:', errorMessage) return result } finally { - setIsTestingConnection(false) + if (!silent) setIsTestingConnection(false) } }, [] diff --git a/apps/sim/lib/mcp/client.ts b/apps/sim/lib/mcp/client.ts index c6675c958c..a5015f4244 100644 --- a/apps/sim/lib/mcp/client.ts +++ b/apps/sim/lib/mcp/client.ts @@ -108,7 +108,7 @@ export class McpClient { this.connectionStatus.lastError = errorMessage this.isConnected = false logger.error(`Failed to connect to MCP server ${this.config.name}:`, error) - throw new McpConnectionError(errorMessage, this.config.id) + throw new McpConnectionError(errorMessage, this.config.name) } } @@ -141,7 +141,7 @@ export class McpClient { */ async listTools(): Promise { if (!this.isConnected) { - throw new McpConnectionError('Not connected to server', this.config.id) + throw new McpConnectionError('Not connected to server', this.config.name) } try { @@ -170,7 +170,7 @@ export class McpClient { */ async callTool(toolCall: McpToolCall): Promise { if (!this.isConnected) { - throw new McpConnectionError('Not connected to server', this.config.id) + throw new McpConnectionError('Not connected to server', this.config.name) } const consentRequest: McpConsentRequest = { @@ -217,7 +217,7 @@ export class McpClient { */ async ping(): Promise<{ _meta?: Record }> { if (!this.isConnected) { - throw new McpConnectionError('Not connected to server', this.config.id) + throw new McpConnectionError('Not connected to server', this.config.name) } try { diff --git a/apps/sim/lib/mcp/service.ts b/apps/sim/lib/mcp/service.ts index 8d1483f522..cfadec8f49 100644 --- a/apps/sim/lib/mcp/service.ts +++ b/apps/sim/lib/mcp/service.ts @@ -10,8 +10,14 @@ import { generateRequestId } from '@/lib/core/utils/request' import { getEffectiveDecryptedEnv } from '@/lib/environment/utils' import { createLogger } from '@/lib/logs/console/logger' import { McpClient } 
from '@/lib/mcp/client' +import { + createMcpCacheAdapter, + getMcpCacheType, + type McpCacheStorageAdapter, +} from '@/lib/mcp/storage' import type { McpServerConfig, + McpServerStatusConfig, McpServerSummary, McpTool, McpToolCall, @@ -22,154 +28,21 @@ import { MCP_CONSTANTS } from '@/lib/mcp/utils' const logger = createLogger('McpService') -interface ToolCache { - tools: McpTool[] - expiry: Date - lastAccessed: Date -} - -interface CacheStats { - totalEntries: number - activeEntries: number - expiredEntries: number - maxCacheSize: number - cacheHitRate: number - memoryUsage: { - approximateBytes: number - entriesEvicted: number - } -} - class McpService { - private toolCache = new Map() - private readonly cacheTimeout = MCP_CONSTANTS.CACHE_TIMEOUT // 30 seconds - private readonly maxCacheSize = MCP_CONSTANTS.MAX_CACHE_SIZE // 1000 - private cleanupInterval: NodeJS.Timeout | null = null - private cacheHits = 0 - private cacheMisses = 0 - private entriesEvicted = 0 + private cacheAdapter: McpCacheStorageAdapter + private readonly cacheTimeout = MCP_CONSTANTS.CACHE_TIMEOUT // 5 minutes constructor() { - this.startPeriodicCleanup() - } - - /** - * Start periodic cleanup of expired cache entries - */ - private startPeriodicCleanup(): void { - this.cleanupInterval = setInterval( - () => { - this.cleanupExpiredEntries() - }, - 5 * 60 * 1000 - ) - } - - /** - * Stop periodic cleanup - */ - private stopPeriodicCleanup(): void { - if (this.cleanupInterval) { - clearInterval(this.cleanupInterval) - this.cleanupInterval = null - } - } - - /** - * Cleanup expired cache entries - */ - private cleanupExpiredEntries(): void { - const now = new Date() - const expiredKeys: string[] = [] - - this.toolCache.forEach((cache, key) => { - if (cache.expiry <= now) { - expiredKeys.push(key) - } - }) - - expiredKeys.forEach((key) => this.toolCache.delete(key)) - - if (expiredKeys.length > 0) { - logger.debug(`Cleaned up ${expiredKeys.length} expired cache entries`) - } - } - - /** - * Evict least recently used entries when cache exceeds max size - */ - private evictLRUEntries(): void { - if (this.toolCache.size <= this.maxCacheSize) { - return - } - - const entries: { key: string; cache: ToolCache }[] = [] - this.toolCache.forEach((cache, key) => { - entries.push({ key, cache }) - }) - entries.sort((a, b) => a.cache.lastAccessed.getTime() - b.cache.lastAccessed.getTime()) - - const entriesToRemove = this.toolCache.size - this.maxCacheSize + 1 - for (let i = 0; i < entriesToRemove && i < entries.length; i++) { - this.toolCache.delete(entries[i].key) - this.entriesEvicted++ - } - - logger.debug(`Evicted ${entriesToRemove} LRU cache entries to maintain size limit`) - } - - /** - * Get cache entry and update last accessed time - */ - private getCacheEntry(key: string): ToolCache | undefined { - const entry = this.toolCache.get(key) - if (entry) { - entry.lastAccessed = new Date() - this.cacheHits++ - return entry - } - this.cacheMisses++ - return undefined - } - - /** - * Set cache entry with LRU eviction - */ - private setCacheEntry(key: string, tools: McpTool[]): void { - const now = new Date() - const cache: ToolCache = { - tools, - expiry: new Date(now.getTime() + this.cacheTimeout), - lastAccessed: now, - } - - this.toolCache.set(key, cache) - - this.evictLRUEntries() - } - - /** - * Calculate approximate memory usage of cache - */ - private calculateMemoryUsage(): number { - let totalBytes = 0 - - this.toolCache.forEach((cache, key) => { - totalBytes += key.length * 2 // UTF-16 encoding - totalBytes += 
JSON.stringify(cache.tools).length * 2 - totalBytes += 64 - }) - - return totalBytes + this.cacheAdapter = createMcpCacheAdapter() + logger.info(`MCP Service initialized with ${getMcpCacheType()} cache`) } /** * Dispose of the service and cleanup resources */ dispose(): void { - this.stopPeriodicCleanup() - this.toolCache.clear() - logger.info('MCP Service disposed and cleanup stopped') + this.cacheAdapter.dispose() + logger.info('MCP Service disposed') } /** @@ -385,6 +258,81 @@ class McpService { ) } + /** + * Update server connection status after discovery attempt + */ + private async updateServerStatus( + serverId: string, + workspaceId: string, + success: boolean, + error?: string, + toolCount?: number + ): Promise { + try { + const [currentServer] = await db + .select({ statusConfig: mcpServers.statusConfig }) + .from(mcpServers) + .where( + and( + eq(mcpServers.id, serverId), + eq(mcpServers.workspaceId, workspaceId), + isNull(mcpServers.deletedAt) + ) + ) + .limit(1) + + const currentConfig: McpServerStatusConfig = + (currentServer?.statusConfig as McpServerStatusConfig | null) ?? { + consecutiveFailures: 0, + lastSuccessfulDiscovery: null, + } + + const now = new Date() + + if (success) { + await db + .update(mcpServers) + .set({ + connectionStatus: 'connected', + lastConnected: now, + lastError: null, + toolCount: toolCount ?? 0, + lastToolsRefresh: now, + statusConfig: { + consecutiveFailures: 0, + lastSuccessfulDiscovery: now.toISOString(), + }, + updatedAt: now, + }) + .where(eq(mcpServers.id, serverId)) + } else { + const newFailures = currentConfig.consecutiveFailures + 1 + const isErrorState = newFailures >= MCP_CONSTANTS.MAX_CONSECUTIVE_FAILURES + + await db + .update(mcpServers) + .set({ + connectionStatus: isErrorState ? 'error' : 'disconnected', + lastError: error || 'Unknown error', + statusConfig: { + consecutiveFailures: newFailures, + lastSuccessfulDiscovery: currentConfig.lastSuccessfulDiscovery, + }, + updatedAt: now, + }) + .where(eq(mcpServers.id, serverId)) + + if (isErrorState) { + logger.warn( + `Server ${serverId} marked as error after ${newFailures} consecutive failures` + ) + } + } + } catch (err) { + logger.error(`Failed to update server status for ${serverId}:`, err) + } + } + /** * Discover tools from all workspace servers */ @@ -399,10 +347,14 @@ class McpService { try { if (!forceRefresh) { - const cached = this.getCacheEntry(cacheKey) - if (cached && cached.expiry > new Date()) { - logger.debug(`[${requestId}] Using cached tools for user ${userId}`) - return cached.tools + try { + const cached = await this.cacheAdapter.get(cacheKey) + if (cached) { + logger.debug(`[${requestId}] Using cached tools for user ${userId}`) + return cached.tools + } + } catch (error) { + logger.warn(`[${requestId}] Cache read failed, proceeding with discovery:`, error) } } @@ -425,7 +377,7 @@ class McpService { logger.debug( `[${requestId}] Discovered ${tools.length} tools from server ${config.name}` ) - return tools + return { serverId: config.id, tools } } finally { await client.disconnect() } @@ -433,20 +385,44 @@ class McpService { ) let failedCount = 0 + const statusUpdates: Promise[] = [] + results.forEach((result, index) => { + const server = servers[index] if (result.status === 'fulfilled') { - allTools.push(...result.value) + allTools.push(...result.value.tools) + statusUpdates.push( + this.updateServerStatus( + server.id!, + workspaceId, + true, + undefined, + result.value.tools.length + ) + ) } else { failedCount++ + const errorMessage = + result.reason 
instanceof Error ? result.reason.message : 'Unknown error' logger.warn( - `[${requestId}] Failed to discover tools from server ${servers[index].name}:`, + `[${requestId}] Failed to discover tools from server ${server.name}:`, result.reason ) + statusUpdates.push(this.updateServerStatus(server.id!, workspaceId, false, errorMessage)) } }) + // Update server statuses in parallel (don't block on this) + Promise.allSettled(statusUpdates).catch((err) => { + logger.error(`[${requestId}] Error updating server statuses:`, err) + }) + if (failedCount === 0) { - this.setCacheEntry(cacheKey, allTools) + try { + await this.cacheAdapter.set(cacheKey, allTools, this.cacheTimeout) + } catch (error) { + logger.warn(`[${requestId}] Cache write failed:`, error) + } } else { logger.warn( `[${requestId}] Skipping cache due to ${failedCount} failed server(s) - will retry on next request` @@ -565,44 +541,18 @@ class McpService { /** * Clear tool cache for a workspace or all workspaces */ - clearCache(workspaceId?: string): void { - if (workspaceId) { - const workspaceCacheKey = `workspace:${workspaceId}` - this.toolCache.delete(workspaceCacheKey) - logger.debug(`Cleared MCP tool cache for workspace ${workspaceId}`) - } else { - this.toolCache.clear() - this.cacheHits = 0 - this.cacheMisses = 0 - this.entriesEvicted = 0 - logger.debug('Cleared all MCP tool cache and reset statistics') - } - } - - /** - * Get comprehensive cache statistics - */ - getCacheStats(): CacheStats { - const entries: { key: string; cache: ToolCache }[] = [] - this.toolCache.forEach((cache, key) => { - entries.push({ key, cache }) - }) - - const now = new Date() - const activeEntries = entries.filter(({ cache }) => cache.expiry > now) - const totalRequests = this.cacheHits + this.cacheMisses - const hitRate = totalRequests > 0 ? 
this.cacheHits / totalRequests : 0 - - return { - totalEntries: entries.length, - activeEntries: activeEntries.length, - expiredEntries: entries.length - activeEntries.length, - maxCacheSize: this.maxCacheSize, - cacheHitRate: Math.round(hitRate * 100) / 100, - memoryUsage: { - approximateBytes: this.calculateMemoryUsage(), - entriesEvicted: this.entriesEvicted, - }, + async clearCache(workspaceId?: string): Promise { + try { + if (workspaceId) { + const workspaceCacheKey = `workspace:${workspaceId}` + await this.cacheAdapter.delete(workspaceCacheKey) + logger.debug(`Cleared MCP tool cache for workspace ${workspaceId}`) + } else { + await this.cacheAdapter.clear() + logger.debug('Cleared all MCP tool cache') + } + } catch (error) { + logger.warn('Failed to clear cache:', error) } } } diff --git a/apps/sim/lib/mcp/storage/adapter.ts b/apps/sim/lib/mcp/storage/adapter.ts new file mode 100644 index 0000000000..9332bb371b --- /dev/null +++ b/apps/sim/lib/mcp/storage/adapter.ts @@ -0,0 +1,14 @@ +import type { McpTool } from '@/lib/mcp/types' + +export interface McpCacheEntry { + tools: McpTool[] + expiry: number // Unix timestamp ms +} + +export interface McpCacheStorageAdapter { + get(key: string): Promise + set(key: string, tools: McpTool[], ttlMs: number): Promise + delete(key: string): Promise + clear(): Promise + dispose(): void +} diff --git a/apps/sim/lib/mcp/storage/factory.ts b/apps/sim/lib/mcp/storage/factory.ts new file mode 100644 index 0000000000..1b457ead21 --- /dev/null +++ b/apps/sim/lib/mcp/storage/factory.ts @@ -0,0 +1,53 @@ +import { getRedisClient } from '@/lib/core/config/redis' +import { createLogger } from '@/lib/logs/console/logger' +import type { McpCacheStorageAdapter } from './adapter' +import { MemoryMcpCache } from './memory-cache' +import { RedisMcpCache } from './redis-cache' + +const logger = createLogger('McpCacheFactory') + +let cachedAdapter: McpCacheStorageAdapter | null = null + +/** + * Create MCP cache storage adapter. + * Uses Redis if available, falls back to in-memory cache. + * + * Unlike rate-limiting (which fails if Redis is configured but unavailable), + * MCP caching gracefully falls back to memory since it's an optimization. + */ +export function createMcpCacheAdapter(): McpCacheStorageAdapter { + if (cachedAdapter) { + return cachedAdapter + } + + const redis = getRedisClient() + + if (redis) { + logger.info('MCP cache: Using Redis') + cachedAdapter = new RedisMcpCache(redis) + } else { + logger.info('MCP cache: Using in-memory (Redis not configured)') + cachedAdapter = new MemoryMcpCache() + } + + return cachedAdapter +} + +/** + * Get the current adapter type for logging/debugging + */ +export function getMcpCacheType(): 'redis' | 'memory' { + const redis = getRedisClient() + return redis ? 'redis' : 'memory' +} + +/** + * Reset the cached adapter. + * Only use for testing purposes. 
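To show the adapter contract the factory hands back, here is a rough usage sketch against the in-memory implementation (the `workspace:ws_1` id and the tool object are made-up stand-ins; a real `McpTool` carries its full discovered schema):

```ts
import { MemoryMcpCache } from '@/lib/mcp/storage'
import type { McpTool } from '@/lib/mcp/types'

async function demoMcpCache() {
  const cache = new MemoryMcpCache()

  // Trimmed stand-in for a discovered tool.
  const tools = [{ name: 'search_docs' } as McpTool]

  // Entries store an absolute expiry (now + ttlMs); the Redis adapter sets the same TTL via PX.
  await cache.set('workspace:ws_1', tools, 5 * 60 * 1000)

  const hit = await cache.get('workspace:ws_1') // { tools, expiry } | null once expired
  console.log(hit?.tools.length)

  await cache.delete('workspace:ws_1')
  cache.dispose() // clears the periodic cleanup interval
}

demoMcpCache()
```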
+ */ +export function resetMcpCacheAdapter(): void { + if (cachedAdapter) { + cachedAdapter.dispose() + cachedAdapter = null + } +} diff --git a/apps/sim/lib/mcp/storage/index.ts b/apps/sim/lib/mcp/storage/index.ts new file mode 100644 index 0000000000..bf69021f36 --- /dev/null +++ b/apps/sim/lib/mcp/storage/index.ts @@ -0,0 +1,4 @@ +export type { McpCacheEntry, McpCacheStorageAdapter } from './adapter' +export { createMcpCacheAdapter, getMcpCacheType, resetMcpCacheAdapter } from './factory' +export { MemoryMcpCache } from './memory-cache' +export { RedisMcpCache } from './redis-cache' diff --git a/apps/sim/lib/mcp/storage/memory-cache.ts b/apps/sim/lib/mcp/storage/memory-cache.ts new file mode 100644 index 0000000000..053ab56816 --- /dev/null +++ b/apps/sim/lib/mcp/storage/memory-cache.ts @@ -0,0 +1,103 @@ +import { createLogger } from '@/lib/logs/console/logger' +import type { McpTool } from '@/lib/mcp/types' +import { MCP_CONSTANTS } from '@/lib/mcp/utils' +import type { McpCacheEntry, McpCacheStorageAdapter } from './adapter' + +const logger = createLogger('McpMemoryCache') + +export class MemoryMcpCache implements McpCacheStorageAdapter { + private cache = new Map() + private readonly maxCacheSize = MCP_CONSTANTS.MAX_CACHE_SIZE + private cleanupInterval: NodeJS.Timeout | null = null + + constructor() { + this.startPeriodicCleanup() + } + + private startPeriodicCleanup(): void { + this.cleanupInterval = setInterval( + () => { + this.cleanupExpiredEntries() + }, + 5 * 60 * 1000 // 5 minutes + ) + // Don't keep Node process alive just for cache cleanup + this.cleanupInterval.unref() + } + + private cleanupExpiredEntries(): void { + const now = Date.now() + const expiredKeys: string[] = [] + + this.cache.forEach((entry, key) => { + if (entry.expiry <= now) { + expiredKeys.push(key) + } + }) + + expiredKeys.forEach((key) => this.cache.delete(key)) + + if (expiredKeys.length > 0) { + logger.debug(`Cleaned up ${expiredKeys.length} expired cache entries`) + } + } + + private evictIfNeeded(): void { + if (this.cache.size <= this.maxCacheSize) { + return + } + + // Evict oldest entries (by insertion order - Map maintains order) + const entriesToRemove = this.cache.size - this.maxCacheSize + const keys = Array.from(this.cache.keys()).slice(0, entriesToRemove) + keys.forEach((key) => this.cache.delete(key)) + + logger.debug(`Evicted ${entriesToRemove} cache entries`) + } + + async get(key: string): Promise { + const entry = this.cache.get(key) + const now = Date.now() + + if (!entry || entry.expiry <= now) { + if (entry) { + this.cache.delete(key) + } + return null + } + + // Return copy to prevent caller from mutating cache + return { + tools: entry.tools, + expiry: entry.expiry, + } + } + + async set(key: string, tools: McpTool[], ttlMs: number): Promise { + const now = Date.now() + const entry: McpCacheEntry = { + tools, + expiry: now + ttlMs, + } + + this.cache.set(key, entry) + this.evictIfNeeded() + } + + async delete(key: string): Promise { + this.cache.delete(key) + } + + async clear(): Promise { + this.cache.clear() + } + + dispose(): void { + if (this.cleanupInterval) { + clearInterval(this.cleanupInterval) + this.cleanupInterval = null + } + this.cache.clear() + logger.info('Memory cache disposed') + } +} diff --git a/apps/sim/lib/mcp/storage/redis-cache.ts b/apps/sim/lib/mcp/storage/redis-cache.ts new file mode 100644 index 0000000000..3f69468055 --- /dev/null +++ b/apps/sim/lib/mcp/storage/redis-cache.ts @@ -0,0 +1,96 @@ +import type Redis from 'ioredis' +import { createLogger } from 
'@/lib/logs/console/logger' +import type { McpTool } from '@/lib/mcp/types' +import type { McpCacheEntry, McpCacheStorageAdapter } from './adapter' + +const logger = createLogger('McpRedisCache') + +const REDIS_KEY_PREFIX = 'mcp:tools:' + +export class RedisMcpCache implements McpCacheStorageAdapter { + constructor(private redis: Redis) {} + + private getKey(key: string): string { + return `${REDIS_KEY_PREFIX}${key}` + } + + async get(key: string): Promise { + try { + const redisKey = this.getKey(key) + const data = await this.redis.get(redisKey) + + if (!data) { + return null + } + + try { + return JSON.parse(data) as McpCacheEntry + } catch { + // Corrupted data - delete and treat as miss + logger.warn('Corrupted cache entry, deleting:', redisKey) + await this.redis.del(redisKey) + return null + } + } catch (error) { + logger.error('Redis cache get error:', error) + throw error + } + } + + async set(key: string, tools: McpTool[], ttlMs: number): Promise { + try { + const now = Date.now() + const entry: McpCacheEntry = { + tools, + expiry: now + ttlMs, + } + + await this.redis.set(this.getKey(key), JSON.stringify(entry), 'PX', ttlMs) + } catch (error) { + logger.error('Redis cache set error:', error) + throw error + } + } + + async delete(key: string): Promise { + try { + await this.redis.del(this.getKey(key)) + } catch (error) { + logger.error('Redis cache delete error:', error) + throw error + } + } + + async clear(): Promise { + try { + let cursor = '0' + let deletedCount = 0 + + do { + const [nextCursor, keys] = await this.redis.scan( + cursor, + 'MATCH', + `${REDIS_KEY_PREFIX}*`, + 'COUNT', + 100 + ) + cursor = nextCursor + + if (keys.length > 0) { + await this.redis.del(...keys) + deletedCount += keys.length + } + } while (cursor !== '0') + + logger.debug(`Cleared ${deletedCount} MCP cache entries from Redis`) + } catch (error) { + logger.error('Redis cache clear error:', error) + throw error + } + } + + dispose(): void { + // Redis client is managed externally, nothing to dispose + logger.info('Redis cache adapter disposed') + } +} diff --git a/apps/sim/lib/mcp/tool-validation.ts b/apps/sim/lib/mcp/tool-validation.ts new file mode 100644 index 0000000000..d84aaa9846 --- /dev/null +++ b/apps/sim/lib/mcp/tool-validation.ts @@ -0,0 +1,129 @@ +/** + * MCP Tool Validation + * + * Shared logic for detecting issues with MCP tools across the platform. + * Used by both tool-input.tsx (workflow context) and MCP modal (workspace context). + */ + +import isEqual from 'lodash/isEqual' +import omit from 'lodash/omit' + +export type McpToolIssueType = + | 'server_not_found' + | 'server_error' + | 'tool_not_found' + | 'schema_changed' + | 'url_changed' + +export interface McpToolIssue { + type: McpToolIssueType + message: string +} + +export interface StoredMcpTool { + serverId: string + serverUrl?: string + toolName: string + schema?: Record +} + +export interface ServerState { + id: string + url?: string + connectionStatus?: 'connected' | 'disconnected' | 'error' + lastError?: string +} + +export interface DiscoveredTool { + serverId: string + name: string + inputSchema?: Record +} + +/** + * Compares two schemas to detect changes. + * Uses lodash isEqual for deep, key-order-independent comparison. + * Ignores description field which may be backfilled. 
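A quick illustration of that comparison, using made-up schema fragments and the helper defined just below:

```ts
import { hasSchemaChanged } from '@/lib/mcp/tool-validation'

// Made-up schema fragments for illustration only.
const stored = { type: 'object', properties: { query: { type: 'string' } } }

// Only a top-level description was backfilled -> not treated as a change.
hasSchemaChanged(stored, { ...stored, description: 'Search the docs' }) // false

// A new property appeared -> treated as a change.
hasSchemaChanged(stored, {
  type: 'object',
  properties: { query: { type: 'string' }, limit: { type: 'number' } },
}) // true
```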
+ */ +export function hasSchemaChanged( + storedSchema: Record | undefined, + serverSchema: Record | undefined +): boolean { + if (!storedSchema || !serverSchema) return false + + const storedWithoutDesc = omit(storedSchema, 'description') + const serverWithoutDesc = omit(serverSchema, 'description') + + return !isEqual(storedWithoutDesc, serverWithoutDesc) +} + +/** + * Detects issues with a stored MCP tool by comparing against current server/tool state. + */ +export function getMcpToolIssue( + storedTool: StoredMcpTool, + servers: ServerState[], + discoveredTools: DiscoveredTool[] +): McpToolIssue | null { + const { serverId, serverUrl, toolName, schema } = storedTool + + // Check server exists + const server = servers.find((s) => s.id === serverId) + if (!server) { + return { type: 'server_not_found', message: 'Server not found' } + } + + // Check server connection status + if (server.connectionStatus === 'error') { + return { type: 'server_error', message: server.lastError || 'Server connection error' } + } + if (server.connectionStatus !== 'connected') { + return { type: 'server_error', message: 'Server not connected' } + } + + // Check server URL changed (if we have stored URL) + if (serverUrl && server.url && serverUrl !== server.url) { + return { type: 'url_changed', message: 'Server URL changed - tools may be different' } + } + + // Check tool exists on server + const serverTool = discoveredTools.find((t) => t.serverId === serverId && t.name === toolName) + if (!serverTool) { + return { type: 'tool_not_found', message: 'Tool not found on server' } + } + + // Check schema changed + if (schema && serverTool.inputSchema) { + if (hasSchemaChanged(schema, serverTool.inputSchema)) { + return { type: 'schema_changed', message: 'Tool schema changed' } + } + } + + return null +} + +/** + * Returns a user-friendly label for the issue badge + */ +export function getIssueBadgeLabel(issue: McpToolIssue): string { + switch (issue.type) { + case 'schema_changed': + return 'stale' + case 'url_changed': + return 'stale' + default: + return 'unavailable' + } +} + +/** + * Checks if an issue means the tool cannot be used (vs just being stale) + */ +export function isToolUnavailable(issue: McpToolIssue | null): boolean { + if (!issue) return false + return ( + issue.type === 'server_not_found' || + issue.type === 'server_error' || + issue.type === 'tool_not_found' + ) +} diff --git a/apps/sim/lib/mcp/types.ts b/apps/sim/lib/mcp/types.ts index 169d07dcfa..d10a9c3fc6 100644 --- a/apps/sim/lib/mcp/types.ts +++ b/apps/sim/lib/mcp/types.ts @@ -6,6 +6,11 @@ // Modern MCP uses Streamable HTTP which handles both HTTP POST and SSE responses export type McpTransport = 'streamable-http' +export interface McpServerStatusConfig { + consecutiveFailures: number + lastSuccessfulDiscovery: string | null +} + export interface McpServerConfig { id: string name: string @@ -20,6 +25,7 @@ export interface McpServerConfig { timeout?: number retries?: number enabled?: boolean + statusConfig?: McpServerStatusConfig createdAt?: string updatedAt?: string } @@ -113,8 +119,8 @@ export class McpError extends Error { } export class McpConnectionError extends McpError { - constructor(message: string, serverId: string) { - super(`MCP Connection Error for server ${serverId}: ${message}`) + constructor(message: string, serverName: string) { + super(`Failed to connect to "${serverName}": ${message}`) this.name = 'McpConnectionError' } } diff --git a/apps/sim/lib/mcp/utils.ts b/apps/sim/lib/mcp/utils.ts index 7c245894ff..eee16742f3 
100644 --- a/apps/sim/lib/mcp/utils.ts +++ b/apps/sim/lib/mcp/utils.ts @@ -6,10 +6,11 @@ import type { McpApiResponse } from '@/lib/mcp/types' */ export const MCP_CONSTANTS = { EXECUTION_TIMEOUT: 60000, - CACHE_TIMEOUT: 30 * 1000, + CACHE_TIMEOUT: 5 * 60 * 1000, // 5 minutes DEFAULT_RETRIES: 3, DEFAULT_CONNECTION_TIMEOUT: 30000, MAX_CACHE_SIZE: 1000, + MAX_CONSECUTIVE_FAILURES: 3, } as const /** diff --git a/apps/sim/providers/utils.ts b/apps/sim/providers/utils.ts index 695818fae4..d1cbe1b819 100644 --- a/apps/sim/providers/utils.ts +++ b/apps/sim/providers/utils.ts @@ -1001,7 +1001,7 @@ export function supportsToolUsageControl(provider: string): boolean { * Prepare tool execution parameters, separating tool parameters from system parameters */ export function prepareToolExecution( - tool: { params?: Record }, + tool: { params?: Record; parameters?: Record }, llmArgs: Record, request: { workflowId?: string @@ -1051,6 +1051,8 @@ export function prepareToolExecution( ...(request.workflowVariables ? { workflowVariables: request.workflowVariables } : {}), ...(request.blockData ? { blockData: request.blockData } : {}), ...(request.blockNameMapping ? { blockNameMapping: request.blockNameMapping } : {}), + // Pass tool schema for MCP tools to skip discovery + ...(tool.parameters ? { _toolSchema: tool.parameters } : {}), } return { toolParams, executionParams } diff --git a/apps/sim/stores/settings-modal/store.ts b/apps/sim/stores/settings-modal/store.ts new file mode 100644 index 0000000000..b5acdb6018 --- /dev/null +++ b/apps/sim/stores/settings-modal/store.ts @@ -0,0 +1,51 @@ +'use client' + +import { create } from 'zustand' + +type SettingsSection = + | 'general' + | 'environment' + | 'template-profile' + | 'integrations' + | 'apikeys' + | 'files' + | 'subscription' + | 'team' + | 'sso' + | 'copilot' + | 'mcp' + | 'custom-tools' + +interface SettingsModalState { + isOpen: boolean + initialSection: SettingsSection | null + mcpServerId: string | null + + openModal: (options?: { section?: SettingsSection; mcpServerId?: string }) => void + closeModal: () => void + clearInitialState: () => void +} + +export const useSettingsModalStore = create((set) => ({ + isOpen: false, + initialSection: null, + mcpServerId: null, + + openModal: (options) => + set({ + isOpen: true, + initialSection: options?.section || null, + mcpServerId: options?.mcpServerId || null, + }), + + closeModal: () => + set({ + isOpen: false, + }), + + clearInitialState: () => + set({ + initialSection: null, + mcpServerId: null, + }), +})) diff --git a/apps/sim/tools/index.ts b/apps/sim/tools/index.ts index 43c3c3b981..4061ecf1e3 100644 --- a/apps/sim/tools/index.ts +++ b/apps/sim/tools/index.ts @@ -107,6 +107,7 @@ const MCP_SYSTEM_PARAMETERS = new Set([ 'workflowVariables', 'blockData', 'blockNameMapping', + '_toolSchema', ]) /** @@ -979,7 +980,10 @@ async function executeMcpTool( } } - const requestBody = { + // Get tool schema if provided (from agent block's cached schema) + const toolSchema = params._toolSchema + + const requestBody: Record = { serverId, toolName, arguments: toolArguments, @@ -987,6 +991,11 @@ async function executeMcpTool( workspaceId, // Pass workspace context for scoping } + // Include schema to skip discovery on execution + if (toolSchema) { + requestBody.toolSchema = toolSchema + } + const body = JSON.stringify(requestBody) // Check request body size before sending @@ -995,6 +1004,7 @@ async function executeMcpTool( logger.info(`[${actualRequestId}] Making MCP tool request to ${toolName} on ${serverId}`, { 
hasWorkspaceId: !!workspaceId, hasWorkflowId: !!workflowId, + hasToolSchema: !!toolSchema, }) const response = await fetch(`${baseUrl}/api/mcp/tools/execute`, { diff --git a/packages/db/migrations/0123_windy_lockheed.sql b/packages/db/migrations/0123_windy_lockheed.sql new file mode 100644 index 0000000000..d0b67f6313 --- /dev/null +++ b/packages/db/migrations/0123_windy_lockheed.sql @@ -0,0 +1 @@ +ALTER TABLE "mcp_servers" ADD COLUMN "status_config" jsonb DEFAULT '{}'; \ No newline at end of file diff --git a/packages/db/migrations/meta/0123_snapshot.json b/packages/db/migrations/meta/0123_snapshot.json new file mode 100644 index 0000000000..4001416fd3 --- /dev/null +++ b/packages/db/migrations/meta/0123_snapshot.json @@ -0,0 +1,7722 @@ +{ + "id": "1be6ebb3-fe8a-4abd-9f4d-cfb8341f5319", + "prevId": "ec1ad797-549e-4e4e-a40b-f2a865ae4c4c", + "version": "7", + "dialect": "postgresql", + "tables": { + "public.account": { + "name": "account", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "account_id": { + "name": "account_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "access_token": { + "name": "access_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "refresh_token": { + "name": "refresh_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "id_token": { + "name": "id_token", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "access_token_expires_at": { + "name": "access_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "refresh_token_expires_at": { + "name": "refresh_token_expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "scope": { + "name": "scope", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + } + }, + "indexes": { + "account_user_id_idx": { + "name": "account_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idx_account_on_account_id_provider_id": { + "name": "idx_account_on_account_id_provider_id", + "columns": [ + { + "expression": "account_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "account_user_provider_account_unique": { + "name": "account_user_provider_account_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "account_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", 
+ "with": {} + } + }, + "foreignKeys": { + "account_user_id_user_id_fk": { + "name": "account_user_id_user_id_fk", + "tableFrom": "account", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.api_key": { + "name": "api_key", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'personal'" + }, + "last_used": { + "name": "last_used", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": {}, + "foreignKeys": { + "api_key_user_id_user_id_fk": { + "name": "api_key_user_id_user_id_fk", + "tableFrom": "api_key", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "api_key_workspace_id_workspace_id_fk": { + "name": "api_key_workspace_id_workspace_id_fk", + "tableFrom": "api_key", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "api_key_created_by_user_id_fk": { + "name": "api_key_created_by_user_id_fk", + "tableFrom": "api_key", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "api_key_key_unique": { + "name": "api_key_key_unique", + "nullsNotDistinct": false, + "columns": ["key"] + } + }, + "policies": {}, + "checkConstraints": { + "workspace_type_check": { + "name": "workspace_type_check", + "value": "(type = 'workspace' AND workspace_id IS NOT NULL) OR (type = 'personal' AND workspace_id IS NULL)" + } + }, + "isRLSEnabled": false + }, + "public.chat": { + "name": "chat", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + 
}, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "customizations": { + "name": "customizations", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "auth_type": { + "name": "auth_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'public'" + }, + "password": { + "name": "password", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "allowed_emails": { + "name": "allowed_emails", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "output_configs": { + "name": "output_configs", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'[]'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "identifier_idx": { + "name": "identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "chat_workflow_id_workflow_id_fk": { + "name": "chat_workflow_id_workflow_id_fk", + "tableFrom": "chat", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "chat_user_id_user_id_fk": { + "name": "chat_user_id_user_id_fk", + "tableFrom": "chat", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.copilot_chats": { + "name": "copilot_chats", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "messages": { + "name": "messages", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "model": { + "name": "model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'claude-3-7-sonnet-latest'" + }, + "conversation_id": { + "name": "conversation_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "preview_yaml": { + "name": "preview_yaml", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "plan_artifact": { + "name": "plan_artifact", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "copilot_chats_user_id_idx": { + "name": "copilot_chats_user_id_idx", + "columns": [ + { + "expression": "user_id", + 
"isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_workflow_id_idx": { + "name": "copilot_chats_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_user_workflow_idx": { + "name": "copilot_chats_user_workflow_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_created_at_idx": { + "name": "copilot_chats_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_chats_updated_at_idx": { + "name": "copilot_chats_updated_at_idx", + "columns": [ + { + "expression": "updated_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "copilot_chats_user_id_user_id_fk": { + "name": "copilot_chats_user_id_user_id_fk", + "tableFrom": "copilot_chats", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "copilot_chats_workflow_id_workflow_id_fk": { + "name": "copilot_chats_workflow_id_workflow_id_fk", + "tableFrom": "copilot_chats", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.copilot_feedback": { + "name": "copilot_feedback", + "schema": "", + "columns": { + "feedback_id": { + "name": "feedback_id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chat_id": { + "name": "chat_id", + "type": "uuid", + "primaryKey": false, + "notNull": true + }, + "user_query": { + "name": "user_query", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "agent_response": { + "name": "agent_response", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "is_positive": { + "name": "is_positive", + "type": "boolean", + "primaryKey": false, + "notNull": true + }, + "feedback": { + "name": "feedback", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_yaml": { + "name": "workflow_yaml", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "copilot_feedback_user_id_idx": { + "name": "copilot_feedback_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + 
"method": "btree", + "with": {} + }, + "copilot_feedback_chat_id_idx": { + "name": "copilot_feedback_chat_id_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_user_chat_idx": { + "name": "copilot_feedback_user_chat_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_is_positive_idx": { + "name": "copilot_feedback_is_positive_idx", + "columns": [ + { + "expression": "is_positive", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "copilot_feedback_created_at_idx": { + "name": "copilot_feedback_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "copilot_feedback_user_id_user_id_fk": { + "name": "copilot_feedback_user_id_user_id_fk", + "tableFrom": "copilot_feedback", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "copilot_feedback_chat_id_copilot_chats_id_fk": { + "name": "copilot_feedback_chat_id_copilot_chats_id_fk", + "tableFrom": "copilot_feedback", + "tableTo": "copilot_chats", + "columnsFrom": ["chat_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.custom_tools": { + "name": "custom_tools", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "title": { + "name": "title", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "schema": { + "name": "schema", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "code": { + "name": "code", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "custom_tools_workspace_id_idx": { + "name": "custom_tools_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "custom_tools_workspace_title_unique": { + "name": "custom_tools_workspace_title_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "title", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + 
"method": "btree", + "with": {} + } + }, + "foreignKeys": { + "custom_tools_workspace_id_workspace_id_fk": { + "name": "custom_tools_workspace_id_workspace_id_fk", + "tableFrom": "custom_tools", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "custom_tools_user_id_user_id_fk": { + "name": "custom_tools_user_id_user_id_fk", + "tableFrom": "custom_tools", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.docs_embeddings": { + "name": "docs_embeddings", + "schema": "", + "columns": { + "chunk_id": { + "name": "chunk_id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "chunk_text": { + "name": "chunk_text", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_document": { + "name": "source_document", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_link": { + "name": "source_link", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "header_text": { + "name": "header_text", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "header_level": { + "name": "header_level", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "embedding": { + "name": "embedding", + "type": "vector(1536)", + "primaryKey": false, + "notNull": true + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "chunk_text_tsv": { + "name": "chunk_text_tsv", + "type": "tsvector", + "primaryKey": false, + "notNull": false, + "generated": { + "as": "to_tsvector('english', \"docs_embeddings\".\"chunk_text\")", + "type": "stored" + } + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "docs_emb_source_document_idx": { + "name": "docs_emb_source_document_idx", + "columns": [ + { + "expression": "source_document", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_header_level_idx": { + "name": "docs_emb_header_level_idx", + "columns": [ + { + "expression": "header_level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_source_header_idx": { + "name": "docs_emb_source_header_idx", + "columns": [ + { + "expression": "source_document", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "header_level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_model_idx": { + "name": "docs_emb_model_idx", + "columns": [ + { + "expression": 
"embedding_model", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_emb_created_at_idx": { + "name": "docs_emb_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "docs_embedding_vector_hnsw_idx": { + "name": "docs_embedding_vector_hnsw_idx", + "columns": [ + { + "expression": "embedding", + "isExpression": false, + "asc": true, + "nulls": "last", + "opclass": "vector_cosine_ops" + } + ], + "isUnique": false, + "concurrently": false, + "method": "hnsw", + "with": { + "m": 16, + "ef_construction": 64 + } + }, + "docs_emb_metadata_gin_idx": { + "name": "docs_emb_metadata_gin_idx", + "columns": [ + { + "expression": "metadata", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + }, + "docs_emb_chunk_text_fts_idx": { + "name": "docs_emb_chunk_text_fts_idx", + "columns": [ + { + "expression": "chunk_text_tsv", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "docs_embedding_not_null_check": { + "name": "docs_embedding_not_null_check", + "value": "\"embedding\" IS NOT NULL" + }, + "docs_header_level_check": { + "name": "docs_header_level_check", + "value": "\"header_level\" >= 1 AND \"header_level\" <= 6" + } + }, + "isRLSEnabled": false + }, + "public.document": { + "name": "document", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "filename": { + "name": "filename", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "file_url": { + "name": "file_url", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "file_size": { + "name": "file_size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "mime_type": { + "name": "mime_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chunk_count": { + "name": "chunk_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "character_count": { + "name": "character_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "processing_status": { + "name": "processing_status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "processing_started_at": { + "name": "processing_started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "processing_completed_at": { + "name": "processing_completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "processing_error": { + "name": "processing_error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "deleted_at": { + "name": "deleted_at", + 
"type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "tag1": { + "name": "tag1", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag2": { + "name": "tag2", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag3": { + "name": "tag3", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag4": { + "name": "tag4", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag5": { + "name": "tag5", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag6": { + "name": "tag6", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag7": { + "name": "tag7", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "doc_kb_id_idx": { + "name": "doc_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_filename_idx": { + "name": "doc_filename_idx", + "columns": [ + { + "expression": "filename", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_processing_status_idx": { + "name": "doc_processing_status_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "processing_status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag1_idx": { + "name": "doc_tag1_idx", + "columns": [ + { + "expression": "tag1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag2_idx": { + "name": "doc_tag2_idx", + "columns": [ + { + "expression": "tag2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag3_idx": { + "name": "doc_tag3_idx", + "columns": [ + { + "expression": "tag3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag4_idx": { + "name": "doc_tag4_idx", + "columns": [ + { + "expression": "tag4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag5_idx": { + "name": "doc_tag5_idx", + "columns": [ + { + "expression": "tag5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag6_idx": { + "name": "doc_tag6_idx", + "columns": [ + { + "expression": "tag6", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "doc_tag7_idx": { + "name": "doc_tag7_idx", + "columns": [ + { + "expression": "tag7", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "document_knowledge_base_id_knowledge_base_id_fk": { + "name": 
"document_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "document", + "tableTo": "knowledge_base", + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.embedding": { + "name": "embedding", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "document_id": { + "name": "document_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chunk_index": { + "name": "chunk_index", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "chunk_hash": { + "name": "chunk_hash", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content": { + "name": "content", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content_length": { + "name": "content_length", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "embedding": { + "name": "embedding", + "type": "vector(1536)", + "primaryKey": false, + "notNull": false + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "start_offset": { + "name": "start_offset", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "end_offset": { + "name": "end_offset", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "tag1": { + "name": "tag1", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag2": { + "name": "tag2", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag3": { + "name": "tag3", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag4": { + "name": "tag4", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag5": { + "name": "tag5", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag6": { + "name": "tag6", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "tag7": { + "name": "tag7", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "content_tsv": { + "name": "content_tsv", + "type": "tsvector", + "primaryKey": false, + "notNull": false, + "generated": { + "as": "to_tsvector('english', \"embedding\".\"content\")", + "type": "stored" + } + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "emb_kb_id_idx": { + "name": "emb_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_id_idx": { + "name": "emb_doc_id_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + 
"method": "btree", + "with": {} + }, + "emb_doc_chunk_idx": { + "name": "emb_doc_chunk_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chunk_index", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_kb_model_idx": { + "name": "emb_kb_model_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "embedding_model", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_kb_enabled_idx": { + "name": "emb_kb_enabled_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_doc_enabled_idx": { + "name": "emb_doc_enabled_idx", + "columns": [ + { + "expression": "document_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "embedding_vector_hnsw_idx": { + "name": "embedding_vector_hnsw_idx", + "columns": [ + { + "expression": "embedding", + "isExpression": false, + "asc": true, + "nulls": "last", + "opclass": "vector_cosine_ops" + } + ], + "isUnique": false, + "concurrently": false, + "method": "hnsw", + "with": { + "m": 16, + "ef_construction": 64 + } + }, + "emb_tag1_idx": { + "name": "emb_tag1_idx", + "columns": [ + { + "expression": "tag1", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag2_idx": { + "name": "emb_tag2_idx", + "columns": [ + { + "expression": "tag2", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag3_idx": { + "name": "emb_tag3_idx", + "columns": [ + { + "expression": "tag3", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag4_idx": { + "name": "emb_tag4_idx", + "columns": [ + { + "expression": "tag4", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag5_idx": { + "name": "emb_tag5_idx", + "columns": [ + { + "expression": "tag5", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag6_idx": { + "name": "emb_tag6_idx", + "columns": [ + { + "expression": "tag6", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_tag7_idx": { + "name": "emb_tag7_idx", + "columns": [ + { + "expression": "tag7", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "emb_content_fts_idx": { + "name": 
"emb_content_fts_idx", + "columns": [ + { + "expression": "content_tsv", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "gin", + "with": {} + } + }, + "foreignKeys": { + "embedding_knowledge_base_id_knowledge_base_id_fk": { + "name": "embedding_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "embedding", + "tableTo": "knowledge_base", + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "embedding_document_id_document_id_fk": { + "name": "embedding_document_id_document_id_fk", + "tableFrom": "embedding", + "tableTo": "document", + "columnsFrom": ["document_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "embedding_not_null_check": { + "name": "embedding_not_null_check", + "value": "\"embedding\" IS NOT NULL" + } + }, + "isRLSEnabled": false + }, + "public.environment": { + "name": "environment", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "environment_user_id_user_id_fk": { + "name": "environment_user_id_user_id_fk", + "tableFrom": "environment", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "environment_user_id_unique": { + "name": "environment_user_id_unique", + "nullsNotDistinct": false, + "columns": ["user_id"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.idempotency_key": { + "name": "idempotency_key", + "schema": "", + "columns": { + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "namespace": { + "name": "namespace", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'default'" + }, + "result": { + "name": "result", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "idempotency_key_namespace_unique": { + "name": "idempotency_key_namespace_unique", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "namespace", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idempotency_key_created_at_idx": { + "name": "idempotency_key_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idempotency_key_namespace_idx": { + "name": "idempotency_key_namespace_idx", + "columns": [ + { + "expression": "namespace", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + 
"concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.invitation": { + "name": "invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "invitation_email_idx": { + "name": "invitation_email_idx", + "columns": [ + { + "expression": "email", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "invitation_organization_id_idx": { + "name": "invitation_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "invitation_inviter_id_user_id_fk": { + "name": "invitation_inviter_id_user_id_fk", + "tableFrom": "invitation", + "tableTo": "user", + "columnsFrom": ["inviter_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "invitation_organization_id_organization_id_fk": { + "name": "invitation_organization_id_organization_id_fk", + "tableFrom": "invitation", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.knowledge_base": { + "name": "knowledge_base", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "token_count": { + "name": "token_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "embedding_model": { + "name": "embedding_model", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text-embedding-3-small'" + }, + "embedding_dimension": { + "name": "embedding_dimension", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 1536 + }, + "chunking_config": { + "name": "chunking_config", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": 
"'{\"maxSize\": 1024, \"minSize\": 1, \"overlap\": 200}'" + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "kb_user_id_idx": { + "name": "kb_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_workspace_id_idx": { + "name": "kb_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_user_workspace_idx": { + "name": "kb_user_workspace_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_deleted_at_idx": { + "name": "kb_deleted_at_idx", + "columns": [ + { + "expression": "deleted_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "knowledge_base_user_id_user_id_fk": { + "name": "knowledge_base_user_id_user_id_fk", + "tableFrom": "knowledge_base", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "knowledge_base_workspace_id_workspace_id_fk": { + "name": "knowledge_base_workspace_id_workspace_id_fk", + "tableFrom": "knowledge_base", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "no action", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.knowledge_base_tag_definitions": { + "name": "knowledge_base_tag_definitions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "knowledge_base_id": { + "name": "knowledge_base_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "tag_slot": { + "name": "tag_slot", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "display_name": { + "name": "display_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "field_type": { + "name": "field_type", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'text'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "kb_tag_definitions_kb_slot_idx": { + "name": "kb_tag_definitions_kb_slot_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "tag_slot", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, 
+ "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_tag_definitions_kb_display_name_idx": { + "name": "kb_tag_definitions_kb_display_name_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "display_name", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "kb_tag_definitions_kb_id_idx": { + "name": "kb_tag_definitions_kb_id_idx", + "columns": [ + { + "expression": "knowledge_base_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk": { + "name": "knowledge_base_tag_definitions_knowledge_base_id_knowledge_base_id_fk", + "tableFrom": "knowledge_base_tag_definitions", + "tableTo": "knowledge_base", + "columnsFrom": ["knowledge_base_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.mcp_servers": { + "name": "mcp_servers", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "transport": { + "name": "transport", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "url": { + "name": "url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "headers": { + "name": "headers", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "timeout": { + "name": "timeout", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 30000 + }, + "retries": { + "name": "retries", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 3 + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "last_connected": { + "name": "last_connected", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "connection_status": { + "name": "connection_status", + "type": "text", + "primaryKey": false, + "notNull": false, + "default": "'disconnected'" + }, + "last_error": { + "name": "last_error", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status_config": { + "name": "status_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "tool_count": { + "name": "tool_count", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_tools_refresh": { + "name": "last_tools_refresh", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "total_requests": { + "name": "total_requests", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_used": { + "name": "last_used", + "type": "timestamp", + "primaryKey": false, + "notNull": false + 
}, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "mcp_servers_workspace_enabled_idx": { + "name": "mcp_servers_workspace_enabled_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "enabled", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "mcp_servers_workspace_deleted_idx": { + "name": "mcp_servers_workspace_deleted_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "deleted_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "mcp_servers_workspace_id_workspace_id_fk": { + "name": "mcp_servers_workspace_id_workspace_id_fk", + "tableFrom": "mcp_servers", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "mcp_servers_created_by_user_id_fk": { + "name": "mcp_servers_created_by_user_id_fk", + "tableFrom": "mcp_servers", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.member": { + "name": "member", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "member_user_id_idx": { + "name": "member_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "member_organization_id_idx": { + "name": "member_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "member_user_id_user_id_fk": { + "name": "member_user_id_user_id_fk", + "tableFrom": "member", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "member_organization_id_organization_id_fk": { + "name": "member_organization_id_organization_id_fk", + "tableFrom": "member", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + 
"compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.memory": { + "name": "memory", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "data": { + "name": "data", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "deleted_at": { + "name": "deleted_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "memory_key_idx": { + "name": "memory_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "memory_workflow_idx": { + "name": "memory_workflow_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "memory_workflow_key_idx": { + "name": "memory_workflow_key_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "memory_workflow_id_workflow_id_fk": { + "name": "memory_workflow_id_workflow_id_fk", + "tableFrom": "memory", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.organization": { + "name": "organization", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "slug": { + "name": "slug", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "logo": { + "name": "logo", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "json", + "primaryKey": false, + "notNull": false + }, + "org_usage_limit": { + "name": "org_usage_limit", + "type": "numeric", + "primaryKey": false, + "notNull": false + }, + "storage_used_bytes": { + "name": "storage_used_bytes", + "type": "bigint", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "departed_member_usage": { + "name": "departed_member_usage", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "credit_balance": { + "name": "credit_balance", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": 
"timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.paused_executions": { + "name": "paused_executions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_snapshot": { + "name": "execution_snapshot", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "pause_points": { + "name": "pause_points", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "total_pause_count": { + "name": "total_pause_count", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "resumed_count": { + "name": "resumed_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'paused'" + }, + "metadata": { + "name": "metadata", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'::jsonb" + }, + "paused_at": { + "name": "paused_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "paused_executions_workflow_id_idx": { + "name": "paused_executions_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "paused_executions_status_idx": { + "name": "paused_executions_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "paused_executions_execution_id_unique": { + "name": "paused_executions_execution_id_unique", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "paused_executions_workflow_id_workflow_id_fk": { + "name": "paused_executions_workflow_id_workflow_id_fk", + "tableFrom": "paused_executions", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.permissions": { + "name": "permissions", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "entity_type": { + "name": "entity_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "entity_id": { + "name": "entity_id", + "type": "text", + "primaryKey": false, 
+ "notNull": true + }, + "permission_type": { + "name": "permission_type", + "type": "permission_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "permissions_user_id_idx": { + "name": "permissions_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_entity_idx": { + "name": "permissions_entity_idx", + "columns": [ + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_type_idx": { + "name": "permissions_user_entity_type_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_permission_idx": { + "name": "permissions_user_entity_permission_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "permission_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_user_entity_idx": { + "name": "permissions_user_entity_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "permissions_unique_constraint": { + "name": "permissions_unique_constraint", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "entity_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "permissions_user_id_user_id_fk": { + "name": "permissions_user_id_user_id_fk", + "tableFrom": "permissions", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.rate_limit_bucket": { + "name": "rate_limit_bucket", + "schema": "", + "columns": { + "key": { + "name": "key", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "tokens": { + "name": "tokens", + "type": "numeric", + 
"primaryKey": false, + "notNull": true + }, + "last_refill_at": { + "name": "last_refill_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.resume_queue": { + "name": "resume_queue", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "paused_execution_id": { + "name": "paused_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "parent_execution_id": { + "name": "parent_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "new_execution_id": { + "name": "new_execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "context_id": { + "name": "context_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "resume_input": { + "name": "resume_input", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "queued_at": { + "name": "queued_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "claimed_at": { + "name": "claimed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "completed_at": { + "name": "completed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "failure_reason": { + "name": "failure_reason", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "resume_queue_parent_status_idx": { + "name": "resume_queue_parent_status_idx", + "columns": [ + { + "expression": "parent_execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "queued_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "resume_queue_new_execution_idx": { + "name": "resume_queue_new_execution_idx", + "columns": [ + { + "expression": "new_execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "resume_queue_paused_execution_id_paused_executions_id_fk": { + "name": "resume_queue_paused_execution_id_paused_executions_id_fk", + "tableFrom": "resume_queue", + "tableTo": "paused_executions", + "columnsFrom": ["paused_execution_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.session": { + "name": "session", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + 
"notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ip_address": { + "name": "ip_address", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_agent": { + "name": "user_agent", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "active_organization_id": { + "name": "active_organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "session_user_id_idx": { + "name": "session_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "session_token_idx": { + "name": "session_token_idx", + "columns": [ + { + "expression": "token", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "session_user_id_user_id_fk": { + "name": "session_user_id_user_id_fk", + "tableFrom": "session", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "session_active_organization_id_organization_id_fk": { + "name": "session_active_organization_id_organization_id_fk", + "tableFrom": "session", + "tableTo": "organization", + "columnsFrom": ["active_organization_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "session_token_unique": { + "name": "session_token_unique", + "nullsNotDistinct": false, + "columns": ["token"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.settings": { + "name": "settings", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "theme": { + "name": "theme", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'system'" + }, + "auto_connect": { + "name": "auto_connect", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "telemetry_enabled": { + "name": "telemetry_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "email_preferences": { + "name": "email_preferences", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "billing_usage_notifications_enabled": { + "name": "billing_usage_notifications_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "show_training_controls": { + "name": "show_training_controls", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "super_user_mode_enabled": { + "name": "super_user_mode_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "error_notifications_enabled": { + "name": "error_notifications_enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "copilot_enabled_models": { + "name": "copilot_enabled_models", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "copilot_auto_allowed_tools": { + 
"name": "copilot_auto_allowed_tools", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "settings_user_id_user_id_fk": { + "name": "settings_user_id_user_id_fk", + "tableFrom": "settings", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "settings_user_id_unique": { + "name": "settings_user_id_unique", + "nullsNotDistinct": false, + "columns": ["user_id"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.sso_provider": { + "name": "sso_provider", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "issuer": { + "name": "issuer", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "domain": { + "name": "domain", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "oidc_config": { + "name": "oidc_config", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "saml_config": { + "name": "saml_config", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "provider_id": { + "name": "provider_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "organization_id": { + "name": "organization_id", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "sso_provider_provider_id_idx": { + "name": "sso_provider_provider_id_idx", + "columns": [ + { + "expression": "provider_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_domain_idx": { + "name": "sso_provider_domain_idx", + "columns": [ + { + "expression": "domain", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_user_id_idx": { + "name": "sso_provider_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "sso_provider_organization_id_idx": { + "name": "sso_provider_organization_id_idx", + "columns": [ + { + "expression": "organization_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "sso_provider_user_id_user_id_fk": { + "name": "sso_provider_user_id_user_id_fk", + "tableFrom": "sso_provider", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "sso_provider_organization_id_organization_id_fk": { + "name": "sso_provider_organization_id_organization_id_fk", + "tableFrom": "sso_provider", + "tableTo": "organization", + "columnsFrom": ["organization_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.subscription": { + 
"name": "subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "plan": { + "name": "plan", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "reference_id": { + "name": "reference_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "stripe_customer_id": { + "name": "stripe_customer_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "stripe_subscription_id": { + "name": "stripe_subscription_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "period_start": { + "name": "period_start", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "period_end": { + "name": "period_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "cancel_at_period_end": { + "name": "cancel_at_period_end", + "type": "boolean", + "primaryKey": false, + "notNull": false + }, + "seats": { + "name": "seats", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "trial_start": { + "name": "trial_start", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "trial_end": { + "name": "trial_end", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "metadata": { + "name": "metadata", + "type": "json", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "subscription_reference_status_idx": { + "name": "subscription_reference_status_idx", + "columns": [ + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": { + "check_enterprise_metadata": { + "name": "check_enterprise_metadata", + "value": "plan != 'enterprise' OR metadata IS NOT NULL" + } + }, + "isRLSEnabled": false + }, + "public.template_creators": { + "name": "template_creators", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "reference_type": { + "name": "reference_type", + "type": "template_creator_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "reference_id": { + "name": "reference_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "profile_image_url": { + "name": "profile_image_url", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "details": { + "name": "details", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "verified": { + "name": "verified", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "template_creators_reference_idx": { + "name": "template_creators_reference_idx", + 
"columns": [ + { + "expression": "reference_type", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_creators_reference_id_idx": { + "name": "template_creators_reference_id_idx", + "columns": [ + { + "expression": "reference_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_creators_created_by_idx": { + "name": "template_creators_created_by_idx", + "columns": [ + { + "expression": "created_by", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "template_creators_created_by_user_id_fk": { + "name": "template_creators_created_by_user_id_fk", + "tableFrom": "template_creators", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.template_stars": { + "name": "template_stars", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "template_id": { + "name": "template_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "starred_at": { + "name": "starred_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "template_stars_user_id_idx": { + "name": "template_stars_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_id_idx": { + "name": "template_stars_template_id_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_user_template_idx": { + "name": "template_stars_user_template_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_template_user_idx": { + "name": "template_stars_template_user_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_starred_at_idx": { + "name": "template_stars_starred_at_idx", + "columns": [ + { + "expression": "starred_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": 
"btree", + "with": {} + }, + "template_stars_template_starred_at_idx": { + "name": "template_stars_template_starred_at_idx", + "columns": [ + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "starred_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "template_stars_user_template_unique": { + "name": "template_stars_user_template_unique", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "template_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "template_stars_user_id_user_id_fk": { + "name": "template_stars_user_id_user_id_fk", + "tableFrom": "template_stars", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "template_stars_template_id_templates_id_fk": { + "name": "template_stars_template_id_templates_id_fk", + "tableFrom": "template_stars", + "tableTo": "templates", + "columnsFrom": ["template_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.templates": { + "name": "templates", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "details": { + "name": "details", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "creator_id": { + "name": "creator_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "views": { + "name": "views", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "stars": { + "name": "stars", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "template_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "tags": { + "name": "tags", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "'{}'::text[]" + }, + "required_credentials": { + "name": "required_credentials", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'[]'" + }, + "state": { + "name": "state", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "templates_status_idx": { + "name": "templates_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_creator_id_idx": { + "name": "templates_creator_id_idx", + "columns": [ + { + "expression": "creator_id", + "isExpression": false, + "asc": true, + "nulls": 
"last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_views_idx": { + "name": "templates_views_idx", + "columns": [ + { + "expression": "views", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_stars_idx": { + "name": "templates_stars_idx", + "columns": [ + { + "expression": "stars", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_status_views_idx": { + "name": "templates_status_views_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "views", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_status_stars_idx": { + "name": "templates_status_stars_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "stars", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_created_at_idx": { + "name": "templates_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "templates_updated_at_idx": { + "name": "templates_updated_at_idx", + "columns": [ + { + "expression": "updated_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "templates_workflow_id_workflow_id_fk": { + "name": "templates_workflow_id_workflow_id_fk", + "tableFrom": "templates", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + }, + "templates_creator_id_template_creators_id_fk": { + "name": "templates_creator_id_template_creators_id_fk", + "tableFrom": "templates", + "tableTo": "template_creators", + "columnsFrom": ["creator_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user": { + "name": "user", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email_verified": { + "name": "email_verified", + "type": "boolean", + "primaryKey": false, + "notNull": true + }, + "image": { + "name": "image", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "stripe_customer_id": { + "name": "stripe_customer_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "is_super_user": { + "name": "is_super_user", + "type": 
"boolean", + "primaryKey": false, + "notNull": true, + "default": false + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_email_unique": { + "name": "user_email_unique", + "nullsNotDistinct": false, + "columns": ["email"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.user_stats": { + "name": "user_stats", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "total_manual_executions": { + "name": "total_manual_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_api_calls": { + "name": "total_api_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_webhook_triggers": { + "name": "total_webhook_triggers", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_scheduled_executions": { + "name": "total_scheduled_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_chat_executions": { + "name": "total_chat_executions", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_tokens_used": { + "name": "total_tokens_used", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_cost": { + "name": "total_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_usage_limit": { + "name": "current_usage_limit", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'10'" + }, + "usage_limit_updated_at": { + "name": "usage_limit_updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false, + "default": "now()" + }, + "current_period_cost": { + "name": "current_period_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "last_period_cost": { + "name": "last_period_cost", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "billed_overage_this_period": { + "name": "billed_overage_this_period", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "pro_period_cost_snapshot": { + "name": "pro_period_cost_snapshot", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "credit_balance": { + "name": "credit_balance", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "total_copilot_cost": { + "name": "total_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "current_period_copilot_cost": { + "name": "current_period_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "last_period_copilot_cost": { + "name": "last_period_copilot_cost", + "type": "numeric", + "primaryKey": false, + "notNull": false, + "default": "'0'" + }, + "total_copilot_tokens": { + "name": "total_copilot_tokens", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "total_copilot_calls": { + "name": "total_copilot_calls", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "storage_used_bytes": { + "name": "storage_used_bytes", + "type": "bigint", + "primaryKey": false, + 
"notNull": true, + "default": 0 + }, + "last_active": { + "name": "last_active", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "billing_blocked": { + "name": "billing_blocked", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "billing_blocked_reason": { + "name": "billing_blocked_reason", + "type": "billing_blocked_reason", + "typeSchema": "public", + "primaryKey": false, + "notNull": false + } + }, + "indexes": {}, + "foreignKeys": { + "user_stats_user_id_user_id_fk": { + "name": "user_stats_user_id_user_id_fk", + "tableFrom": "user_stats", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "user_stats_user_id_unique": { + "name": "user_stats_user_id_unique", + "nullsNotDistinct": false, + "columns": ["user_id"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.verification": { + "name": "verification", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "identifier": { + "name": "identifier", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "value": { + "name": "value", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "verification_identifier_idx": { + "name": "verification_identifier_idx", + "columns": [ + { + "expression": "identifier", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.waitlist": { + "name": "waitlist", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": {}, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "waitlist_email_unique": { + "name": "waitlist_email_unique", + "nullsNotDistinct": false, + "columns": ["email"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.webhook": { + "name": "webhook", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "block_id": { + "name": "block_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "path": { + "name": "path", + "type": 
"text", + "primaryKey": false, + "notNull": true + }, + "provider": { + "name": "provider", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "provider_config": { + "name": "provider_config", + "type": "json", + "primaryKey": false, + "notNull": false + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "failed_count": { + "name": "failed_count", + "type": "integer", + "primaryKey": false, + "notNull": false, + "default": 0 + }, + "last_failed_at": { + "name": "last_failed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "path_idx": { + "name": "path_idx", + "columns": [ + { + "expression": "path", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "idx_webhook_on_workflow_id_block_id": { + "name": "idx_webhook_on_workflow_id_block_id", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "webhook_workflow_id_workflow_id_fk": { + "name": "webhook_workflow_id_workflow_id_fk", + "tableFrom": "webhook", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "webhook_block_id_workflow_blocks_id_fk": { + "name": "webhook_block_id_workflow_blocks_id_fk", + "tableFrom": "webhook", + "tableTo": "workflow_blocks", + "columnsFrom": ["block_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow": { + "name": "workflow", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "folder_id": { + "name": "folder_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "description": { + "name": "description", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "color": { + "name": "color", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'#3972F6'" + }, + "last_synced": { + "name": "last_synced", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "is_deployed": { + "name": "is_deployed", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "deployed_at": { + "name": "deployed_at", + 
"type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "run_count": { + "name": "run_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_run_at": { + "name": "last_run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + } + }, + "indexes": { + "workflow_user_id_idx": { + "name": "workflow_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_workspace_id_idx": { + "name": "workflow_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_user_workspace_idx": { + "name": "workflow_user_workspace_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_user_id_user_id_fk": { + "name": "workflow_user_id_user_id_fk", + "tableFrom": "workflow", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_workspace_id_workspace_id_fk": { + "name": "workflow_workspace_id_workspace_id_fk", + "tableFrom": "workflow", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_folder_id_workflow_folder_id_fk": { + "name": "workflow_folder_id_workflow_folder_id_fk", + "tableFrom": "workflow", + "tableTo": "workflow_folder", + "columnsFrom": ["folder_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_blocks": { + "name": "workflow_blocks", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "position_x": { + "name": "position_x", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "position_y": { + "name": "position_y", + "type": "numeric", + "primaryKey": false, + "notNull": true + }, + "enabled": { + "name": "enabled", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "horizontal_handles": { + "name": "horizontal_handles", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "is_wide": { + "name": "is_wide", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "advanced_mode": { + "name": "advanced_mode", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "trigger_mode": { + "name": "trigger_mode", + 
"type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "height": { + "name": "height", + "type": "numeric", + "primaryKey": false, + "notNull": true, + "default": "'0'" + }, + "sub_blocks": { + "name": "sub_blocks", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "outputs": { + "name": "outputs", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "data": { + "name": "data", + "type": "jsonb", + "primaryKey": false, + "notNull": false, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_blocks_workflow_id_idx": { + "name": "workflow_blocks_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_blocks_workflow_id_workflow_id_fk": { + "name": "workflow_blocks_workflow_id_workflow_id_fk", + "tableFrom": "workflow_blocks", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_checkpoints": { + "name": "workflow_checkpoints", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "uuid", + "primaryKey": true, + "notNull": true, + "default": "gen_random_uuid()" + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "chat_id": { + "name": "chat_id", + "type": "uuid", + "primaryKey": false, + "notNull": true + }, + "message_id": { + "name": "message_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "workflow_state": { + "name": "workflow_state", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_checkpoints_user_id_idx": { + "name": "workflow_checkpoints_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_workflow_id_idx": { + "name": "workflow_checkpoints_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_chat_id_idx": { + "name": "workflow_checkpoints_chat_id_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_message_id_idx": { + "name": 
"workflow_checkpoints_message_id_idx", + "columns": [ + { + "expression": "message_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_user_workflow_idx": { + "name": "workflow_checkpoints_user_workflow_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_workflow_chat_idx": { + "name": "workflow_checkpoints_workflow_chat_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_created_at_idx": { + "name": "workflow_checkpoints_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_checkpoints_chat_created_at_idx": { + "name": "workflow_checkpoints_chat_created_at_idx", + "columns": [ + { + "expression": "chat_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_checkpoints_user_id_user_id_fk": { + "name": "workflow_checkpoints_user_id_user_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_checkpoints_workflow_id_workflow_id_fk": { + "name": "workflow_checkpoints_workflow_id_workflow_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_checkpoints_chat_id_copilot_chats_id_fk": { + "name": "workflow_checkpoints_chat_id_copilot_chats_id_fk", + "tableFrom": "workflow_checkpoints", + "tableTo": "copilot_chats", + "columnsFrom": ["chat_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_deployment_version": { + "name": "workflow_deployment_version", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "version": { + "name": "version", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "state": { + "name": "state", + "type": "json", + "primaryKey": false, + "notNull": true + }, + "is_active": { + "name": "is_active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + 
"notNull": true, + "default": "now()" + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": false + } + }, + "indexes": { + "workflow_deployment_version_workflow_version_unique": { + "name": "workflow_deployment_version_workflow_version_unique", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "version", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_deployment_version_workflow_active_idx": { + "name": "workflow_deployment_version_workflow_active_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "is_active", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_deployment_version_created_at_idx": { + "name": "workflow_deployment_version_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_deployment_version_workflow_id_workflow_id_fk": { + "name": "workflow_deployment_version_workflow_id_workflow_id_fk", + "tableFrom": "workflow_deployment_version", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_edges": { + "name": "workflow_edges", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_block_id": { + "name": "source_block_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "target_block_id": { + "name": "target_block_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "source_handle": { + "name": "source_handle", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "target_handle": { + "name": "target_handle", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_edges_workflow_id_idx": { + "name": "workflow_edges_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_edges_workflow_source_idx": { + "name": "workflow_edges_workflow_source_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "source_block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_edges_workflow_target_idx": { + "name": "workflow_edges_workflow_target_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + 
}, + { + "expression": "target_block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_edges_workflow_id_workflow_id_fk": { + "name": "workflow_edges_workflow_id_workflow_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_edges_source_block_id_workflow_blocks_id_fk": { + "name": "workflow_edges_source_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow_blocks", + "columnsFrom": ["source_block_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_edges_target_block_id_workflow_blocks_id_fk": { + "name": "workflow_edges_target_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_edges", + "tableTo": "workflow_blocks", + "columnsFrom": ["target_block_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_execution_logs": { + "name": "workflow_execution_logs", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "state_snapshot_id": { + "name": "state_snapshot_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "deployment_version_id": { + "name": "deployment_version_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "level": { + "name": "level", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "trigger": { + "name": "trigger", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "started_at": { + "name": "started_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "ended_at": { + "name": "ended_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "total_duration_ms": { + "name": "total_duration_ms", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "execution_data": { + "name": "execution_data", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "cost": { + "name": "cost", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "files": { + "name": "files", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_execution_logs_workflow_id_idx": { + "name": "workflow_execution_logs_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_state_snapshot_id_idx": { + "name": "workflow_execution_logs_state_snapshot_id_idx", + "columns": [ + { + "expression": "state_snapshot_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"workflow_execution_logs_deployment_version_id_idx": { + "name": "workflow_execution_logs_deployment_version_id_idx", + "columns": [ + { + "expression": "deployment_version_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_trigger_idx": { + "name": "workflow_execution_logs_trigger_idx", + "columns": [ + { + "expression": "trigger", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_level_idx": { + "name": "workflow_execution_logs_level_idx", + "columns": [ + { + "expression": "level", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_started_at_idx": { + "name": "workflow_execution_logs_started_at_idx", + "columns": [ + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_execution_id_unique": { + "name": "workflow_execution_logs_execution_id_unique", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_execution_logs_workflow_started_at_idx": { + "name": "workflow_execution_logs_workflow_started_at_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "started_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_execution_logs_workflow_id_workflow_id_fk": { + "name": "workflow_execution_logs_workflow_id_workflow_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk": { + "name": "workflow_execution_logs_state_snapshot_id_workflow_execution_snapshots_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow_execution_snapshots", + "columnsFrom": ["state_snapshot_id"], + "columnsTo": ["id"], + "onDelete": "no action", + "onUpdate": "no action" + }, + "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk": { + "name": "workflow_execution_logs_deployment_version_id_workflow_deployment_version_id_fk", + "tableFrom": "workflow_execution_logs", + "tableTo": "workflow_deployment_version", + "columnsFrom": ["deployment_version_id"], + "columnsTo": ["id"], + "onDelete": "set null", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_execution_snapshots": { + "name": "workflow_execution_snapshots", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "state_hash": { + "name": "state_hash", + "type": "text", + "primaryKey": false, 
+ "notNull": true + }, + "state_data": { + "name": "state_data", + "type": "jsonb", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_snapshots_workflow_id_idx": { + "name": "workflow_snapshots_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_hash_idx": { + "name": "workflow_snapshots_hash_idx", + "columns": [ + { + "expression": "state_hash", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_workflow_hash_idx": { + "name": "workflow_snapshots_workflow_hash_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "state_hash", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_snapshots_created_at_idx": { + "name": "workflow_snapshots_created_at_idx", + "columns": [ + { + "expression": "created_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_execution_snapshots_workflow_id_workflow_id_fk": { + "name": "workflow_execution_snapshots_workflow_id_workflow_id_fk", + "tableFrom": "workflow_execution_snapshots", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_folder": { + "name": "workflow_folder", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "parent_id": { + "name": "parent_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "color": { + "name": "color", + "type": "text", + "primaryKey": false, + "notNull": false, + "default": "'#6B7280'" + }, + "is_expanded": { + "name": "is_expanded", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "sort_order": { + "name": "sort_order", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_folder_user_idx": { + "name": "workflow_folder_user_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + 
"workflow_folder_workspace_parent_idx": { + "name": "workflow_folder_workspace_parent_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "parent_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_folder_parent_sort_idx": { + "name": "workflow_folder_parent_sort_idx", + "columns": [ + { + "expression": "parent_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "sort_order", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_folder_user_id_user_id_fk": { + "name": "workflow_folder_user_id_user_id_fk", + "tableFrom": "workflow_folder", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_folder_workspace_id_workspace_id_fk": { + "name": "workflow_folder_workspace_id_workspace_id_fk", + "tableFrom": "workflow_folder", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_schedule": { + "name": "workflow_schedule", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "block_id": { + "name": "block_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "cron_expression": { + "name": "cron_expression", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "next_run_at": { + "name": "next_run_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "last_ran_at": { + "name": "last_ran_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "last_queued_at": { + "name": "last_queued_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "trigger_type": { + "name": "trigger_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "timezone": { + "name": "timezone", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'UTC'" + }, + "failed_count": { + "name": "failed_count", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "status": { + "name": "status", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'active'" + }, + "last_failed_at": { + "name": "last_failed_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_schedule_workflow_block_unique": { + "name": "workflow_schedule_workflow_block_unique", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "block_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + 
"concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_schedule_workflow_id_workflow_id_fk": { + "name": "workflow_schedule_workflow_id_workflow_id_fk", + "tableFrom": "workflow_schedule", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workflow_schedule_block_id_workflow_blocks_id_fk": { + "name": "workflow_schedule_block_id_workflow_blocks_id_fk", + "tableFrom": "workflow_schedule", + "tableTo": "workflow_blocks", + "columnsFrom": ["block_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workflow_subflows": { + "name": "workflow_subflows", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "config": { + "name": "config", + "type": "jsonb", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workflow_subflows_workflow_id_idx": { + "name": "workflow_subflows_workflow_id_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workflow_subflows_workflow_type_idx": { + "name": "workflow_subflows_workflow_type_idx", + "columns": [ + { + "expression": "workflow_id", + "isExpression": false, + "asc": true, + "nulls": "last" + }, + { + "expression": "type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workflow_subflows_workflow_id_workflow_id_fk": { + "name": "workflow_subflows_workflow_id_workflow_id_fk", + "tableFrom": "workflow_subflows", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace": { + "name": "workspace", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "owner_id": { + "name": "owner_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "billed_account_user_id": { + "name": "billed_account_user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "allow_personal_api_keys": { + "name": "allow_personal_api_keys", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + 
"primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "workspace_owner_id_user_id_fk": { + "name": "workspace_owner_id_user_id_fk", + "tableFrom": "workspace", + "tableTo": "user", + "columnsFrom": ["owner_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_billed_account_user_id_user_id_fk": { + "name": "workspace_billed_account_user_id_user_id_fk", + "tableFrom": "workspace", + "tableTo": "user", + "columnsFrom": ["billed_account_user_id"], + "columnsTo": ["id"], + "onDelete": "no action", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_environment": { + "name": "workspace_environment", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "variables": { + "name": "variables", + "type": "json", + "primaryKey": false, + "notNull": true, + "default": "'{}'" + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_environment_workspace_unique": { + "name": "workspace_environment_workspace_unique", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": true, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_environment_workspace_id_workspace_id_fk": { + "name": "workspace_environment_workspace_id_workspace_id_fk", + "tableFrom": "workspace_environment", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_file": { + "name": "workspace_file", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "name": { + "name": "name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "size": { + "name": "size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "type": { + "name": "type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "uploaded_by": { + "name": "uploaded_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_file_workspace_id_idx": { + "name": "workspace_file_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_file_key_idx": { + "name": "workspace_file_key_idx", + "columns": [ + { + "expression": "key", + 
"isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_file_workspace_id_workspace_id_fk": { + "name": "workspace_file_workspace_id_workspace_id_fk", + "tableFrom": "workspace_file", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_file_uploaded_by_user_id_fk": { + "name": "workspace_file_uploaded_by_user_id_fk", + "tableFrom": "workspace_file", + "tableTo": "user", + "columnsFrom": ["uploaded_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_file_key_unique": { + "name": "workspace_file_key_unique", + "nullsNotDistinct": false, + "columns": ["key"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_files": { + "name": "workspace_files", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "key": { + "name": "key", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "user_id": { + "name": "user_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "context": { + "name": "context", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "original_name": { + "name": "original_name", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "content_type": { + "name": "content_type", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "size": { + "name": "size", + "type": "integer", + "primaryKey": false, + "notNull": true + }, + "uploaded_at": { + "name": "uploaded_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_files_key_idx": { + "name": "workspace_files_key_idx", + "columns": [ + { + "expression": "key", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_user_id_idx": { + "name": "workspace_files_user_id_idx", + "columns": [ + { + "expression": "user_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_workspace_id_idx": { + "name": "workspace_files_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_files_context_idx": { + "name": "workspace_files_context_idx", + "columns": [ + { + "expression": "context", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_files_user_id_user_id_fk": { + "name": "workspace_files_user_id_user_id_fk", + "tableFrom": "workspace_files", + "tableTo": "user", + "columnsFrom": ["user_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_files_workspace_id_workspace_id_fk": { + "name": "workspace_files_workspace_id_workspace_id_fk", + "tableFrom": "workspace_files", 
+ "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_files_key_unique": { + "name": "workspace_files_key_unique", + "nullsNotDistinct": false, + "columns": ["key"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_invitation": { + "name": "workspace_invitation", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "email": { + "name": "email", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "inviter_id": { + "name": "inviter_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "role": { + "name": "role", + "type": "text", + "primaryKey": false, + "notNull": true, + "default": "'member'" + }, + "status": { + "name": "status", + "type": "workspace_invitation_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'pending'" + }, + "token": { + "name": "token", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "permissions": { + "name": "permissions", + "type": "permission_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + "default": "'admin'" + }, + "org_invitation_id": { + "name": "org_invitation_id", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "expires_at": { + "name": "expires_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": {}, + "foreignKeys": { + "workspace_invitation_workspace_id_workspace_id_fk": { + "name": "workspace_invitation_workspace_id_workspace_id_fk", + "tableFrom": "workspace_invitation", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_invitation_inviter_id_user_id_fk": { + "name": "workspace_invitation_inviter_id_user_id_fk", + "tableFrom": "workspace_invitation", + "tableTo": "user", + "columnsFrom": ["inviter_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": { + "workspace_invitation_token_unique": { + "name": "workspace_invitation_token_unique", + "nullsNotDistinct": false, + "columns": ["token"] + } + }, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_notification_delivery": { + "name": "workspace_notification_delivery", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "subscription_id": { + "name": "subscription_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "workflow_id": { + "name": "workflow_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "execution_id": { + "name": "execution_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "status": { + "name": "status", + "type": "notification_delivery_status", + "typeSchema": "public", + "primaryKey": false, + "notNull": true, + 
"default": "'pending'" + }, + "attempts": { + "name": "attempts", + "type": "integer", + "primaryKey": false, + "notNull": true, + "default": 0 + }, + "last_attempt_at": { + "name": "last_attempt_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "next_attempt_at": { + "name": "next_attempt_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "response_status": { + "name": "response_status", + "type": "integer", + "primaryKey": false, + "notNull": false + }, + "response_body": { + "name": "response_body", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "error_message": { + "name": "error_message", + "type": "text", + "primaryKey": false, + "notNull": false + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_notification_delivery_subscription_id_idx": { + "name": "workspace_notification_delivery_subscription_id_idx", + "columns": [ + { + "expression": "subscription_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_execution_id_idx": { + "name": "workspace_notification_delivery_execution_id_idx", + "columns": [ + { + "expression": "execution_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_status_idx": { + "name": "workspace_notification_delivery_status_idx", + "columns": [ + { + "expression": "status", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_delivery_next_attempt_idx": { + "name": "workspace_notification_delivery_next_attempt_idx", + "columns": [ + { + "expression": "next_attempt_at", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk": { + "name": "workspace_notification_delivery_subscription_id_workspace_notification_subscription_id_fk", + "tableFrom": "workspace_notification_delivery", + "tableTo": "workspace_notification_subscription", + "columnsFrom": ["subscription_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_notification_delivery_workflow_id_workflow_id_fk": { + "name": "workspace_notification_delivery_workflow_id_workflow_id_fk", + "tableFrom": "workspace_notification_delivery", + "tableTo": "workflow", + "columnsFrom": ["workflow_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + }, + "public.workspace_notification_subscription": { + "name": "workspace_notification_subscription", + "schema": "", + "columns": { + "id": { + "name": "id", + "type": "text", + "primaryKey": true, + "notNull": true + }, + "workspace_id": { + "name": "workspace_id", + "type": "text", + "primaryKey": false, + "notNull": true + }, + 
"notification_type": { + "name": "notification_type", + "type": "notification_type", + "typeSchema": "public", + "primaryKey": false, + "notNull": true + }, + "workflow_ids": { + "name": "workflow_ids", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "'{}'::text[]" + }, + "all_workflows": { + "name": "all_workflows", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "level_filter": { + "name": "level_filter", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "ARRAY['info', 'error']::text[]" + }, + "trigger_filter": { + "name": "trigger_filter", + "type": "text[]", + "primaryKey": false, + "notNull": true, + "default": "ARRAY['api', 'webhook', 'schedule', 'manual', 'chat']::text[]" + }, + "include_final_output": { + "name": "include_final_output", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_trace_spans": { + "name": "include_trace_spans", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_rate_limits": { + "name": "include_rate_limits", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "include_usage_data": { + "name": "include_usage_data", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": false + }, + "webhook_config": { + "name": "webhook_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "email_recipients": { + "name": "email_recipients", + "type": "text[]", + "primaryKey": false, + "notNull": false + }, + "slack_config": { + "name": "slack_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "alert_config": { + "name": "alert_config", + "type": "jsonb", + "primaryKey": false, + "notNull": false + }, + "last_alert_at": { + "name": "last_alert_at", + "type": "timestamp", + "primaryKey": false, + "notNull": false + }, + "active": { + "name": "active", + "type": "boolean", + "primaryKey": false, + "notNull": true, + "default": true + }, + "created_by": { + "name": "created_by", + "type": "text", + "primaryKey": false, + "notNull": true + }, + "created_at": { + "name": "created_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + }, + "updated_at": { + "name": "updated_at", + "type": "timestamp", + "primaryKey": false, + "notNull": true, + "default": "now()" + } + }, + "indexes": { + "workspace_notification_workspace_id_idx": { + "name": "workspace_notification_workspace_id_idx", + "columns": [ + { + "expression": "workspace_id", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_active_idx": { + "name": "workspace_notification_active_idx", + "columns": [ + { + "expression": "active", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + }, + "workspace_notification_type_idx": { + "name": "workspace_notification_type_idx", + "columns": [ + { + "expression": "notification_type", + "isExpression": false, + "asc": true, + "nulls": "last" + } + ], + "isUnique": false, + "concurrently": false, + "method": "btree", + "with": {} + } + }, + "foreignKeys": { + "workspace_notification_subscription_workspace_id_workspace_id_fk": { + "name": "workspace_notification_subscription_workspace_id_workspace_id_fk", + "tableFrom": 
"workspace_notification_subscription", + "tableTo": "workspace", + "columnsFrom": ["workspace_id"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + }, + "workspace_notification_subscription_created_by_user_id_fk": { + "name": "workspace_notification_subscription_created_by_user_id_fk", + "tableFrom": "workspace_notification_subscription", + "tableTo": "user", + "columnsFrom": ["created_by"], + "columnsTo": ["id"], + "onDelete": "cascade", + "onUpdate": "no action" + } + }, + "compositePrimaryKeys": {}, + "uniqueConstraints": {}, + "policies": {}, + "checkConstraints": {}, + "isRLSEnabled": false + } + }, + "enums": { + "public.billing_blocked_reason": { + "name": "billing_blocked_reason", + "schema": "public", + "values": ["payment_failed", "dispute"] + }, + "public.notification_delivery_status": { + "name": "notification_delivery_status", + "schema": "public", + "values": ["pending", "in_progress", "success", "failed"] + }, + "public.notification_type": { + "name": "notification_type", + "schema": "public", + "values": ["webhook", "email", "slack"] + }, + "public.permission_type": { + "name": "permission_type", + "schema": "public", + "values": ["admin", "write", "read"] + }, + "public.template_creator_type": { + "name": "template_creator_type", + "schema": "public", + "values": ["user", "organization"] + }, + "public.template_status": { + "name": "template_status", + "schema": "public", + "values": ["pending", "approved", "rejected"] + }, + "public.workspace_invitation_status": { + "name": "workspace_invitation_status", + "schema": "public", + "values": ["pending", "accepted", "rejected", "cancelled"] + } + }, + "schemas": {}, + "sequences": {}, + "roles": {}, + "policies": {}, + "views": {}, + "_meta": { + "columns": {}, + "schemas": {}, + "tables": {} + } +} diff --git a/packages/db/migrations/meta/_journal.json b/packages/db/migrations/meta/_journal.json index f793d74bf8..2a414a1580 100644 --- a/packages/db/migrations/meta/_journal.json +++ b/packages/db/migrations/meta/_journal.json @@ -855,6 +855,13 @@ "when": 1765587157593, "tag": "0122_pale_absorbing_man", "breakpoints": true + }, + { + "idx": 123, + "version": "7", + "when": 1765932898404, + "tag": "0123_windy_lockheed", + "breakpoints": true } ] } diff --git a/packages/db/schema.ts b/packages/db/schema.ts index d81034b0ee..6e8cbfec38 100644 --- a/packages/db/schema.ts +++ b/packages/db/schema.ts @@ -1547,6 +1547,8 @@ export const mcpServers = pgTable( connectionStatus: text('connection_status').default('disconnected'), lastError: text('last_error'), + statusConfig: jsonb('status_config').default('{}'), + toolCount: integer('tool_count').default(0), lastToolsRefresh: timestamp('last_tools_refresh'), totalRequests: integer('total_requests').default(0), From f4f74da1dc43833e803c735ecb9941926959ad17 Mon Sep 17 00:00:00 2001 From: Waleed Date: Wed, 17 Dec 2025 10:21:15 -0800 Subject: [PATCH 08/15] feat(i18n): update translations (#2421) Co-authored-by: icecrasher321 --- .../docs/content/docs/de/tools/servicenow.mdx | 108 ++++++++++++++++++ .../docs/content/docs/es/tools/servicenow.mdx | 107 +++++++++++++++++ .../docs/content/docs/fr/tools/servicenow.mdx | 108 ++++++++++++++++++ .../docs/content/docs/ja/tools/servicenow.mdx | 107 +++++++++++++++++ .../docs/content/docs/zh/tools/servicenow.mdx | 107 +++++++++++++++++ apps/docs/i18n.lock | 34 ++++++ 6 files changed, 571 insertions(+) create mode 100644 apps/docs/content/docs/de/tools/servicenow.mdx create mode 100644 
apps/docs/content/docs/es/tools/servicenow.mdx create mode 100644 apps/docs/content/docs/fr/tools/servicenow.mdx create mode 100644 apps/docs/content/docs/ja/tools/servicenow.mdx create mode 100644 apps/docs/content/docs/zh/tools/servicenow.mdx diff --git a/apps/docs/content/docs/de/tools/servicenow.mdx b/apps/docs/content/docs/de/tools/servicenow.mdx new file mode 100644 index 0000000000..b977bc4f3c --- /dev/null +++ b/apps/docs/content/docs/de/tools/servicenow.mdx @@ -0,0 +1,108 @@ +--- +title: ServiceNow +description: Erstellen, lesen, aktualisieren, löschen und Massenimport von + ServiceNow-Datensätzen +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +## Nutzungsanleitung + +Integrieren Sie ServiceNow in Ihren Workflow. Kann Datensätze in jeder ServiceNow-Tabelle erstellen, lesen, aktualisieren und löschen (Vorfälle, Aufgaben, Benutzer usw.). Unterstützt Massenimport-Operationen für Datenmigration und ETL. + +## Tools + +### `servicenow_create_record` + +Erstellen eines neuen Datensatzes in einer ServiceNow-Tabelle + +#### Eingabe + +| Parameter | Typ | Erforderlich | Beschreibung | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Ja | ServiceNow-Instanz-URL \(z. B. https://instance.service-now.com\) | +| `credential` | string | Nein | ServiceNow OAuth-Anmeldeinformations-ID | +| `tableName` | string | Ja | Tabellenname \(z. B. incident, task, sys_user\) | +| `fields` | json | Ja | Felder, die für den Datensatz festgelegt werden sollen \(JSON-Objekt\) | + +#### Ausgabe + +| Parameter | Typ | Beschreibung | +| --------- | ---- | ----------- | +| `record` | json | Erstellter ServiceNow-Datensatz mit sys_id und anderen Feldern | +| `metadata` | json | Metadaten der Operation | + +### `servicenow_read_record` + +Lesen von Datensätzen aus einer ServiceNow-Tabelle + +#### Eingabe + +| Parameter | Typ | Erforderlich | Beschreibung | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Nein | ServiceNow-Instanz-URL \(automatisch aus OAuth erkannt, falls nicht angegeben\) | +| `credential` | string | Nein | ServiceNow OAuth-Anmeldeinformations-ID | +| `tableName` | string | Ja | Tabellenname | +| `sysId` | string | Nein | Spezifische Datensatz-sys_id | +| `number` | string | Nein | Datensatznummer \(z. B. INC0010001\) | +| `query` | string | Nein | Kodierte Abfragezeichenfolge \(z. B. 
"active=true^priority=1"\) | +| `limit` | number | Nein | Maximale Anzahl der zurückzugebenden Datensätze | +| `fields` | string | Nein | Durch Kommas getrennte Liste der zurückzugebenden Felder | + +#### Ausgabe + +| Parameter | Typ | Beschreibung | +| --------- | ---- | ----------- | +| `records` | array | Array von ServiceNow-Datensätzen | +| `metadata` | json | Metadaten der Operation | + +### `servicenow_update_record` + +Einen bestehenden Datensatz in einer ServiceNow-Tabelle aktualisieren + +#### Eingabe + +| Parameter | Typ | Erforderlich | Beschreibung | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Nein | ServiceNow-Instanz-URL \(wird automatisch aus OAuth erkannt, falls nicht angegeben\) | +| `credential` | string | Nein | ServiceNow-OAuth-Credential-ID | +| `tableName` | string | Ja | Tabellenname | +| `sysId` | string | Ja | Sys_id des zu aktualisierenden Datensatzes | +| `fields` | json | Ja | Zu aktualisierende Felder \(JSON-Objekt\) | + +#### Ausgabe + +| Parameter | Typ | Beschreibung | +| --------- | ---- | ----------- | +| `record` | json | Aktualisierter ServiceNow-Datensatz | +| `metadata` | json | Metadaten der Operation | + +### `servicenow_delete_record` + +Einen Datensatz aus einer ServiceNow-Tabelle löschen + +#### Eingabe + +| Parameter | Typ | Erforderlich | Beschreibung | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Nein | ServiceNow-Instanz-URL \(wird automatisch aus OAuth erkannt, falls nicht angegeben\) | +| `credential` | string | Nein | ServiceNow-OAuth-Credential-ID | +| `tableName` | string | Ja | Tabellenname | +| `sysId` | string | Ja | Sys_id des zu löschenden Datensatzes | + +#### Ausgabe + +| Parameter | Typ | Beschreibung | +| --------- | ---- | ----------- | +| `success` | boolean | Ob das Löschen erfolgreich war | +| `metadata` | json | Metadaten der Operation | + +## Hinweise + +- Kategorie: `tools` +- Typ: `servicenow` diff --git a/apps/docs/content/docs/es/tools/servicenow.mdx b/apps/docs/content/docs/es/tools/servicenow.mdx new file mode 100644 index 0000000000..8897344c69 --- /dev/null +++ b/apps/docs/content/docs/es/tools/servicenow.mdx @@ -0,0 +1,107 @@ +--- +title: ServiceNow +description: Crea, lee, actualiza, elimina e importa masivamente registros de ServiceNow +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +## Instrucciones de uso + +Integra ServiceNow en tu flujo de trabajo. Puede crear, leer, actualizar y eliminar registros en cualquier tabla de ServiceNow (incidentes, tareas, usuarios, etc.). Admite operaciones de importación masiva para migración de datos y ETL. 
+ +## Herramientas + +### `servicenow_create_record` + +Crea un nuevo registro en una tabla de ServiceNow + +#### Entrada + +| Parámetro | Tipo | Requerido | Descripción | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Sí | URL de la instancia de ServiceNow \(ej., https://instance.service-now.com\) | +| `credential` | string | No | ID de credencial OAuth de ServiceNow | +| `tableName` | string | Sí | Nombre de la tabla \(ej., incident, task, sys_user\) | +| `fields` | json | Sí | Campos a establecer en el registro \(objeto JSON\) | + +#### Salida + +| Parámetro | Tipo | Descripción | +| --------- | ---- | ----------- | +| `record` | json | Registro de ServiceNow creado con sys_id y otros campos | +| `metadata` | json | Metadatos de la operación | + +### `servicenow_read_record` + +Lee registros de una tabla de ServiceNow + +#### Entrada + +| Parámetro | Tipo | Requerido | Descripción | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | No | URL de la instancia de ServiceNow \(detectada automáticamente desde OAuth si no se proporciona\) | +| `credential` | string | No | ID de credencial OAuth de ServiceNow | +| `tableName` | string | Sí | Nombre de la tabla | +| `sysId` | string | No | sys_id específico del registro | +| `number` | string | No | Número de registro \(ej., INC0010001\) | +| `query` | string | No | Cadena de consulta codificada \(ej., "active=true^priority=1"\) | +| `limit` | number | No | Número máximo de registros a devolver | +| `fields` | string | No | Lista de campos separados por comas a devolver | + +#### Salida + +| Parámetro | Tipo | Descripción | +| --------- | ---- | ----------- | +| `records` | array | Array de registros de ServiceNow | +| `metadata` | json | Metadatos de la operación | + +### `servicenow_update_record` + +Actualizar un registro existente en una tabla de ServiceNow + +#### Entrada + +| Parámetro | Tipo | Requerido | Descripción | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | No | URL de la instancia de ServiceNow \(detectada automáticamente desde OAuth si no se proporciona\) | +| `credential` | string | No | ID de credencial OAuth de ServiceNow | +| `tableName` | string | Sí | Nombre de la tabla | +| `sysId` | string | Sí | sys_id del registro a actualizar | +| `fields` | json | Sí | Campos a actualizar \(objeto JSON\) | + +#### Salida + +| Parámetro | Tipo | Descripción | +| --------- | ---- | ----------- | +| `record` | json | Registro de ServiceNow actualizado | +| `metadata` | json | Metadatos de la operación | + +### `servicenow_delete_record` + +Eliminar un registro de una tabla de ServiceNow + +#### Entrada + +| Parámetro | Tipo | Requerido | Descripción | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | No | URL de la instancia de ServiceNow \(detectada automáticamente desde OAuth si no se proporciona\) | +| `credential` | string | No | ID de credencial OAuth de ServiceNow | +| `tableName` | string | Sí | Nombre de la tabla | +| `sysId` | string | Sí | sys_id del registro a eliminar | + +#### Salida + +| Parámetro | Tipo | Descripción | +| --------- | ---- | ----------- | +| `success` | boolean | Si la eliminación fue exitosa | +| `metadata` | json | Metadatos de la operación | + +## Notas + +- Categoría: `tools` +- Tipo: `servicenow` diff --git a/apps/docs/content/docs/fr/tools/servicenow.mdx b/apps/docs/content/docs/fr/tools/servicenow.mdx new file mode 100644 index 0000000000..c32c66423d --- /dev/null +++ 
b/apps/docs/content/docs/fr/tools/servicenow.mdx @@ -0,0 +1,108 @@ +--- +title: ServiceNow +description: Créer, lire, mettre à jour, supprimer et importer en masse des + enregistrements ServiceNow +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +## Instructions d'utilisation + +Intégrez ServiceNow dans votre flux de travail. Permet de créer, lire, mettre à jour et supprimer des enregistrements dans n'importe quelle table ServiceNow (incidents, tâches, utilisateurs, etc.). Prend en charge les opérations d'importation en masse pour la migration de données et l'ETL. + +## Outils + +### `servicenow_create_record` + +Créer un nouvel enregistrement dans une table ServiceNow + +#### Entrée + +| Paramètre | Type | Requis | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Oui | URL de l'instance ServiceNow \(par exemple, https://instance.service-now.com\) | +| `credential` | string | Non | ID d'identification OAuth ServiceNow | +| `tableName` | string | Oui | Nom de la table \(par exemple, incident, task, sys_user\) | +| `fields` | json | Oui | Champs à définir sur l'enregistrement \(objet JSON\) | + +#### Sortie + +| Paramètre | Type | Description | +| --------- | ---- | ----------- | +| `record` | json | Enregistrement ServiceNow créé avec sys_id et autres champs | +| `metadata` | json | Métadonnées de l'opération | + +### `servicenow_read_record` + +Lire des enregistrements d'une table ServiceNow + +#### Entrée + +| Paramètre | Type | Requis | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Non | URL de l'instance ServiceNow \(détectée automatiquement depuis OAuth si non fournie\) | +| `credential` | string | Non | ID d'identification OAuth ServiceNow | +| `tableName` | string | Oui | Nom de la table | +| `sysId` | string | Non | sys_id spécifique de l'enregistrement | +| `number` | string | Non | Numéro d'enregistrement \(par exemple, INC0010001\) | +| `query` | string | Non | Chaîne de requête encodée \(par exemple, "active=true^priority=1"\) | +| `limit` | number | Non | Nombre maximum d'enregistrements à retourner | +| `fields` | string | Non | Liste de champs séparés par des virgules à retourner | + +#### Sortie + +| Paramètre | Type | Description | +| --------- | ---- | ----------- | +| `records` | array | Tableau des enregistrements ServiceNow | +| `metadata` | json | Métadonnées de l'opération | + +### `servicenow_update_record` + +Mettre à jour un enregistrement existant dans une table ServiceNow + +#### Entrée + +| Paramètre | Type | Requis | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Non | URL de l'instance ServiceNow (détectée automatiquement depuis OAuth si non fournie) | +| `credential` | string | Non | ID des identifiants OAuth ServiceNow | +| `tableName` | string | Oui | Nom de la table | +| `sysId` | string | Oui | sys_id de l'enregistrement à mettre à jour | +| `fields` | json | Oui | Champs à mettre à jour (objet JSON) | + +#### Sortie + +| Paramètre | Type | Description | +| --------- | ---- | ----------- | +| `record` | json | Enregistrement ServiceNow mis à jour | +| `metadata` | json | Métadonnées de l'opération | + +### `servicenow_delete_record` + +Supprimer un enregistrement d'une table ServiceNow + +#### Entrée + +| Paramètre | Type | Requis | Description | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | Non | URL de l'instance ServiceNow (détectée automatiquement depuis OAuth si non 
fournie) | +| `credential` | string | Non | ID des identifiants OAuth ServiceNow | +| `tableName` | string | Oui | Nom de la table | +| `sysId` | string | Oui | sys_id de l'enregistrement à supprimer | + +#### Sortie + +| Paramètre | Type | Description | +| --------- | ---- | ----------- | +| `success` | boolean | Indique si la suppression a réussi | +| `metadata` | json | Métadonnées de l'opération | + +## Notes + +- Catégorie : `tools` +- Type : `servicenow` diff --git a/apps/docs/content/docs/ja/tools/servicenow.mdx b/apps/docs/content/docs/ja/tools/servicenow.mdx new file mode 100644 index 0000000000..49266ef909 --- /dev/null +++ b/apps/docs/content/docs/ja/tools/servicenow.mdx @@ -0,0 +1,107 @@ +--- +title: ServiceNow +description: ServiceNowレコードの作成、読み取り、更新、削除、一括インポート +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +## 使用方法 + +ServiceNowをワークフローに統合します。任意のServiceNowテーブル(インシデント、タスク、ユーザーなど)のレコードを作成、読み取り、更新、削除できます。データ移行とETLのための一括インポート操作をサポートします。 + +## ツール + +### `servicenow_create_record` + +ServiceNowテーブルに新しいレコードを作成 + +#### 入力 + +| パラメータ | 型 | 必須 | 説明 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | はい | ServiceNowインスタンスURL(例:https://instance.service-now.com) | +| `credential` | string | いいえ | ServiceNow OAuth認証情報ID | +| `tableName` | string | はい | テーブル名(例:incident、task、sys_user) | +| `fields` | json | はい | レコードに設定するフィールド(JSONオブジェクト) | + +#### 出力 + +| パラメータ | 型 | 説明 | +| --------- | ---- | ----------- | +| `record` | json | sys_idおよびその他のフィールドを含む作成されたServiceNowレコード | +| `metadata` | json | 操作メタデータ | + +### `servicenow_read_record` + +ServiceNowテーブルからレコードを読み取り + +#### 入力 + +| パラメータ | 型 | 必須 | 説明 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | いいえ | ServiceNowインスタンスURL(指定されていない場合はOAuthから自動検出) | +| `credential` | string | いいえ | ServiceNow OAuth認証情報ID | +| `tableName` | string | はい | テーブル名 | +| `sysId` | string | いいえ | 特定のレコードsys_id | +| `number` | string | いいえ | レコード番号(例:INC0010001) | +| `query` | string | いいえ | エンコードされたクエリ文字列(例:"active=true^priority=1") | +| `limit` | number | いいえ | 返す最大レコード数 | +| `fields` | string | いいえ | 返すフィールドのカンマ区切りリスト | + +#### 出力 + +| パラメータ | 型 | 説明 | +| --------- | ---- | ----------- | +| `records` | array | ServiceNowレコードの配列 | +| `metadata` | json | 操作メタデータ | + +### `servicenow_update_record` + +ServiceNowテーブル内の既存のレコードを更新します + +#### 入力 + +| パラメータ | 型 | 必須 | 説明 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | いいえ | ServiceNowインスタンスURL(指定されていない場合はOAuthから自動検出) | +| `credential` | string | いいえ | ServiceNow OAuth認証情報ID | +| `tableName` | string | はい | テーブル名 | +| `sysId` | string | はい | 更新するレコードのsys_id | +| `fields` | json | はい | 更新するフィールド(JSONオブジェクト) | + +#### 出力 + +| パラメータ | 型 | 説明 | +| --------- | ---- | ----------- | +| `record` | json | 更新されたServiceNowレコード | +| `metadata` | json | 操作メタデータ | + +### `servicenow_delete_record` + +ServiceNowテーブルからレコードを削除します + +#### 入力 + +| パラメータ | 型 | 必須 | 説明 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | いいえ | ServiceNowインスタンスURL(指定されていない場合はOAuthから自動検出) | +| `credential` | string | いいえ | ServiceNow OAuth認証情報ID | +| `tableName` | string | はい | テーブル名 | +| `sysId` | string | はい | 削除するレコードのsys_id | + +#### 出力 + +| パラメータ | 型 | 説明 | +| --------- | ---- | ----------- | +| `success` | boolean | 削除が成功したかどうか | +| `metadata` | json | 操作メタデータ | + +## 注記 + +- カテゴリー: `tools` +- タイプ: `servicenow` diff --git a/apps/docs/content/docs/zh/tools/servicenow.mdx b/apps/docs/content/docs/zh/tools/servicenow.mdx new 
file mode 100644 index 0000000000..274470b0a4 --- /dev/null +++ b/apps/docs/content/docs/zh/tools/servicenow.mdx @@ -0,0 +1,107 @@ +--- +title: ServiceNow +description: 创建、读取、更新、删除及批量导入 ServiceNow 记录 +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +## 使用说明 + +将 ServiceNow 集成到您的工作流程中。可在任意 ServiceNow 表(如事件、任务、用户等)中创建、读取、更新和删除记录。支持批量导入操作,便于数据迁移和 ETL。 + +## 工具 + +### `servicenow_create_record` + +在 ServiceNow 表中创建新记录 + +#### 输入 + +| 参数 | 类型 | 必填 | 说明 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | 是 | ServiceNow 实例 URL(例如:https://instance.service-now.com) | +| `credential` | string | 否 | ServiceNow OAuth 凭证 ID | +| `tableName` | string | 是 | 表名(例如:incident、task、sys_user) | +| `fields` | json | 是 | 要设置在记录上的字段(JSON 对象) | + +#### 输出 + +| 参数 | 类型 | 说明 | +| --------- | ---- | ----------- | +| `record` | json | 创建的 ServiceNow 记录,包含 sys_id 及其他字段 | +| `metadata` | json | 操作元数据 | + +### `servicenow_read_record` + +从 ServiceNow 表中读取记录 + +#### 输入 + +| 参数 | 类型 | 必填 | 说明 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | 否 | ServiceNow 实例 URL(如未提供,将通过 OAuth 自动检测) | +| `credential` | string | 否 | ServiceNow OAuth 凭证 ID | +| `tableName` | string | 是 | 表名 | +| `sysId` | string | 否 | 指定记录 sys_id | +| `number` | string | 否 | 记录编号(例如:INC0010001) | +| `query` | string | 否 | 编码查询字符串(例如:"active=true^priority=1") | +| `limit` | number | 否 | 返回的最大记录数 | +| `fields` | string | 否 | 要返回的字段列表(以逗号分隔) | + +#### 输出 + +| 参数 | 类型 | 描述 | +| --------- | ---- | ----------- | +| `records` | array | ServiceNow 记录数组 | +| `metadata` | json | 操作元数据 | + +### `servicenow_update_record` + +更新 ServiceNow 表中的现有记录 + +#### 输入 + +| 参数 | 类型 | 是否必填 | 描述 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | 否 | ServiceNow 实例 URL(如果未提供,将通过 OAuth 自动检测) | +| `credential` | string | 否 | ServiceNow OAuth 凭证 ID | +| `tableName` | string | 是 | 表名 | +| `sysId` | string | 是 | 要更新的记录 sys_id | +| `fields` | json | 是 | 要更新的字段(JSON 对象) | + +#### 输出 + +| 参数 | 类型 | 描述 | +| --------- | ---- | ----------- | +| `record` | json | 已更新的 ServiceNow 记录 | +| `metadata` | json | 操作元数据 | + +### `servicenow_delete_record` + +从 ServiceNow 表中删除记录 + +#### 输入 + +| 参数 | 类型 | 是否必填 | 描述 | +| --------- | ---- | -------- | ----------- | +| `instanceUrl` | string | 否 | ServiceNow 实例 URL(如果未提供,将通过 OAuth 自动检测) | +| `credential` | string | 否 | ServiceNow OAuth 凭证 ID | +| `tableName` | string | 是 | 表名 | +| `sysId` | string | 是 | 要删除的记录 sys_id | + +#### 输出 + +| 参数 | 类型 | 描述 | +| --------- | ---- | ----------- | +| `success` | boolean | 删除是否成功 | +| `metadata` | json | 操作元数据 | + +## 注意事项 + +- 分类:`tools` +- 类型:`servicenow` diff --git a/apps/docs/i18n.lock b/apps/docs/i18n.lock index bd3c2e424d..df6782584a 100644 --- a/apps/docs/i18n.lock +++ b/apps/docs/i18n.lock @@ -49822,3 +49822,37 @@ checksums: content/472: dbc5fceeefb3ab5fa505394becafef4e content/473: b3f310d5ef115bea5a8b75bf25d7ea9a content/474: 27c398e669b297cea076e4ce4cc0c5eb + 9a28da736b42bf8de55126d4c06b6150: + meta/title: 418d5c8a18ad73520b38765741601f32 + meta/description: 2b5a9723c7a45d2be5001d5d056b7c7b + content/0: 1b031fb0c62c46b177aeed5c3d3f8f80 + content/1: e72670f88454b5b1c955b029de5fa8b5 + content/2: 821e6394b0a953e2b0842b04ae8f3105 + content/3: 7fa671d05a60d4f25b4980405c2c7278 + content/4: 9c8aa3f09c9b2bd50ea4cdff3598ea4e + content/5: 263633aee6db9332de806ae50d87de05 + content/6: 5a7e2171e5f73fec5eae21a50e5de661 + content/7: 371d0e46b4bd2c23f559b8bc112f6955 + content/8: 10d2d4eccb4b8923f048980dc16e43e1 + 
content/9: bcadfc362b69078beee0088e5936c98b + content/10: d81ef802f80143282cf4e534561a9570 + content/11: 02233e6212003c1d121424cfd8b86b62 + content/12: efe2c6dd368708de68a1addbfdb11b0c + content/13: 371d0e46b4bd2c23f559b8bc112f6955 + content/14: 0f3295854b7de5dbfab1ebd2a130b498 + content/15: bcadfc362b69078beee0088e5936c98b + content/16: 953f353184dc27db1f20156db2a9ad90 + content/17: 2011e87d0555cd0ab133ef2d35e7a37b + content/18: dbf08acb413d845ec419e45b1f986bdb + content/19: 371d0e46b4bd2c23f559b8bc112f6955 + content/20: 3a8417b390ec7d3d55b1920c721e9006 + content/21: bcadfc362b69078beee0088e5936c98b + content/22: c06a5bb458242baa23d34957034c2fe7 + content/23: ff043e912417bc29ac7c64520160c07d + content/24: 9c2175ab469cb6ff9e62bc8bdcf7621d + content/25: 371d0e46b4bd2c23f559b8bc112f6955 + content/26: 67e6ba04cf67f92e714ed94e7483dec5 + content/27: bcadfc362b69078beee0088e5936c98b + content/28: fd0f38eb3fe5cf95be366a4ff6b4fb90 + content/29: b3f310d5ef115bea5a8b75bf25d7ea9a + content/30: 4a7b2c644e487f3d12b6a6b54f8c6773 From 08bc1125bd044018267f3f3f0c5f4fa0e3e60d10 Mon Sep 17 00:00:00 2001 From: Waleed Date: Wed, 17 Dec 2025 10:21:35 -0800 Subject: [PATCH 09/15] fix(cmd-k): when navigating to current workspace/workflow, close modal instead of navigating (#2420) * fix(cmd-k): when navigating to current workspace, close modal instead of navigating * ack PR comment --- .../components/search-modal/search-modal.tsx | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/search-modal/search-modal.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/search-modal/search-modal.tsx index d641ad7d57..cbe1880ded 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/search-modal/search-modal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/sidebar/components/search-modal/search-modal.tsx @@ -423,7 +423,21 @@ export function SearchModal({ } break case 'workspace': + if (item.isCurrent) { + break + } + if (item.href) { + router.push(item.href) + } + break case 'workflow': + if (!item.isCurrent && item.href) { + router.push(item.href) + window.dispatchEvent( + new CustomEvent(SIDEBAR_SCROLL_EVENT, { detail: { itemId: item.id } }) + ) + } + break case 'page': case 'doc': if (item.href) { @@ -431,12 +445,6 @@ export function SearchModal({ window.open(item.href, '_blank', 'noopener,noreferrer') } else { router.push(item.href) - // Scroll to the workflow in the sidebar after navigation - if (item.type === 'workflow') { - window.dispatchEvent( - new CustomEvent(SIDEBAR_SCROLL_EVENT, { detail: { itemId: item.id } }) - ) - } } } break From 9a6c68789da2f3520d06bbed86447f12554601a5 Mon Sep 17 00:00:00 2001 From: Vikhyath Mondreti Date: Wed, 17 Dec 2025 11:49:24 -0800 Subject: [PATCH 10/15] fix(subflow): resizing live update --- .../[workspaceId]/w/[workflowId]/workflow.tsx | 86 ++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx index edb40a9aea..7396d1869e 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx @@ -18,6 +18,7 @@ import { useShallow } from 'zustand/react/shallow' import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access' import { createLogger } from 
'@/lib/logs/console/logger' import type { OAuthProvider } from '@/lib/oauth' +import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions' import { TriggerUtils } from '@/lib/workflows/triggers/triggers' import { useWorkspacePermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider' import { @@ -1501,6 +1502,77 @@ const WorkflowContent = React.memo(() => { // Only sync non-position changes (like selection) to store if needed }, []) + /** + * Updates container dimensions in displayNodes during drag. + * This allows live resizing of containers as their children are dragged. + */ + const updateContainerDimensionsDuringDrag = useCallback( + (draggedNodeId: string, draggedNodePosition: { x: number; y: number }) => { + const parentId = blocks[draggedNodeId]?.data?.parentId + if (!parentId) return + + setDisplayNodes((currentNodes) => { + // Find all children of this container from current displayNodes + const childNodes = currentNodes.filter((n) => n.parentId === parentId) + if (childNodes.length === 0) return currentNodes + + // Calculate dimensions using current positions from displayNodes + // Match padding values from use-node-utilities.ts calculateLoopDimensions + const headerHeight = 50 + const leftPadding = 16 + const rightPadding = 80 + const topPadding = 16 + const bottomPadding = 16 + const minWidth = CONTAINER_DIMENSIONS.DEFAULT_WIDTH + const minHeight = CONTAINER_DIMENSIONS.DEFAULT_HEIGHT + + let maxRight = 0 + let maxBottom = 0 + + childNodes.forEach((node) => { + // Use the dragged node's live position, others from displayNodes + const nodePosition = node.id === draggedNodeId ? draggedNodePosition : node.position + + // Get dimensions - use block store for height estimates + const blockData = blocks[node.id] + const nodeWidth = BLOCK_DIMENSIONS.FIXED_WIDTH + const nodeHeight = blockData?.height || node.height || BLOCK_DIMENSIONS.MIN_HEIGHT + + const rightEdge = nodePosition.x + nodeWidth + const bottomEdge = nodePosition.y + nodeHeight + + maxRight = Math.max(maxRight, rightEdge) + maxBottom = Math.max(maxBottom, bottomEdge) + }) + + const newWidth = Math.max(minWidth, leftPadding + maxRight + rightPadding) + const newHeight = Math.max(minHeight, headerHeight + topPadding + maxBottom + bottomPadding) + + // Update the container node's dimensions in displayNodes + return currentNodes.map((node) => { + if (node.id === parentId) { + const currentWidth = node.data?.width || CONTAINER_DIMENSIONS.DEFAULT_WIDTH + const currentHeight = node.data?.height || CONTAINER_DIMENSIONS.DEFAULT_HEIGHT + + // Only update if dimensions changed + if (newWidth !== currentWidth || newHeight !== currentHeight) { + return { + ...node, + data: { + ...node.data, + width: newWidth, + height: newHeight, + }, + } + } + } + return node + }) + }) + }, + [blocks] + ) + /** * Effect to resize loops when nodes change (add/remove/position change). * Runs on structural changes only - not during drag (position-only changes). 
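The live resize above recomputes the container's bounding box from its children, using the same header/padding scheme as calculateLoopDimensions in use-node-utilities.ts. A minimal standalone sketch of that math, with the header and padding values taken from this patch and the minimum dimensions treated as assumed placeholders (the patch itself clamps to CONTAINER_DIMENSIONS.DEFAULT_WIDTH/DEFAULT_HEIGHT, whose values are not shown here):

// Sketch only: recompute a container's size from its children's bounds.
// Header/padding values mirror the patch; the MIN_* values are assumptions.
interface ChildBox {
  x: number // position relative to the container's content area
  y: number
  width: number
  height: number
}

const HEADER_HEIGHT = 50
const LEFT_PADDING = 16
const RIGHT_PADDING = 80
const TOP_PADDING = 16
const BOTTOM_PADDING = 16
const MIN_WIDTH = 400 // assumed minimum; the patch uses CONTAINER_DIMENSIONS.DEFAULT_WIDTH
const MIN_HEIGHT = 200 // assumed minimum; the patch uses CONTAINER_DIMENSIONS.DEFAULT_HEIGHT

function containerDimensions(children: ChildBox[]): { width: number; height: number } {
  if (children.length === 0) {
    return { width: MIN_WIDTH, height: MIN_HEIGHT }
  }
  // Rightmost and bottommost edges of the children inside the content area.
  const maxRight = Math.max(...children.map((c) => c.x + c.width))
  const maxBottom = Math.max(...children.map((c) => c.y + c.height))
  return {
    width: Math.max(MIN_WIDTH, LEFT_PADDING + maxRight + RIGHT_PADDING),
    height: Math.max(MIN_HEIGHT, HEADER_HEIGHT + TOP_PADDING + maxBottom + BOTTOM_PADDING),
  }
}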
@@ -1681,6 +1753,11 @@ const WorkflowContent = React.memo(() => { // Get the current parent ID of the node being dragged const currentParentId = blocks[node.id]?.data?.parentId || null + // If the node is inside a container, update container dimensions during drag + if (currentParentId) { + updateContainerDimensionsDuringDrag(node.id, node.position) + } + // Check if this is a starter block - starter blocks should never be in containers const isStarterBlock = node.data?.type === 'starter' if (isStarterBlock) { @@ -1812,7 +1889,14 @@ const WorkflowContent = React.memo(() => { } } }, - [getNodes, potentialParentId, blocks, getNodeAbsolutePosition, getNodeDepth] + [ + getNodes, + potentialParentId, + blocks, + getNodeAbsolutePosition, + getNodeDepth, + updateContainerDimensionsDuringDrag, + ] ) /** Captures initial parent ID and position when drag starts. */ From 8775e76c326d1eb1fb485c0fb0902b590f61f14d Mon Sep 17 00:00:00 2001 From: Vikhyath Mondreti Date: Wed, 17 Dec 2025 12:07:57 -0800 Subject: [PATCH 11/15] improvement(subflow): resize vertical height estimate (#2428) * improvement(node-dims): share constants for node padding * fix vertical height estimation --- .../[workflowId]/hooks/use-node-utilities.ts | 45 +++++++------------ .../[workspaceId]/w/[workflowId]/workflow.tsx | 44 +++++++----------- .../lib/workflows/blocks/block-dimensions.ts | 4 ++ 3 files changed, 38 insertions(+), 55 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-node-utilities.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-node-utilities.ts index b8bb86eebe..cbb8c9e113 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-node-utilities.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-node-utilities.ts @@ -252,23 +252,12 @@ export function useNodeUtilities(blocks: Record) { */ const calculateLoopDimensions = useCallback( (nodeId: string): { width: number; height: number } => { - const minWidth = CONTAINER_DIMENSIONS.DEFAULT_WIDTH - const minHeight = CONTAINER_DIMENSIONS.DEFAULT_HEIGHT - - // Match styling in subflow-node.tsx: - // - Header section: 50px total height - // - Content area: px-[16px] pb-[0px] pt-[16px] pr-[70px] - // Left padding: 16px, Right padding: 64px, Top padding: 16px, Bottom padding: -6px (reduced by additional 6px from 0 to achieve 14px total reduction from original 8px) - // - Children are positioned relative to the content area (after header, inside padding) - const headerHeight = 50 - const leftPadding = 16 - const rightPadding = 80 - const topPadding = 16 - const bottomPadding = 16 - const childNodes = getNodes().filter((node) => node.parentId === nodeId) if (childNodes.length === 0) { - return { width: minWidth, height: minHeight } + return { + width: CONTAINER_DIMENSIONS.DEFAULT_WIDTH, + height: CONTAINER_DIMENSIONS.DEFAULT_HEIGHT, + } } let maxRight = 0 @@ -276,21 +265,21 @@ export function useNodeUtilities(blocks: Record) { childNodes.forEach((node) => { const { width: nodeWidth, height: nodeHeight } = getBlockDimensions(node.id) - - // Child positions are relative to content area's inner top-left (inside padding) - // Calculate the rightmost and bottommost edges of children - const rightEdge = node.position.x + nodeWidth - const bottomEdge = node.position.y + nodeHeight - - maxRight = Math.max(maxRight, rightEdge) - maxBottom = Math.max(maxBottom, bottomEdge) + maxRight = Math.max(maxRight, node.position.x + nodeWidth) + maxBottom = Math.max(maxBottom, node.position.y + nodeHeight) }) 
- // Container dimensions = header + padding + children bounds + padding - // Width: left padding + max child right edge + right padding (64px) - const width = Math.max(minWidth, leftPadding + maxRight + rightPadding) - // Height: header + top padding + max child bottom edge + bottom padding (8px) - const height = Math.max(minHeight, headerHeight + topPadding + maxBottom + bottomPadding) + const width = Math.max( + CONTAINER_DIMENSIONS.DEFAULT_WIDTH, + CONTAINER_DIMENSIONS.LEFT_PADDING + maxRight + CONTAINER_DIMENSIONS.RIGHT_PADDING + ) + const height = Math.max( + CONTAINER_DIMENSIONS.DEFAULT_HEIGHT, + CONTAINER_DIMENSIONS.HEADER_HEIGHT + + CONTAINER_DIMENSIONS.TOP_PADDING + + maxBottom + + CONTAINER_DIMENSIONS.BOTTOM_PADDING + ) return { width, height } }, diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx index 7396d1869e..906e4a245d 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/workflow.tsx @@ -18,7 +18,7 @@ import { useShallow } from 'zustand/react/shallow' import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access' import { createLogger } from '@/lib/logs/console/logger' import type { OAuthProvider } from '@/lib/oauth' -import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions' +import { CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions' import { TriggerUtils } from '@/lib/workflows/triggers/triggers' import { useWorkspacePermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider' import { @@ -177,6 +177,7 @@ const WorkflowContent = React.memo(() => { resizeLoopNodes, updateNodeParent: updateNodeParentUtil, getNodeAnchorPosition, + getBlockDimensions, } = useNodeUtilities(blocks) /** Triggers immediate subflow resize without delays. */ @@ -1512,43 +1513,32 @@ const WorkflowContent = React.memo(() => { if (!parentId) return setDisplayNodes((currentNodes) => { - // Find all children of this container from current displayNodes const childNodes = currentNodes.filter((n) => n.parentId === parentId) if (childNodes.length === 0) return currentNodes - // Calculate dimensions using current positions from displayNodes - // Match padding values from use-node-utilities.ts calculateLoopDimensions - const headerHeight = 50 - const leftPadding = 16 - const rightPadding = 80 - const topPadding = 16 - const bottomPadding = 16 - const minWidth = CONTAINER_DIMENSIONS.DEFAULT_WIDTH - const minHeight = CONTAINER_DIMENSIONS.DEFAULT_HEIGHT - let maxRight = 0 let maxBottom = 0 childNodes.forEach((node) => { - // Use the dragged node's live position, others from displayNodes const nodePosition = node.id === draggedNodeId ? 
draggedNodePosition : node.position + const { width: nodeWidth, height: nodeHeight } = getBlockDimensions(node.id) - // Get dimensions - use block store for height estimates - const blockData = blocks[node.id] - const nodeWidth = BLOCK_DIMENSIONS.FIXED_WIDTH - const nodeHeight = blockData?.height || node.height || BLOCK_DIMENSIONS.MIN_HEIGHT - - const rightEdge = nodePosition.x + nodeWidth - const bottomEdge = nodePosition.y + nodeHeight - - maxRight = Math.max(maxRight, rightEdge) - maxBottom = Math.max(maxBottom, bottomEdge) + maxRight = Math.max(maxRight, nodePosition.x + nodeWidth) + maxBottom = Math.max(maxBottom, nodePosition.y + nodeHeight) }) - const newWidth = Math.max(minWidth, leftPadding + maxRight + rightPadding) - const newHeight = Math.max(minHeight, headerHeight + topPadding + maxBottom + bottomPadding) + const newWidth = Math.max( + CONTAINER_DIMENSIONS.DEFAULT_WIDTH, + CONTAINER_DIMENSIONS.LEFT_PADDING + maxRight + CONTAINER_DIMENSIONS.RIGHT_PADDING + ) + const newHeight = Math.max( + CONTAINER_DIMENSIONS.DEFAULT_HEIGHT, + CONTAINER_DIMENSIONS.HEADER_HEIGHT + + CONTAINER_DIMENSIONS.TOP_PADDING + + maxBottom + + CONTAINER_DIMENSIONS.BOTTOM_PADDING + ) - // Update the container node's dimensions in displayNodes return currentNodes.map((node) => { if (node.id === parentId) { const currentWidth = node.data?.width || CONTAINER_DIMENSIONS.DEFAULT_WIDTH @@ -1570,7 +1560,7 @@ const WorkflowContent = React.memo(() => { }) }) }, - [blocks] + [blocks, getBlockDimensions] ) /** diff --git a/apps/sim/lib/workflows/blocks/block-dimensions.ts b/apps/sim/lib/workflows/blocks/block-dimensions.ts index d311f0dc0f..89a15c31b1 100644 --- a/apps/sim/lib/workflows/blocks/block-dimensions.ts +++ b/apps/sim/lib/workflows/blocks/block-dimensions.ts @@ -24,6 +24,10 @@ export const CONTAINER_DIMENSIONS = { MIN_WIDTH: 400, MIN_HEIGHT: 200, HEADER_HEIGHT: 50, + LEFT_PADDING: 16, + RIGHT_PADDING: 80, + TOP_PADDING: 16, + BOTTOM_PADDING: 16, } as const /** From 3120a785df5acb00ed246530619232bb079604e4 Mon Sep 17 00:00:00 2001 From: Waleed Date: Wed, 17 Dec 2025 13:42:43 -0800 Subject: [PATCH 12/15] fix(terminal): fix text wrap for errors and messages with long strings (#2429) --- .../w/[workflowId]/components/terminal/terminal.tsx | 2 +- apps/sim/components/emcn/components/code/code.tsx | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx index c51ed91c59..49ccb04b47 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/terminal.tsx @@ -1741,7 +1741,7 @@ export function Terminal() { )} {/* Content */} -
+
{shouldShowCodeDisplay ? ( ) { const line = lines[index] return ( -
+
{showGutter && (
) {
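The hunk below carries the actual wrap fix: when wrapText is enabled, the virtualized list stops scrolling horizontally and relies on text wrapping instead, which is what lets long error strings break onto new lines. A trivial standalone equivalent of that toggle, with the class names copied from the diff:

// Sketch of the class toggle applied in the hunk below.
function overflowClass(wrapText: boolean): string {
  return wrapText ? 'overflow-x-hidden' : 'overflow-x-auto'
}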
@@ -625,7 +625,7 @@ const VirtualizedViewerInner = memo(function VirtualizedViewerInner({
         rowComponent={CodeRow}
         rowProps={rowProps}
         overscanCount={5}
-        className='overflow-x-auto'
+        className={wrapText ? 'overflow-x-hidden' : 'overflow-x-auto'}
       />
     
) From 1ae3b47f5c1488c5b8fd21091a3d8f95484a9a40 Mon Sep 17 00:00:00 2001 From: Vikhyath Mondreti Date: Wed, 17 Dec 2025 14:50:33 -0800 Subject: [PATCH 13/15] fix(inactivity-poll): need to respect level and trigger filters (#2431) --- apps/sim/lib/notifications/inactivity-polling.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/apps/sim/lib/notifications/inactivity-polling.ts b/apps/sim/lib/notifications/inactivity-polling.ts index 4d4395faa4..f577b3666f 100644 --- a/apps/sim/lib/notifications/inactivity-polling.ts +++ b/apps/sim/lib/notifications/inactivity-polling.ts @@ -5,7 +5,7 @@ import { workspaceNotificationDelivery, workspaceNotificationSubscription, } from '@sim/db/schema' -import { and, eq, gte, sql } from 'drizzle-orm' +import { and, eq, gte, inArray, sql } from 'drizzle-orm' import { v4 as uuidv4 } from 'uuid' import { isTriggerDevEnabled } from '@/lib/core/config/feature-flags' import { createLogger } from '@/lib/logs/console/logger' @@ -45,6 +45,8 @@ async function checkWorkflowInactivity( } const windowStart = new Date(Date.now() - (alertConfig.inactivityHours || 24) * 60 * 60 * 1000) + const triggerFilter = subscription.triggerFilter + const levelFilter = subscription.levelFilter const recentLogs = await db .select({ id: workflowExecutionLogs.id }) @@ -52,7 +54,9 @@ async function checkWorkflowInactivity( .where( and( eq(workflowExecutionLogs.workflowId, workflowId), - gte(workflowExecutionLogs.createdAt, windowStart) + gte(workflowExecutionLogs.createdAt, windowStart), + inArray(workflowExecutionLogs.trigger, triggerFilter), + inArray(workflowExecutionLogs.level, levelFilter) ) ) .limit(1) From 7b5405e9688cfdcf4ebba9e763ec50d15a3d5053 Mon Sep 17 00:00:00 2001 From: Waleed Date: Wed, 17 Dec 2025 14:57:58 -0800 Subject: [PATCH 14/15] feat(vertex): added vertex to list of supported providers (#2430) * feat(vertex): added vertex to list of supported providers * added utils files for each provider, consolidated gemini utils, added dynamic verbosity and reasoning fetcher --- apps/docs/package.json | 2 +- apps/sim/app/api/copilot/chat/route.ts | 8 + .../app/api/copilot/context-usage/route.ts | 8 + apps/sim/app/api/providers/route.ts | 6 + apps/sim/blocks/blocks/agent.ts | 102 +- apps/sim/blocks/blocks/evaluator.ts | 30 + apps/sim/blocks/blocks/router.ts | 24 + apps/sim/blocks/blocks/translate.ts | 26 + apps/sim/components/icons.tsx | 50 + .../executor/handlers/agent/agent-handler.ts | 6 +- apps/sim/executor/handlers/agent/types.ts | 2 + apps/sim/lib/copilot/types.ts | 9 +- apps/sim/lib/core/config/env.ts | 4 + apps/sim/lib/mcp/service.ts | 6 +- apps/sim/package.json | 2 +- apps/sim/providers/anthropic/index.ts | 166 ++-- apps/sim/providers/anthropic/utils.ts | 70 ++ apps/sim/providers/azure-openai/index.ts | 96 +- apps/sim/providers/azure-openai/utils.ts | 70 ++ apps/sim/providers/cerebras/index.ts | 32 +- apps/sim/providers/cerebras/utils.ts | 23 + apps/sim/providers/deepseek/index.ts | 27 +- apps/sim/providers/deepseek/utils.ts | 21 + apps/sim/providers/google/index.ts | 515 ++++------ apps/sim/providers/google/utils.ts | 171 ++++ apps/sim/providers/groq/index.ts | 26 +- apps/sim/providers/groq/utils.ts | 23 + apps/sim/providers/index.ts | 11 +- apps/sim/providers/mistral/index.ts | 41 +- apps/sim/providers/mistral/utils.ts | 39 + apps/sim/providers/models.ts | 206 +++- apps/sim/providers/ollama/index.ts | 47 +- apps/sim/providers/ollama/utils.ts | 37 + apps/sim/providers/openai/index.ts | 47 +- apps/sim/providers/openai/utils.ts | 37 + 
apps/sim/providers/openrouter/index.ts | 85 +- apps/sim/providers/openrouter/utils.ts | 78 ++ apps/sim/providers/types.ts | 4 + apps/sim/providers/utils.test.ts | 38 +- apps/sim/providers/utils.ts | 64 +- apps/sim/providers/vertex/index.ts | 899 ++++++++++++++++++ apps/sim/providers/vertex/utils.ts | 233 +++++ apps/sim/providers/vllm/index.ts | 45 +- apps/sim/providers/vllm/utils.ts | 37 + apps/sim/providers/xai/index.ts | 123 +-- apps/sim/providers/xai/utils.ts | 83 ++ apps/sim/tools/llm/chat.ts | 16 + bun.lock | 7 +- 48 files changed, 2767 insertions(+), 935 deletions(-) create mode 100644 apps/sim/providers/anthropic/utils.ts create mode 100644 apps/sim/providers/azure-openai/utils.ts create mode 100644 apps/sim/providers/cerebras/utils.ts create mode 100644 apps/sim/providers/deepseek/utils.ts create mode 100644 apps/sim/providers/google/utils.ts create mode 100644 apps/sim/providers/groq/utils.ts create mode 100644 apps/sim/providers/mistral/utils.ts create mode 100644 apps/sim/providers/ollama/utils.ts create mode 100644 apps/sim/providers/openai/utils.ts create mode 100644 apps/sim/providers/openrouter/utils.ts create mode 100644 apps/sim/providers/vertex/index.ts create mode 100644 apps/sim/providers/vertex/utils.ts create mode 100644 apps/sim/providers/vllm/utils.ts create mode 100644 apps/sim/providers/xai/utils.ts diff --git a/apps/docs/package.json b/apps/docs/package.json index a589e671ed..76e0fffa88 100644 --- a/apps/docs/package.json +++ b/apps/docs/package.json @@ -4,7 +4,7 @@ "private": true, "license": "Apache-2.0", "scripts": { - "dev": "next dev --port 3001", + "dev": "next dev --port 7322", "build": "fumadocs-mdx && NODE_OPTIONS='--max-old-space-size=8192' next build", "start": "next start", "postinstall": "fumadocs-mdx", diff --git a/apps/sim/app/api/copilot/chat/route.ts b/apps/sim/app/api/copilot/chat/route.ts index 7c3ead7184..eb7331e0ea 100644 --- a/apps/sim/app/api/copilot/chat/route.ts +++ b/apps/sim/app/api/copilot/chat/route.ts @@ -303,6 +303,14 @@ export async function POST(req: NextRequest) { apiVersion: 'preview', endpoint: env.AZURE_OPENAI_ENDPOINT, } + } else if (providerEnv === 'vertex') { + providerConfig = { + provider: 'vertex', + model: modelToUse, + apiKey: env.COPILOT_API_KEY, + vertexProject: env.VERTEX_PROJECT, + vertexLocation: env.VERTEX_LOCATION, + } } else { providerConfig = { provider: providerEnv, diff --git a/apps/sim/app/api/copilot/context-usage/route.ts b/apps/sim/app/api/copilot/context-usage/route.ts index edb2b31c59..fba208bb44 100644 --- a/apps/sim/app/api/copilot/context-usage/route.ts +++ b/apps/sim/app/api/copilot/context-usage/route.ts @@ -66,6 +66,14 @@ export async function POST(req: NextRequest) { apiVersion: env.AZURE_OPENAI_API_VERSION, endpoint: env.AZURE_OPENAI_ENDPOINT, } + } else if (providerEnv === 'vertex') { + providerConfig = { + provider: 'vertex', + model: modelToUse, + apiKey: env.COPILOT_API_KEY, + vertexProject: env.VERTEX_PROJECT, + vertexLocation: env.VERTEX_LOCATION, + } } else { providerConfig = { provider: providerEnv, diff --git a/apps/sim/app/api/providers/route.ts b/apps/sim/app/api/providers/route.ts index 6b95f67e9f..ada02eb093 100644 --- a/apps/sim/app/api/providers/route.ts +++ b/apps/sim/app/api/providers/route.ts @@ -35,6 +35,8 @@ export async function POST(request: NextRequest) { apiKey, azureEndpoint, azureApiVersion, + vertexProject, + vertexLocation, responseFormat, workflowId, workspaceId, @@ -58,6 +60,8 @@ export async function POST(request: NextRequest) { hasApiKey: !!apiKey, 
hasAzureEndpoint: !!azureEndpoint, hasAzureApiVersion: !!azureApiVersion, + hasVertexProject: !!vertexProject, + hasVertexLocation: !!vertexLocation, hasResponseFormat: !!responseFormat, workflowId, stream: !!stream, @@ -104,6 +108,8 @@ export async function POST(request: NextRequest) { apiKey: finalApiKey, azureEndpoint, azureApiVersion, + vertexProject, + vertexLocation, responseFormat, workflowId, workspaceId, diff --git a/apps/sim/blocks/blocks/agent.ts b/apps/sim/blocks/blocks/agent.ts index 3e321d2cd0..d9cbed2b57 100644 --- a/apps/sim/blocks/blocks/agent.ts +++ b/apps/sim/blocks/blocks/agent.ts @@ -8,6 +8,8 @@ import { getHostedModels, getMaxTemperature, getProviderIcon, + getReasoningEffortValuesForModel, + getVerbosityValuesForModel, MODELS_WITH_REASONING_EFFORT, MODELS_WITH_VERBOSITY, providers, @@ -114,12 +116,47 @@ export const AgentBlock: BlockConfig = { type: 'dropdown', placeholder: 'Select reasoning effort...', options: [ - { label: 'none', id: 'none' }, - { label: 'minimal', id: 'minimal' }, { label: 'low', id: 'low' }, { label: 'medium', id: 'medium' }, { label: 'high', id: 'high' }, ], + dependsOn: ['model'], + fetchOptions: async (blockId: string) => { + const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') + const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store') + + const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId + if (!activeWorkflowId) { + return [ + { label: 'low', id: 'low' }, + { label: 'medium', id: 'medium' }, + { label: 'high', id: 'high' }, + ] + } + + const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId] + const blockValues = workflowValues?.[blockId] + const modelValue = blockValues?.model as string + + if (!modelValue) { + return [ + { label: 'low', id: 'low' }, + { label: 'medium', id: 'medium' }, + { label: 'high', id: 'high' }, + ] + } + + const validOptions = getReasoningEffortValuesForModel(modelValue) + if (!validOptions) { + return [ + { label: 'low', id: 'low' }, + { label: 'medium', id: 'medium' }, + { label: 'high', id: 'high' }, + ] + } + + return validOptions.map((opt) => ({ label: opt, id: opt })) + }, value: () => 'medium', condition: { field: 'model', @@ -136,6 +173,43 @@ export const AgentBlock: BlockConfig = { { label: 'medium', id: 'medium' }, { label: 'high', id: 'high' }, ], + dependsOn: ['model'], + fetchOptions: async (blockId: string) => { + const { useSubBlockStore } = await import('@/stores/workflows/subblock/store') + const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store') + + const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId + if (!activeWorkflowId) { + return [ + { label: 'low', id: 'low' }, + { label: 'medium', id: 'medium' }, + { label: 'high', id: 'high' }, + ] + } + + const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId] + const blockValues = workflowValues?.[blockId] + const modelValue = blockValues?.model as string + + if (!modelValue) { + return [ + { label: 'low', id: 'low' }, + { label: 'medium', id: 'medium' }, + { label: 'high', id: 'high' }, + ] + } + + const validOptions = getVerbosityValuesForModel(modelValue) + if (!validOptions) { + return [ + { label: 'low', id: 'low' }, + { label: 'medium', id: 'medium' }, + { label: 'high', id: 'high' }, + ] + } + + return validOptions.map((opt) => ({ label: opt, id: opt })) + }, value: () => 'medium', condition: { field: 'model', @@ -166,6 +240,28 @@ export const AgentBlock: BlockConfig = { 
value: providers['azure-openai'].models, }, }, + { + id: 'vertexProject', + title: 'Vertex AI Project', + type: 'short-input', + placeholder: 'your-gcp-project-id', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, + { + id: 'vertexLocation', + title: 'Vertex AI Location', + type: 'short-input', + placeholder: 'us-central1', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, { id: 'tools', title: 'Tools', @@ -465,6 +561,8 @@ Example 3 (Array Input): apiKey: { type: 'string', description: 'Provider API key' }, azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' }, azureApiVersion: { type: 'string', description: 'Azure API version' }, + vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' }, + vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' }, responseFormat: { type: 'json', description: 'JSON response format schema', diff --git a/apps/sim/blocks/blocks/evaluator.ts b/apps/sim/blocks/blocks/evaluator.ts index e809ed0476..63ea9c74ce 100644 --- a/apps/sim/blocks/blocks/evaluator.ts +++ b/apps/sim/blocks/blocks/evaluator.ts @@ -239,6 +239,28 @@ export const EvaluatorBlock: BlockConfig = { value: providers['azure-openai'].models, }, }, + { + id: 'vertexProject', + title: 'Vertex AI Project', + type: 'short-input', + placeholder: 'your-gcp-project-id', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, + { + id: 'vertexLocation', + title: 'Vertex AI Location', + type: 'short-input', + placeholder: 'us-central1', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, { id: 'temperature', title: 'Temperature', @@ -356,6 +378,14 @@ export const EvaluatorBlock: BlockConfig = { apiKey: { type: 'string' as ParamType, description: 'Provider API key' }, azureEndpoint: { type: 'string' as ParamType, description: 'Azure OpenAI endpoint URL' }, azureApiVersion: { type: 'string' as ParamType, description: 'Azure API version' }, + vertexProject: { + type: 'string' as ParamType, + description: 'Google Cloud project ID for Vertex AI', + }, + vertexLocation: { + type: 'string' as ParamType, + description: 'Google Cloud location for Vertex AI', + }, temperature: { type: 'number' as ParamType, description: 'Response randomness level (low for consistent evaluation)', diff --git a/apps/sim/blocks/blocks/router.ts b/apps/sim/blocks/blocks/router.ts index 744aa53950..0c6006a43c 100644 --- a/apps/sim/blocks/blocks/router.ts +++ b/apps/sim/blocks/blocks/router.ts @@ -188,6 +188,28 @@ export const RouterBlock: BlockConfig = { value: providers['azure-openai'].models, }, }, + { + id: 'vertexProject', + title: 'Vertex AI Project', + type: 'short-input', + placeholder: 'your-gcp-project-id', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, + { + id: 'vertexLocation', + title: 'Vertex AI Location', + type: 'short-input', + placeholder: 'us-central1', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, { id: 'temperature', title: 'Temperature', @@ -235,6 +257,8 @@ export const RouterBlock: BlockConfig = { apiKey: { type: 'string', description: 'Provider API key' }, azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' }, azureApiVersion: { type: 'string', description: 'Azure API version' 
}, + vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' }, + vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' }, temperature: { type: 'number', description: 'Response randomness level (low for consistent routing)', diff --git a/apps/sim/blocks/blocks/translate.ts b/apps/sim/blocks/blocks/translate.ts index bd984b8601..1ecfc7a206 100644 --- a/apps/sim/blocks/blocks/translate.ts +++ b/apps/sim/blocks/blocks/translate.ts @@ -99,6 +99,28 @@ export const TranslateBlock: BlockConfig = { value: providers['azure-openai'].models, }, }, + { + id: 'vertexProject', + title: 'Vertex AI Project', + type: 'short-input', + placeholder: 'your-gcp-project-id', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, + { + id: 'vertexLocation', + title: 'Vertex AI Location', + type: 'short-input', + placeholder: 'us-central1', + connectionDroppable: false, + condition: { + field: 'model', + value: providers.vertex.models, + }, + }, { id: 'systemPrompt', title: 'System Prompt', @@ -120,6 +142,8 @@ export const TranslateBlock: BlockConfig = { apiKey: params.apiKey, azureEndpoint: params.azureEndpoint, azureApiVersion: params.azureApiVersion, + vertexProject: params.vertexProject, + vertexLocation: params.vertexLocation, }), }, }, @@ -129,6 +153,8 @@ export const TranslateBlock: BlockConfig = { apiKey: { type: 'string', description: 'Provider API key' }, azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' }, azureApiVersion: { type: 'string', description: 'Azure API version' }, + vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' }, + vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' }, systemPrompt: { type: 'string', description: 'Translation instructions' }, }, outputs: { diff --git a/apps/sim/components/icons.tsx b/apps/sim/components/icons.tsx index 2e668f913e..6c7f641382 100644 --- a/apps/sim/components/icons.tsx +++ b/apps/sim/components/icons.tsx @@ -2452,6 +2452,56 @@ export const GeminiIcon = (props: SVGProps) => ( ) +export const VertexIcon = (props: SVGProps) => ( + + + + + + + + + + + + + + + + + + + + + + + +) + export const CerebrasIcon = (props: SVGProps) => ( + provider: 'vertex' + model: string + apiKey?: string + vertexProject?: string + vertexLocation?: string + } + | { + provider: Exclude model?: string apiKey?: string } diff --git a/apps/sim/lib/core/config/env.ts b/apps/sim/lib/core/config/env.ts index 290b163d8b..39780d841d 100644 --- a/apps/sim/lib/core/config/env.ts +++ b/apps/sim/lib/core/config/env.ts @@ -98,6 +98,10 @@ export const env = createEnv({ OCR_AZURE_MODEL_NAME: z.string().optional(), // Azure Mistral OCR model name for document processing OCR_AZURE_API_KEY: z.string().min(1).optional(), // Azure Mistral OCR API key + // Vertex AI Configuration + VERTEX_PROJECT: z.string().optional(), // Google Cloud project ID for Vertex AI + VERTEX_LOCATION: z.string().optional(), // Google Cloud location/region for Vertex AI (defaults to us-central1) + // Monitoring & Analytics TELEMETRY_ENDPOINT: z.string().url().optional(), // Custom telemetry/analytics endpoint COST_MULTIPLIER: z.number().optional(), // Multiplier for cost calculations diff --git a/apps/sim/lib/mcp/service.ts b/apps/sim/lib/mcp/service.ts index cfadec8f49..1e95dd7063 100644 --- a/apps/sim/lib/mcp/service.ts +++ b/apps/sim/lib/mcp/service.ts @@ -404,15 +404,11 @@ class McpService { failedCount++ const 
errorMessage = result.reason instanceof Error ? result.reason.message : 'Unknown error' - logger.warn( - `[${requestId}] Failed to discover tools from server ${server.name}:`, - result.reason - ) + logger.warn(`[${requestId}] Failed to discover tools from server ${server.name}:`) statusUpdates.push(this.updateServerStatus(server.id!, workspaceId, false, errorMessage)) } }) - // Update server statuses in parallel (don't block on this) Promise.allSettled(statusUpdates).catch((err) => { logger.error(`[${requestId}] Error updating server statuses:`, err) }) diff --git a/apps/sim/package.json b/apps/sim/package.json index b34c4a80fe..b7aff8168c 100644 --- a/apps/sim/package.json +++ b/apps/sim/package.json @@ -8,7 +8,7 @@ "node": ">=20.0.0" }, "scripts": { - "dev": "next dev --port 3000", + "dev": "next dev --port 7321", "dev:webpack": "next dev --webpack", "dev:sockets": "bun run socket-server/index.ts", "dev:full": "concurrently -n \"App,Realtime\" -c \"cyan,magenta\" \"bun run dev\" \"bun run dev:sockets\"", diff --git a/apps/sim/providers/anthropic/index.ts b/apps/sim/providers/anthropic/index.ts index 8afa26446d..5e9f2d26f8 100644 --- a/apps/sim/providers/anthropic/index.ts +++ b/apps/sim/providers/anthropic/index.ts @@ -1,35 +1,24 @@ import Anthropic from '@anthropic-ai/sdk' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { + checkForForcedToolUsage, + createReadableStreamFromAnthropicStream, + generateToolUseId, +} from '@/providers/anthropic/utils' +import { getProviderDefaultModel, getProviderModels } from '@/providers/models' +import type { + ProviderConfig, + ProviderRequest, + ProviderResponse, + TimeSegment, +} from '@/providers/types' +import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils' import { executeTool } from '@/tools' -import { getProviderDefaultModel, getProviderModels } from '../models' -import type { ProviderConfig, ProviderRequest, ProviderResponse, TimeSegment } from '../types' -import { prepareToolExecution, prepareToolsWithUsageControl, trackForcedToolUsage } from '../utils' const logger = createLogger('AnthropicProvider') -/** - * Helper to wrap Anthropic streaming into a browser-friendly ReadableStream - */ -function createReadableStreamFromAnthropicStream( - anthropicStream: AsyncIterable -): ReadableStream { - return new ReadableStream({ - async start(controller) { - try { - for await (const event of anthropicStream) { - if (event.type === 'content_block_delta' && event.delta?.text) { - controller.enqueue(new TextEncoder().encode(event.delta.text)) - } - } - controller.close() - } catch (err) { - controller.error(err) - } - }, - }) -} - export const anthropicProvider: ProviderConfig = { id: 'anthropic', name: 'Anthropic', @@ -47,11 +36,6 @@ export const anthropicProvider: ProviderConfig = { const anthropic = new Anthropic({ apiKey: request.apiKey }) - // Helper function to generate a simple unique ID for tool uses - const generateToolUseId = (toolName: string) => { - return `${toolName}-${Date.now()}-${Math.random().toString(36).substring(2, 7)}` - } - // Transform messages to Anthropic format const messages: any[] = [] @@ -373,7 +357,6 @@ ${fieldDescriptions} const toolResults = [] const currentMessages = [...messages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track if a forced tool has been used let hasUsedForcedTool = false @@ -393,47 +376,20 @@ 
${fieldDescriptions} }, ] - // Helper function to check for forced tool usage in Anthropic responses - const checkForForcedToolUsage = (response: any, toolChoice: any) => { - if ( - typeof toolChoice === 'object' && - toolChoice !== null && - Array.isArray(response.content) - ) { - const toolUses = response.content.filter((item: any) => item.type === 'tool_use') - - if (toolUses.length > 0) { - // Convert Anthropic tool_use format to a format trackForcedToolUsage can understand - const adaptedToolCalls = toolUses.map((tool: any) => ({ - name: tool.name, - })) - - // Convert Anthropic tool_choice format to match OpenAI format for tracking - const adaptedToolChoice = - toolChoice.type === 'tool' ? { function: { name: toolChoice.name } } : toolChoice - - const result = trackForcedToolUsage( - adaptedToolCalls, - adaptedToolChoice, - logger, - 'anthropic', - forcedTools, - usedForcedTools - ) - // Make the behavior consistent with the initial check - hasUsedForcedTool = result.hasUsedForcedTool - usedForcedTools = result.usedForcedTools - return result - } - } - return null - } - // Check if a forced tool was used in the first response - checkForForcedToolUsage(currentResponse, originalToolChoice) + const firstCheckResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + forcedTools, + usedForcedTools + ) + if (firstCheckResult) { + hasUsedForcedTool = firstCheckResult.hasUsedForcedTool + usedForcedTools = firstCheckResult.usedForcedTools + } try { - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use') if (!toolUses || toolUses.length === 0) { @@ -576,7 +532,16 @@ ${fieldDescriptions} currentResponse = await anthropic.messages.create(nextPayload) // Check if any forced tools were used in this response - checkForForcedToolUsage(currentResponse, nextPayload.tool_choice) + const nextCheckResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + forcedTools, + usedForcedTools + ) + if (nextCheckResult) { + hasUsedForcedTool = nextCheckResult.hasUsedForcedTool + usedForcedTools = nextCheckResult.usedForcedTools + } const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime @@ -727,7 +692,6 @@ ${fieldDescriptions} const toolResults = [] const currentMessages = [...messages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track if a forced tool has been used let hasUsedForcedTool = false @@ -747,47 +711,20 @@ ${fieldDescriptions} }, ] - // Helper function to check for forced tool usage in Anthropic responses - const checkForForcedToolUsage = (response: any, toolChoice: any) => { - if ( - typeof toolChoice === 'object' && - toolChoice !== null && - Array.isArray(response.content) - ) { - const toolUses = response.content.filter((item: any) => item.type === 'tool_use') - - if (toolUses.length > 0) { - // Convert Anthropic tool_use format to a format trackForcedToolUsage can understand - const adaptedToolCalls = toolUses.map((tool: any) => ({ - name: tool.name, - })) - - // Convert Anthropic tool_choice format to match OpenAI format for tracking - const adaptedToolChoice = - toolChoice.type === 'tool' ? 
{ function: { name: toolChoice.name } } : toolChoice - - const result = trackForcedToolUsage( - adaptedToolCalls, - adaptedToolChoice, - logger, - 'anthropic', - forcedTools, - usedForcedTools - ) - // Make the behavior consistent with the initial check - hasUsedForcedTool = result.hasUsedForcedTool - usedForcedTools = result.usedForcedTools - return result - } - } - return null - } - // Check if a forced tool was used in the first response - checkForForcedToolUsage(currentResponse, originalToolChoice) + const firstCheckResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + forcedTools, + usedForcedTools + ) + if (firstCheckResult) { + hasUsedForcedTool = firstCheckResult.hasUsedForcedTool + usedForcedTools = firstCheckResult.usedForcedTools + } try { - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolUses = currentResponse.content.filter((item) => item.type === 'tool_use') if (!toolUses || toolUses.length === 0) { @@ -926,7 +863,16 @@ ${fieldDescriptions} currentResponse = await anthropic.messages.create(nextPayload) // Check if any forced tools were used in this response - checkForForcedToolUsage(currentResponse, nextPayload.tool_choice) + const nextCheckResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + forcedTools, + usedForcedTools + ) + if (nextCheckResult) { + hasUsedForcedTool = nextCheckResult.hasUsedForcedTool + usedForcedTools = nextCheckResult.usedForcedTools + } const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime diff --git a/apps/sim/providers/anthropic/utils.ts b/apps/sim/providers/anthropic/utils.ts new file mode 100644 index 0000000000..d45a0e2a03 --- /dev/null +++ b/apps/sim/providers/anthropic/utils.ts @@ -0,0 +1,70 @@ +import { createLogger } from '@/lib/logs/console/logger' +import { trackForcedToolUsage } from '@/providers/utils' + +const logger = createLogger('AnthropicUtils') + +/** + * Helper to wrap Anthropic streaming into a browser-friendly ReadableStream + */ +export function createReadableStreamFromAnthropicStream( + anthropicStream: AsyncIterable +): ReadableStream { + return new ReadableStream({ + async start(controller) { + try { + for await (const event of anthropicStream) { + if (event.type === 'content_block_delta' && event.delta?.text) { + controller.enqueue(new TextEncoder().encode(event.delta.text)) + } + } + controller.close() + } catch (err) { + controller.error(err) + } + }, + }) +} + +/** + * Helper function to generate a simple unique ID for tool uses + */ +export function generateToolUseId(toolName: string): string { + return `${toolName}-${Date.now()}-${Math.random().toString(36).substring(2, 7)}` +} + +/** + * Helper function to check for forced tool usage in Anthropic responses + */ +export function checkForForcedToolUsage( + response: any, + toolChoice: any, + forcedTools: string[], + usedForcedTools: string[] +): { hasUsedForcedTool: boolean; usedForcedTools: string[] } | null { + if (typeof toolChoice === 'object' && toolChoice !== null && Array.isArray(response.content)) { + const toolUses = response.content.filter((item: any) => item.type === 'tool_use') + + if (toolUses.length > 0) { + // Convert Anthropic tool_use format to a format trackForcedToolUsage can understand + const adaptedToolCalls = toolUses.map((tool: any) => ({ + name: tool.name, + })) + + // Convert Anthropic tool_choice format to match OpenAI format for tracking + const adaptedToolChoice = + 
toolChoice.type === 'tool' ? { function: { name: toolChoice.name } } : toolChoice + + const result = trackForcedToolUsage( + adaptedToolCalls, + adaptedToolChoice, + logger, + 'anthropic', + forcedTools, + usedForcedTools + ) + + return result + } + } + return null +} diff --git a/apps/sim/providers/azure-openai/index.ts b/apps/sim/providers/azure-openai/index.ts index fd4f71a563..c2cf283396 100644 --- a/apps/sim/providers/azure-openai/index.ts +++ b/apps/sim/providers/azure-openai/index.ts @@ -2,6 +2,11 @@ import { AzureOpenAI } from 'openai' import { env } from '@/lib/core/config/env' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { + checkForForcedToolUsage, + createReadableStreamFromAzureOpenAIStream, +} from '@/providers/azure-openai/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -9,55 +14,11 @@ import type { ProviderResponse, TimeSegment, } from '@/providers/types' -import { - prepareToolExecution, - prepareToolsWithUsageControl, - trackForcedToolUsage, -} from '@/providers/utils' +import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils' import { executeTool } from '@/tools' const logger = createLogger('AzureOpenAIProvider') -/** - * Helper function to convert an Azure OpenAI stream to a standard ReadableStream - * and collect completion metrics - */ -function createReadableStreamFromAzureOpenAIStream( - azureOpenAIStream: any, - onComplete?: (content: string, usage?: any) => void -): ReadableStream { - let fullContent = '' - let usageData: any = null - - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of azureOpenAIStream) { - // Check for usage data in the final chunk - if (chunk.usage) { - usageData = chunk.usage - } - - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - fullContent += content - controller.enqueue(new TextEncoder().encode(content)) - } - } - - // Once stream is complete, call the completion callback with the final content and usage - if (onComplete) { - onComplete(fullContent, usageData) - } - - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - /** * Azure OpenAI provider configuration */ @@ -303,26 +264,6 @@ export const azureOpenAIProvider: ProviderConfig = { const forcedTools = preparedTools?.forcedTools || [] let usedForcedTools: string[] = [] - // Helper function to check for forced tool usage in responses - const checkForForcedToolUsage = ( - response: any, - toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any } - ) => { - if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) { - const toolCallsResponse = response.choices[0].message.tool_calls - const result = trackForcedToolUsage( - toolCallsResponse, - toolChoice, - logger, - 'azure-openai', - forcedTools, - usedForcedTools - ) - hasUsedForcedTool = result.hasUsedForcedTool - usedForcedTools = result.usedForcedTools - } - } - let currentResponse = await azureOpenAI.chat.completions.create(payload) const firstResponseTime = Date.now() - initialCallTime @@ -337,7 +278,6 @@ export const azureOpenAIProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track time spent in model vs tools let modelTime = 
firstResponseTime @@ -358,9 +298,17 @@ export const azureOpenAIProvider: ProviderConfig = { ] // Check if a forced tool was used in the first response - checkForForcedToolUsage(currentResponse, originalToolChoice) + const firstCheckResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + logger, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = firstCheckResult.hasUsedForcedTool + usedForcedTools = firstCheckResult.usedForcedTools - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { @@ -368,7 +316,7 @@ export const azureOpenAIProvider: ProviderConfig = { } logger.info( - `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})` + `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` ) // Track time for tool calls in this batch @@ -491,7 +439,15 @@ export const azureOpenAIProvider: ProviderConfig = { currentResponse = await azureOpenAI.chat.completions.create(nextPayload) // Check if any forced tools were used in this response - checkForForcedToolUsage(currentResponse, nextPayload.tool_choice) + const nextCheckResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + logger, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = nextCheckResult.hasUsedForcedTool + usedForcedTools = nextCheckResult.usedForcedTools const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime diff --git a/apps/sim/providers/azure-openai/utils.ts b/apps/sim/providers/azure-openai/utils.ts new file mode 100644 index 0000000000..b8baf9978d --- /dev/null +++ b/apps/sim/providers/azure-openai/utils.ts @@ -0,0 +1,70 @@ +import type { Logger } from '@/lib/logs/console/logger' +import { trackForcedToolUsage } from '@/providers/utils' + +/** + * Helper function to convert an Azure OpenAI stream to a standard ReadableStream + * and collect completion metrics + */ +export function createReadableStreamFromAzureOpenAIStream( + azureOpenAIStream: any, + onComplete?: (content: string, usage?: any) => void +): ReadableStream { + let fullContent = '' + let usageData: any = null + + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of azureOpenAIStream) { + if (chunk.usage) { + usageData = chunk.usage + } + + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + + if (onComplete) { + onComplete(fullContent, usageData) + } + + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} + +/** + * Helper function to check for forced tool usage in responses + */ +export function checkForForcedToolUsage( + response: any, + toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }, + logger: Logger, + forcedTools: string[], + usedForcedTools: string[] +): { hasUsedForcedTool: boolean; usedForcedTools: string[] } { + let hasUsedForcedTool = false + let updatedUsedForcedTools = [...usedForcedTools] + + if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) { + const toolCallsResponse = response.choices[0].message.tool_calls + const result = trackForcedToolUsage( + toolCallsResponse, + toolChoice, + logger, + 
'azure-openai', + forcedTools, + updatedUsedForcedTools + ) + hasUsedForcedTool = result.hasUsedForcedTool + updatedUsedForcedTools = result.usedForcedTools + } + + return { hasUsedForcedTool, usedForcedTools: updatedUsedForcedTools } +} diff --git a/apps/sim/providers/cerebras/index.ts b/apps/sim/providers/cerebras/index.ts index 3ebc8b412d..f017565ab2 100644 --- a/apps/sim/providers/cerebras/index.ts +++ b/apps/sim/providers/cerebras/index.ts @@ -1,6 +1,9 @@ import { Cerebras } from '@cerebras/cerebras_cloud_sdk' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import type { CerebrasResponse } from '@/providers/cerebras/types' +import { createReadableStreamFromCerebrasStream } from '@/providers/cerebras/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -14,35 +17,9 @@ import { trackForcedToolUsage, } from '@/providers/utils' import { executeTool } from '@/tools' -import type { CerebrasResponse } from './types' const logger = createLogger('CerebrasProvider') -/** - * Helper to convert a Cerebras streaming response (async iterable) into a ReadableStream. - * Enqueues only the model's text delta chunks as UTF-8 encoded bytes. - */ -function createReadableStreamFromCerebrasStream( - cerebrasStream: AsyncIterable -): ReadableStream { - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of cerebrasStream) { - // Expecting delta content similar to OpenAI: chunk.choices[0]?.delta?.content - const content = chunk.choices?.[0]?.delta?.content || '' - if (content) { - controller.enqueue(new TextEncoder().encode(content)) - } - } - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - export const cerebrasProvider: ProviderConfig = { id: 'cerebras', name: 'Cerebras', @@ -223,7 +200,6 @@ export const cerebrasProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track time spent in model vs tools let modelTime = firstResponseTime @@ -246,7 +222,7 @@ export const cerebrasProvider: ProviderConfig = { const toolCallSignatures = new Set() try { - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls diff --git a/apps/sim/providers/cerebras/utils.ts b/apps/sim/providers/cerebras/utils.ts new file mode 100644 index 0000000000..01dcfd5fea --- /dev/null +++ b/apps/sim/providers/cerebras/utils.ts @@ -0,0 +1,23 @@ +/** + * Helper to convert a Cerebras streaming response (async iterable) into a ReadableStream. + * Enqueues only the model's text delta chunks as UTF-8 encoded bytes. 
+ */ +export function createReadableStreamFromCerebrasStream( + cerebrasStream: AsyncIterable +): ReadableStream { + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of cerebrasStream) { + const content = chunk.choices?.[0]?.delta?.content || '' + if (content) { + controller.enqueue(new TextEncoder().encode(content)) + } + } + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} diff --git a/apps/sim/providers/deepseek/index.ts b/apps/sim/providers/deepseek/index.ts index a303b70b65..7425d84fa4 100644 --- a/apps/sim/providers/deepseek/index.ts +++ b/apps/sim/providers/deepseek/index.ts @@ -1,6 +1,8 @@ import OpenAI from 'openai' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { createReadableStreamFromDeepseekStream } from '@/providers/deepseek/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -17,28 +19,6 @@ import { executeTool } from '@/tools' const logger = createLogger('DeepseekProvider') -/** - * Helper function to convert a DeepSeek (OpenAI-compatible) stream to a ReadableStream - * of text chunks that can be consumed by the browser. - */ -function createReadableStreamFromDeepseekStream(deepseekStream: any): ReadableStream { - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of deepseekStream) { - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - controller.enqueue(new TextEncoder().encode(content)) - } - } - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - export const deepseekProvider: ProviderConfig = { id: 'deepseek', name: 'Deepseek', @@ -231,7 +211,6 @@ export const deepseekProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track if a forced tool has been used let hasUsedForcedTool = false @@ -270,7 +249,7 @@ export const deepseekProvider: ProviderConfig = { } try { - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { diff --git a/apps/sim/providers/deepseek/utils.ts b/apps/sim/providers/deepseek/utils.ts new file mode 100644 index 0000000000..228f5e346c --- /dev/null +++ b/apps/sim/providers/deepseek/utils.ts @@ -0,0 +1,21 @@ +/** + * Helper function to convert a DeepSeek (OpenAI-compatible) stream to a ReadableStream + * of text chunks that can be consumed by the browser. 
+ */ +export function createReadableStreamFromDeepseekStream(deepseekStream: any): ReadableStream { + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of deepseekStream) { + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + controller.enqueue(new TextEncoder().encode(content)) + } + } + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} diff --git a/apps/sim/providers/google/index.ts b/apps/sim/providers/google/index.ts index 0ff67344fd..fdd225a462 100644 --- a/apps/sim/providers/google/index.ts +++ b/apps/sim/providers/google/index.ts @@ -1,5 +1,12 @@ import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { + cleanSchemaForGemini, + convertToGeminiFormat, + extractFunctionCall, + extractTextContent, +} from '@/providers/google/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -19,7 +26,13 @@ const logger = createLogger('GoogleProvider') /** * Creates a ReadableStream from Google's Gemini stream response */ -function createReadableStreamFromGeminiStream(response: Response): ReadableStream { +function createReadableStreamFromGeminiStream( + response: Response, + onComplete?: ( + content: string, + usage?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number } + ) => void +): ReadableStream { const reader = response.body?.getReader() if (!reader) { throw new Error('Failed to get reader from response body') @@ -29,18 +42,24 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea async start(controller) { try { let buffer = '' + let fullContent = '' + let usageData: { + promptTokenCount?: number + candidatesTokenCount?: number + totalTokenCount?: number + } | null = null while (true) { const { done, value } = await reader.read() if (done) { - // Try to parse any remaining buffer as complete JSON if (buffer.trim()) { - // Processing final buffer try { const data = JSON.parse(buffer.trim()) + if (data.usageMetadata) { + usageData = data.usageMetadata + } const candidate = data.candidates?.[0] if (candidate?.content?.parts) { - // Check if this is a function call const functionCall = extractFunctionCall(candidate) if (functionCall) { logger.debug( @@ -49,26 +68,27 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea functionName: functionCall.name, } ) - // Function calls should not be streamed - end the stream early + if (onComplete) onComplete(fullContent, usageData || undefined) controller.close() return } const content = extractTextContent(candidate) if (content) { + fullContent += content controller.enqueue(new TextEncoder().encode(content)) } } } catch (e) { - // Final buffer not valid JSON, checking if it contains JSON array - // Try parsing as JSON array if it starts with [ if (buffer.trim().startsWith('[')) { try { const dataArray = JSON.parse(buffer.trim()) if (Array.isArray(dataArray)) { for (const item of dataArray) { + if (item.usageMetadata) { + usageData = item.usageMetadata + } const candidate = item.candidates?.[0] if (candidate?.content?.parts) { - // Check if this is a function call const functionCall = extractFunctionCall(candidate) if (functionCall) { logger.debug( @@ -77,11 +97,13 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea functionName: functionCall.name, } ) + if 
(onComplete) onComplete(fullContent, usageData || undefined) controller.close() return } const content = extractTextContent(candidate) if (content) { + fullContent += content controller.enqueue(new TextEncoder().encode(content)) } } @@ -93,6 +115,7 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea } } } + if (onComplete) onComplete(fullContent, usageData || undefined) controller.close() break } @@ -100,14 +123,11 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea const text = new TextDecoder().decode(value) buffer += text - // Try to find complete JSON objects in buffer - // Look for patterns like: {...}\n{...} or just a single {...} let searchIndex = 0 while (searchIndex < buffer.length) { const openBrace = buffer.indexOf('{', searchIndex) if (openBrace === -1) break - // Try to find the matching closing brace let braceCount = 0 let inString = false let escaped = false @@ -138,28 +158,34 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea } if (closeBrace !== -1) { - // Found a complete JSON object const jsonStr = buffer.substring(openBrace, closeBrace + 1) try { const data = JSON.parse(jsonStr) - // JSON parsed successfully from stream + + if (data.usageMetadata) { + usageData = data.usageMetadata + } const candidate = data.candidates?.[0] - // Handle specific finish reasons if (candidate?.finishReason === 'UNEXPECTED_TOOL_CALL') { logger.warn('Gemini returned UNEXPECTED_TOOL_CALL in streaming mode', { finishReason: candidate.finishReason, hasContent: !!candidate?.content, hasParts: !!candidate?.content?.parts, }) - // This indicates a configuration issue - tools might be improperly configured for streaming - continue + const textContent = extractTextContent(candidate) + if (textContent) { + fullContent += textContent + controller.enqueue(new TextEncoder().encode(textContent)) + } + if (onComplete) onComplete(fullContent, usageData || undefined) + controller.close() + return } if (candidate?.content?.parts) { - // Check if this is a function call const functionCall = extractFunctionCall(candidate) if (functionCall) { logger.debug( @@ -168,13 +194,13 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea functionName: functionCall.name, } ) - // Function calls should not be streamed - we need to end the stream - // and let the non-streaming tool execution flow handle this + if (onComplete) onComplete(fullContent, usageData || undefined) controller.close() return } const content = extractTextContent(candidate) if (content) { + fullContent += content controller.enqueue(new TextEncoder().encode(content)) } } @@ -185,7 +211,6 @@ function createReadableStreamFromGeminiStream(response: Response): ReadableStrea }) } - // Remove processed JSON from buffer and continue searching buffer = buffer.substring(closeBrace + 1) searchIndex = 0 } else { @@ -232,45 +257,36 @@ export const googleProvider: ProviderConfig = { streaming: !!request.stream, }) - // Start execution timer for the entire provider execution const providerStartTime = Date.now() const providerStartTimeISO = new Date(providerStartTime).toISOString() try { - // Convert messages to Gemini format const { contents, tools, systemInstruction } = convertToGeminiFormat(request) const requestedModel = request.model || 'gemini-2.5-pro' - // Build request payload const payload: any = { contents, generationConfig: {}, } - // Add temperature if specified if (request.temperature !== undefined && request.temperature !== null) 
{ payload.generationConfig.temperature = request.temperature } - // Add max tokens if specified if (request.maxTokens !== undefined) { payload.generationConfig.maxOutputTokens = request.maxTokens } - // Add system instruction if provided if (systemInstruction) { payload.systemInstruction = systemInstruction } - // Add structured output format if requested (but not when tools are present) if (request.responseFormat && !tools?.length) { const responseFormatSchema = request.responseFormat.schema || request.responseFormat - // Clean the schema using our helper function const cleanSchema = cleanSchemaForGemini(responseFormatSchema) - // Use Gemini's native structured output approach payload.generationConfig.responseMimeType = 'application/json' payload.generationConfig.responseSchema = cleanSchema @@ -284,7 +300,6 @@ export const googleProvider: ProviderConfig = { ) } - // Handle tools and tool usage control let preparedTools: ReturnType | null = null if (tools?.length) { @@ -298,7 +313,6 @@ export const googleProvider: ProviderConfig = { }, ] - // Add Google-specific tool configuration if (toolConfig) { payload.toolConfig = toolConfig } @@ -313,14 +327,10 @@ export const googleProvider: ProviderConfig = { } } - // Make the API request const initialCallTime = Date.now() - // Disable streaming for initial requests when tools are present to avoid function calls in streams - // Only enable streaming for the final response after tool execution const shouldStream = request.stream && !tools?.length - // Use streamGenerateContent for streaming requests const endpoint = shouldStream ? `https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:streamGenerateContent?key=${request.apiKey}` : `https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:generateContent?key=${request.apiKey}` @@ -352,16 +362,11 @@ export const googleProvider: ProviderConfig = { const firstResponseTime = Date.now() - initialCallTime - // Handle streaming response if (shouldStream) { logger.info('Handling Google Gemini streaming response') - // Create a ReadableStream from the Google Gemini stream - const stream = createReadableStreamFromGeminiStream(response) - - // Create an object that combines the stream with execution metadata - const streamingExecution: StreamingExecution = { - stream, + const streamingResult: StreamingExecution = { + stream: null as any, execution: { success: true, output: { @@ -389,7 +394,6 @@ export const googleProvider: ProviderConfig = { duration: firstResponseTime, }, ], - // Cost will be calculated in logger }, }, logs: [], @@ -402,18 +406,49 @@ export const googleProvider: ProviderConfig = { }, } - return streamingExecution + streamingResult.stream = createReadableStreamFromGeminiStream( + response, + (content, usage) => { + streamingResult.execution.output.content = content + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + + if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) { + streamingResult.execution.output.providerTiming.timeSegments[0].endTime = + streamEndTime + streamingResult.execution.output.providerTiming.timeSegments[0].duration = + streamEndTime - providerStartTime + } + } + + if (usage) { + streamingResult.execution.output.tokens = { + prompt: 
usage.promptTokenCount || 0, + completion: usage.candidatesTokenCount || 0, + total: + usage.totalTokenCount || + (usage.promptTokenCount || 0) + (usage.candidatesTokenCount || 0), + } + } + } + ) + + return streamingResult } let geminiResponse = await response.json() - // Check structured output format if (payload.generationConfig?.responseSchema) { const candidate = geminiResponse.candidates?.[0] if (candidate?.content?.parts?.[0]?.text) { const text = candidate.content.parts[0].text try { - // Validate JSON structure JSON.parse(text) logger.info('Successfully received structured JSON output') } catch (_e) { @@ -422,7 +457,6 @@ export const googleProvider: ProviderConfig = { } } - // Initialize response tracking variables let content = '' let tokens = { prompt: 0, @@ -432,16 +466,13 @@ export const googleProvider: ProviderConfig = { const toolCalls = [] const toolResults = [] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops - // Track forced tools and their usage (similar to OpenAI pattern) const originalToolConfig = preparedTools?.toolConfig const forcedTools = preparedTools?.forcedTools || [] let usedForcedTools: string[] = [] let hasUsedForcedTool = false let currentToolConfig = originalToolConfig - // Helper function to check for forced tool usage in responses const checkForForcedToolUsage = (functionCall: { name: string; args: any }) => { if (currentToolConfig && forcedTools.length > 0) { const toolCallsForTracking = [{ name: functionCall.name, arguments: functionCall.args }] @@ -466,11 +497,9 @@ export const googleProvider: ProviderConfig = { } } - // Track time spent in model vs tools let modelTime = firstResponseTime let toolsTime = 0 - // Track each model and tool call segment with timestamps const timeSegments: TimeSegment[] = [ { type: 'model', @@ -482,46 +511,50 @@ export const googleProvider: ProviderConfig = { ] try { - // Extract content or function calls from initial response const candidate = geminiResponse.candidates?.[0] - // Check if response contains function calls + if (candidate?.finishReason === 'UNEXPECTED_TOOL_CALL') { + logger.warn( + 'Gemini returned UNEXPECTED_TOOL_CALL - model attempted to call a tool that was not provided', + { + finishReason: candidate.finishReason, + hasContent: !!candidate?.content, + hasParts: !!candidate?.content?.parts, + } + ) + content = extractTextContent(candidate) + } + const functionCall = extractFunctionCall(candidate) if (functionCall) { logger.info(`Received function call from Gemini: ${functionCall.name}`) - // Process function calls in a loop - while (iterationCount < MAX_ITERATIONS) { - // Get the latest function calls + while (iterationCount < MAX_TOOL_ITERATIONS) { const latestResponse = geminiResponse.candidates?.[0] const latestFunctionCall = extractFunctionCall(latestResponse) if (!latestFunctionCall) { - // No more function calls - extract final text content content = extractTextContent(latestResponse) break } logger.info( - `Processing function call: ${latestFunctionCall.name} (iteration ${iterationCount + 1}/${MAX_ITERATIONS})` + `Processing function call: ${latestFunctionCall.name} (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` ) - // Track time for tool calls const toolsStartTime = Date.now() try { const toolName = latestFunctionCall.name const toolArgs = latestFunctionCall.args || {} - // Get the tool from the tools registry const tool = request.tools?.find((t) => t.id === toolName) if (!tool) { logger.warn(`Tool ${toolName} not found in registry, skipping`) break } - // 
Execute the tool const toolCallStartTime = Date.now() const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request) @@ -529,7 +562,6 @@ export const googleProvider: ProviderConfig = { const toolCallEndTime = Date.now() const toolCallDuration = toolCallEndTime - toolCallStartTime - // Add to time segments for both success and failure timeSegments.push({ type: 'tool', name: toolName, @@ -538,13 +570,11 @@ export const googleProvider: ProviderConfig = { duration: toolCallDuration, }) - // Prepare result content for the LLM let resultContent: any if (result.success) { toolResults.push(result.output) resultContent = result.output } else { - // Include error information so LLM can respond appropriately resultContent = { error: true, message: result.error || 'Tool execution failed', @@ -562,14 +592,10 @@ export const googleProvider: ProviderConfig = { success: result.success, }) - // Prepare for next request with simplified messages - // Use simple format: original query + most recent function call + result const simplifiedMessages = [ - // Original user request - find the first user request ...(contents.filter((m) => m.role === 'user').length > 0 ? [contents.filter((m) => m.role === 'user')[0]] : [contents[0]]), - // Function call from model { role: 'model', parts: [ @@ -581,7 +607,6 @@ export const googleProvider: ProviderConfig = { }, ], }, - // Function response - but use USER role since Gemini only accepts user or model { role: 'user', parts: [ @@ -592,35 +617,27 @@ export const googleProvider: ProviderConfig = { }, ] - // Calculate tool call time const thisToolsTime = Date.now() - toolsStartTime toolsTime += thisToolsTime - // Check for forced tool usage and update configuration checkForForcedToolUsage(latestFunctionCall) - // Make the next request with updated messages const nextModelStartTime = Date.now() try { - // Check if we should stream the final response after tool calls if (request.stream) { - // Create a payload for the streaming response after tool calls const streamingPayload = { ...payload, contents: simplifiedMessages, } - // Check if we should remove tools and enable structured output for final response const allForcedToolsUsed = forcedTools.length > 0 && usedForcedTools.length === forcedTools.length if (allForcedToolsUsed && request.responseFormat) { - // All forced tools have been used, we can now remove tools and enable structured output streamingPayload.tools = undefined streamingPayload.toolConfig = undefined - // Add structured output format for final response const responseFormatSchema = request.responseFormat.schema || request.responseFormat const cleanSchema = cleanSchemaForGemini(responseFormatSchema) @@ -633,7 +650,6 @@ export const googleProvider: ProviderConfig = { logger.info('Using structured output for final response after tool execution') } else { - // Use updated tool configuration if available, otherwise default to AUTO if (currentToolConfig) { streamingPayload.toolConfig = currentToolConfig } else { @@ -641,11 +657,8 @@ export const googleProvider: ProviderConfig = { } } - // Check if we should handle this as a potential forced tool call - // First make a non-streaming request to see if we get a function call const checkPayload = { ...streamingPayload, - // Remove stream property to get non-streaming response } checkPayload.stream = undefined @@ -677,7 +690,6 @@ export const googleProvider: ProviderConfig = { const checkFunctionCall = extractFunctionCall(checkCandidate) if (checkFunctionCall) { - // We have a function call - handle 
it in non-streaming mode logger.info( 'Function call detected in follow-up, handling in non-streaming mode', { @@ -685,10 +697,8 @@ export const googleProvider: ProviderConfig = { } ) - // Update geminiResponse to continue the tool execution loop geminiResponse = checkResult - // Update token counts if available if (checkResult.usageMetadata) { tokens.prompt += checkResult.usageMetadata.promptTokenCount || 0 tokens.completion += checkResult.usageMetadata.candidatesTokenCount || 0 @@ -697,12 +707,10 @@ export const googleProvider: ProviderConfig = { (checkResult.usageMetadata.candidatesTokenCount || 0) } - // Calculate timing for this model call const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime modelTime += thisModelTime - // Add to time segments timeSegments.push({ type: 'model', name: `Model response (iteration ${iterationCount + 1})`, @@ -711,14 +719,32 @@ export const googleProvider: ProviderConfig = { duration: thisModelTime, }) - // Continue the loop to handle the function call iterationCount++ continue } - // No function call - proceed with streaming logger.info('No function call detected, proceeding with streaming response') - // Make the streaming request with streamGenerateContent endpoint + // Apply structured output for the final response if responseFormat is specified + // This works regardless of whether tools were forced or auto + if (request.responseFormat) { + streamingPayload.tools = undefined + streamingPayload.toolConfig = undefined + + const responseFormatSchema = + request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + if (!streamingPayload.generationConfig) { + streamingPayload.generationConfig = {} + } + streamingPayload.generationConfig.responseMimeType = 'application/json' + streamingPayload.generationConfig.responseSchema = cleanSchema + + logger.info( + 'Using structured output for final streaming response after tool execution' + ) + } + const streamingResponse = await fetch( `https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:streamGenerateContent?key=${request.apiKey}`, { @@ -742,15 +768,10 @@ export const googleProvider: ProviderConfig = { ) } - // Create a stream from the response - const stream = createReadableStreamFromGeminiStream(streamingResponse) - - // Calculate timing information const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime modelTime += thisModelTime - // Add to time segments timeSegments.push({ type: 'model', name: 'Final streaming response after tool calls', @@ -759,9 +780,8 @@ export const googleProvider: ProviderConfig = { duration: thisModelTime, }) - // Return a streaming execution with tool call information const streamingExecution: StreamingExecution = { - stream, + stream: null as any, execution: { success: true, output: { @@ -786,7 +806,6 @@ export const googleProvider: ProviderConfig = { iterations: iterationCount + 1, timeSegments, }, - // Cost will be calculated in logger }, logs: [], metadata: { @@ -798,25 +817,55 @@ export const googleProvider: ProviderConfig = { }, } + streamingExecution.stream = createReadableStreamFromGeminiStream( + streamingResponse, + (content, usage) => { + streamingExecution.execution.output.content = content + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingExecution.execution.output.providerTiming) { + 
streamingExecution.execution.output.providerTiming.endTime = + streamEndTimeISO + streamingExecution.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + } + + if (usage) { + const existingTokens = streamingExecution.execution.output.tokens || { + prompt: 0, + completion: 0, + total: 0, + } + streamingExecution.execution.output.tokens = { + prompt: (existingTokens.prompt || 0) + (usage.promptTokenCount || 0), + completion: + (existingTokens.completion || 0) + (usage.candidatesTokenCount || 0), + total: + (existingTokens.total || 0) + + (usage.totalTokenCount || + (usage.promptTokenCount || 0) + (usage.candidatesTokenCount || 0)), + } + } + } + ) + return streamingExecution } - // Make the next request for non-streaming response const nextPayload = { ...payload, contents: simplifiedMessages, } - // Check if we should remove tools and enable structured output for final response const allForcedToolsUsed = forcedTools.length > 0 && usedForcedTools.length === forcedTools.length if (allForcedToolsUsed && request.responseFormat) { - // All forced tools have been used, we can now remove tools and enable structured output nextPayload.tools = undefined nextPayload.toolConfig = undefined - // Add structured output format for final response const responseFormatSchema = request.responseFormat.schema || request.responseFormat const cleanSchema = cleanSchemaForGemini(responseFormatSchema) @@ -831,7 +880,6 @@ export const googleProvider: ProviderConfig = { 'Using structured output for final non-streaming response after tool execution' ) } else { - // Add updated tool configuration if available if (currentToolConfig) { nextPayload.toolConfig = currentToolConfig } @@ -864,7 +912,6 @@ export const googleProvider: ProviderConfig = { const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime - // Add to time segments timeSegments.push({ type: 'model', name: `Model response (iteration ${iterationCount + 1})`, @@ -873,15 +920,65 @@ export const googleProvider: ProviderConfig = { duration: thisModelTime, }) - // Add to model time modelTime += thisModelTime - // Check if we need to continue or break const nextCandidate = geminiResponse.candidates?.[0] const nextFunctionCall = extractFunctionCall(nextCandidate) if (!nextFunctionCall) { - content = extractTextContent(nextCandidate) + // If responseFormat is specified, make one final request with structured output + if (request.responseFormat) { + const finalPayload = { + ...payload, + contents: nextPayload.contents, + tools: undefined, + toolConfig: undefined, + } + + const responseFormatSchema = + request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + if (!finalPayload.generationConfig) { + finalPayload.generationConfig = {} + } + finalPayload.generationConfig.responseMimeType = 'application/json' + finalPayload.generationConfig.responseSchema = cleanSchema + + logger.info('Making final request with structured output after tool execution') + + const finalResponse = await fetch( + `https://generativelanguage.googleapis.com/v1beta/models/${requestedModel}:generateContent?key=${request.apiKey}`, + { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(finalPayload), + } + ) + + if (finalResponse.ok) { + const finalResult = await finalResponse.json() + const finalCandidate = finalResult.candidates?.[0] + content = extractTextContent(finalCandidate) + + if (finalResult.usageMetadata) { + 
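+                    // Add usage from this extra structured-output call to the token
+                    // totals already accumulated across the tool-call iterations.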
tokens.prompt += finalResult.usageMetadata.promptTokenCount || 0 + tokens.completion += finalResult.usageMetadata.candidatesTokenCount || 0 + tokens.total += + (finalResult.usageMetadata.promptTokenCount || 0) + + (finalResult.usageMetadata.candidatesTokenCount || 0) + } + } else { + logger.warn( + 'Failed to get structured output, falling back to regular response' + ) + content = extractTextContent(nextCandidate) + } + } else { + content = extractTextContent(nextCandidate) + } break } @@ -902,7 +999,6 @@ export const googleProvider: ProviderConfig = { } } } else { - // Regular text response content = extractTextContent(candidate) } } catch (error) { @@ -911,18 +1007,15 @@ export const googleProvider: ProviderConfig = { iterationCount, }) - // Don't rethrow, so we can still return partial results if (!content && toolCalls.length > 0) { content = `Tool call(s) executed: ${toolCalls.map((t) => t.name).join(', ')}. Results are available in the tool results.` } } - // Calculate overall timing const providerEndTime = Date.now() const providerEndTimeISO = new Date(providerEndTime).toISOString() const totalDuration = providerEndTime - providerStartTime - // Extract token usage if available if (geminiResponse.usageMetadata) { tokens = { prompt: geminiResponse.usageMetadata.promptTokenCount || 0, @@ -949,10 +1042,8 @@ export const googleProvider: ProviderConfig = { iterations: iterationCount + 1, timeSegments: timeSegments, }, - // Cost will be calculated in logger } } catch (error) { - // Include timing information even for errors const providerEndTime = Date.now() const providerEndTimeISO = new Date(providerEndTime).toISOString() const totalDuration = providerEndTime - providerStartTime @@ -962,7 +1053,6 @@ export const googleProvider: ProviderConfig = { duration: totalDuration, }) - // Create a new error with timing information const enhancedError = new Error(error instanceof Error ? 
error.message : String(error)) // @ts-ignore - Adding timing property to the error enhancedError.timing = { @@ -975,200 +1065,3 @@ export const googleProvider: ProviderConfig = { } }, } - -/** - * Helper function to remove additionalProperties from a schema object - * and perform a deep copy of the schema to avoid modifying the original - */ -function cleanSchemaForGemini(schema: any): any { - // Handle base cases - if (schema === null || schema === undefined) return schema - if (typeof schema !== 'object') return schema - if (Array.isArray(schema)) { - return schema.map((item) => cleanSchemaForGemini(item)) - } - - // Create a new object for the deep copy - const cleanedSchema: any = {} - - // Process each property in the schema - for (const key in schema) { - // Skip additionalProperties - if (key === 'additionalProperties') continue - - // Deep copy nested objects - cleanedSchema[key] = cleanSchemaForGemini(schema[key]) - } - - return cleanedSchema -} - -/** - * Helper function to extract content from a Gemini response, handling structured output - */ -function extractTextContent(candidate: any): string { - if (!candidate?.content?.parts) return '' - - // Check for JSON response (typically from structured output) - if (candidate.content.parts?.length === 1 && candidate.content.parts[0].text) { - const text = candidate.content.parts[0].text - if (text && (text.trim().startsWith('{') || text.trim().startsWith('['))) { - try { - JSON.parse(text) // Validate JSON - return text // Return valid JSON as-is - } catch (_e) { - /* Not valid JSON, continue with normal extraction */ - } - } - } - - // Standard text extraction - return candidate.content.parts - .filter((part: any) => part.text) - .map((part: any) => part.text) - .join('\n') -} - -/** - * Helper function to extract a function call from a Gemini response - */ -function extractFunctionCall(candidate: any): { name: string; args: any } | null { - if (!candidate?.content?.parts) return null - - // Check for functionCall in parts - for (const part of candidate.content.parts) { - if (part.functionCall) { - const args = part.functionCall.args || {} - // Parse string args if they look like JSON - if ( - typeof part.functionCall.args === 'string' && - part.functionCall.args.trim().startsWith('{') - ) { - try { - return { name: part.functionCall.name, args: JSON.parse(part.functionCall.args) } - } catch (_e) { - return { name: part.functionCall.name, args: part.functionCall.args } - } - } - return { name: part.functionCall.name, args } - } - } - - // Check for alternative function_call format - if (candidate.content.function_call) { - const args = - typeof candidate.content.function_call.arguments === 'string' - ? 
JSON.parse(candidate.content.function_call.arguments || '{}') - : candidate.content.function_call.arguments || {} - return { name: candidate.content.function_call.name, args } - } - - return null -} - -/** - * Convert OpenAI-style request format to Gemini format - */ -function convertToGeminiFormat(request: ProviderRequest): { - contents: any[] - tools: any[] | undefined - systemInstruction: any | undefined -} { - const contents = [] - let systemInstruction - - // Handle system prompt - if (request.systemPrompt) { - systemInstruction = { parts: [{ text: request.systemPrompt }] } - } - - // Add context as user message if present - if (request.context) { - contents.push({ role: 'user', parts: [{ text: request.context }] }) - } - - // Process messages - if (request.messages && request.messages.length > 0) { - for (const message of request.messages) { - if (message.role === 'system') { - // Add to system instruction - if (!systemInstruction) { - systemInstruction = { parts: [{ text: message.content }] } - } else { - // Append to existing system instruction - systemInstruction.parts[0].text = `${systemInstruction.parts[0].text || ''}\n${message.content}` - } - } else if (message.role === 'user' || message.role === 'assistant') { - // Convert to Gemini role format - const geminiRole = message.role === 'user' ? 'user' : 'model' - - // Add text content - if (message.content) { - contents.push({ role: geminiRole, parts: [{ text: message.content }] }) - } - - // Handle tool calls - if (message.role === 'assistant' && message.tool_calls && message.tool_calls.length > 0) { - const functionCalls = message.tool_calls.map((toolCall) => ({ - functionCall: { - name: toolCall.function?.name, - args: JSON.parse(toolCall.function?.arguments || '{}'), - }, - })) - - contents.push({ role: 'model', parts: functionCalls }) - } - } else if (message.role === 'tool') { - // Convert tool response (Gemini only accepts user/model roles) - contents.push({ - role: 'user', - parts: [{ text: `Function result: ${message.content}` }], - }) - } - } - } - - // Convert tools to Gemini function declarations - const tools = request.tools?.map((tool) => { - const toolParameters = { ...(tool.parameters || {}) } - - // Process schema properties - if (toolParameters.properties) { - const properties = { ...toolParameters.properties } - const required = toolParameters.required ? [...toolParameters.required] : [] - - // Remove defaults and optional parameters - for (const key in properties) { - const prop = properties[key] as any - - if (prop.default !== undefined) { - const { default: _, ...cleanProp } = prop - properties[key] = cleanProp - } - } - - // Build Gemini-compatible parameters schema - const parameters = { - type: toolParameters.type || 'object', - properties, - ...(required.length > 0 ? 
{ required } : {}), - } - - // Clean schema for Gemini - return { - name: tool.id, - description: tool.description || `Execute the ${tool.id} function`, - parameters: cleanSchemaForGemini(parameters), - } - } - - // Simple schema case - return { - name: tool.id, - description: tool.description || `Execute the ${tool.id} function`, - parameters: cleanSchemaForGemini(toolParameters), - } - }) - - return { contents, tools, systemInstruction } -} diff --git a/apps/sim/providers/google/utils.ts b/apps/sim/providers/google/utils.ts new file mode 100644 index 0000000000..8c14687e60 --- /dev/null +++ b/apps/sim/providers/google/utils.ts @@ -0,0 +1,171 @@ +import type { ProviderRequest } from '@/providers/types' + +/** + * Removes additionalProperties from a schema object (not supported by Gemini) + */ +export function cleanSchemaForGemini(schema: any): any { + if (schema === null || schema === undefined) return schema + if (typeof schema !== 'object') return schema + if (Array.isArray(schema)) { + return schema.map((item) => cleanSchemaForGemini(item)) + } + + const cleanedSchema: any = {} + + for (const key in schema) { + if (key === 'additionalProperties') continue + cleanedSchema[key] = cleanSchemaForGemini(schema[key]) + } + + return cleanedSchema +} + +/** + * Extracts text content from a Gemini response candidate, handling structured output + */ +export function extractTextContent(candidate: any): string { + if (!candidate?.content?.parts) return '' + + if (candidate.content.parts?.length === 1 && candidate.content.parts[0].text) { + const text = candidate.content.parts[0].text + if (text && (text.trim().startsWith('{') || text.trim().startsWith('['))) { + try { + JSON.parse(text) + return text + } catch (_e) { + /* Not valid JSON, continue with normal extraction */ + } + } + } + + return candidate.content.parts + .filter((part: any) => part.text) + .map((part: any) => part.text) + .join('\n') +} + +/** + * Extracts a function call from a Gemini response candidate + */ +export function extractFunctionCall(candidate: any): { name: string; args: any } | null { + if (!candidate?.content?.parts) return null + + for (const part of candidate.content.parts) { + if (part.functionCall) { + const args = part.functionCall.args || {} + if ( + typeof part.functionCall.args === 'string' && + part.functionCall.args.trim().startsWith('{') + ) { + try { + return { name: part.functionCall.name, args: JSON.parse(part.functionCall.args) } + } catch (_e) { + return { name: part.functionCall.name, args: part.functionCall.args } + } + } + return { name: part.functionCall.name, args } + } + } + + if (candidate.content.function_call) { + const args = + typeof candidate.content.function_call.arguments === 'string' + ? 
JSON.parse(candidate.content.function_call.arguments || '{}') + : candidate.content.function_call.arguments || {} + return { name: candidate.content.function_call.name, args } + } + + return null +} + +/** + * Converts OpenAI-style request format to Gemini format + */ +export function convertToGeminiFormat(request: ProviderRequest): { + contents: any[] + tools: any[] | undefined + systemInstruction: any | undefined +} { + const contents: any[] = [] + let systemInstruction + + if (request.systemPrompt) { + systemInstruction = { parts: [{ text: request.systemPrompt }] } + } + + if (request.context) { + contents.push({ role: 'user', parts: [{ text: request.context }] }) + } + + if (request.messages && request.messages.length > 0) { + for (const message of request.messages) { + if (message.role === 'system') { + if (!systemInstruction) { + systemInstruction = { parts: [{ text: message.content }] } + } else { + systemInstruction.parts[0].text = `${systemInstruction.parts[0].text || ''}\n${message.content}` + } + } else if (message.role === 'user' || message.role === 'assistant') { + const geminiRole = message.role === 'user' ? 'user' : 'model' + + if (message.content) { + contents.push({ role: geminiRole, parts: [{ text: message.content }] }) + } + + if (message.role === 'assistant' && message.tool_calls && message.tool_calls.length > 0) { + const functionCalls = message.tool_calls.map((toolCall) => ({ + functionCall: { + name: toolCall.function?.name, + args: JSON.parse(toolCall.function?.arguments || '{}'), + }, + })) + + contents.push({ role: 'model', parts: functionCalls }) + } + } else if (message.role === 'tool') { + contents.push({ + role: 'user', + parts: [{ text: `Function result: ${message.content}` }], + }) + } + } + } + + const tools = request.tools?.map((tool) => { + const toolParameters = { ...(tool.parameters || {}) } + + if (toolParameters.properties) { + const properties = { ...toolParameters.properties } + const required = toolParameters.required ? [...toolParameters.required] : [] + + for (const key in properties) { + const prop = properties[key] as any + + if (prop.default !== undefined) { + const { default: _, ...cleanProp } = prop + properties[key] = cleanProp + } + } + + const parameters = { + type: toolParameters.type || 'object', + properties, + ...(required.length > 0 ? { required } : {}), + } + + return { + name: tool.id, + description: tool.description || `Execute the ${tool.id} function`, + parameters: cleanSchemaForGemini(parameters), + } + } + + return { + name: tool.id, + description: tool.description || `Execute the ${tool.id} function`, + parameters: cleanSchemaForGemini(toolParameters), + } + }) + + return { contents, tools, systemInstruction } +} diff --git a/apps/sim/providers/groq/index.ts b/apps/sim/providers/groq/index.ts index 027f501920..97e00ac198 100644 --- a/apps/sim/providers/groq/index.ts +++ b/apps/sim/providers/groq/index.ts @@ -1,6 +1,8 @@ import { Groq } from 'groq-sdk' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { createReadableStreamFromGroqStream } from '@/providers/groq/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -17,27 +19,6 @@ import { executeTool } from '@/tools' const logger = createLogger('GroqProvider') -/** - * Helper to wrap Groq streaming into a browser-friendly ReadableStream - * of raw assistant text chunks. 
- */ -function createReadableStreamFromGroqStream(groqStream: any): ReadableStream { - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of groqStream) { - if (chunk.choices[0]?.delta?.content) { - controller.enqueue(new TextEncoder().encode(chunk.choices[0].delta.content)) - } - } - controller.close() - } catch (err) { - controller.error(err) - } - }, - }) -} - export const groqProvider: ProviderConfig = { id: 'groq', name: 'Groq', @@ -225,7 +206,6 @@ export const groqProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track time spent in model vs tools let modelTime = firstResponseTime @@ -243,7 +223,7 @@ export const groqProvider: ProviderConfig = { ] try { - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { diff --git a/apps/sim/providers/groq/utils.ts b/apps/sim/providers/groq/utils.ts new file mode 100644 index 0000000000..845c73af1e --- /dev/null +++ b/apps/sim/providers/groq/utils.ts @@ -0,0 +1,23 @@ +/** + * Helper to wrap Groq streaming into a browser-friendly ReadableStream + * of raw assistant text chunks. + * + * @param groqStream - The Groq streaming response + * @returns A ReadableStream that emits text chunks + */ +export function createReadableStreamFromGroqStream(groqStream: any): ReadableStream { + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of groqStream) { + if (chunk.choices[0]?.delta?.content) { + controller.enqueue(new TextEncoder().encode(chunk.choices[0].delta.content)) + } + } + controller.close() + } catch (err) { + controller.error(err) + } + }, + }) +} diff --git a/apps/sim/providers/index.ts b/apps/sim/providers/index.ts index 72d1423e15..3dbed8f423 100644 --- a/apps/sim/providers/index.ts +++ b/apps/sim/providers/index.ts @@ -12,6 +12,12 @@ import { const logger = createLogger('Providers') +/** + * Maximum number of iterations for tool call loops to prevent infinite loops. + * Used across all providers that support tool/function calling. 
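+ *
+ * A typical OpenAI-style provider guards its tool-call loop on this constant. Sketch only:
+ * iterationCount and currentResponse come from the surrounding provider implementation.
+ *
+ * @example
+ * while (iterationCount < MAX_TOOL_ITERATIONS) {
+ *   const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls
+ *   if (!toolCallsInResponse || toolCallsInResponse.length === 0) break
+ *   // execute the requested tools, append their results, then request the next completion
+ *   iterationCount++
+ * }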
+ */ +export const MAX_TOOL_ITERATIONS = 20 + function sanitizeRequest(request: ProviderRequest): ProviderRequest { const sanitizedRequest = { ...request } @@ -44,7 +50,6 @@ export async function executeProviderRequest( } const sanitizedRequest = sanitizeRequest(request) - // If responseFormat is provided, modify the system prompt to enforce structured output if (sanitizedRequest.responseFormat) { if ( typeof sanitizedRequest.responseFormat === 'string' && @@ -53,12 +58,10 @@ export async function executeProviderRequest( logger.info('Empty response format provided, ignoring it') sanitizedRequest.responseFormat = undefined } else { - // Generate structured output instructions const structuredOutputInstructions = generateStructuredOutputInstructions( sanitizedRequest.responseFormat ) - // Only add additional instructions if they're not empty if (structuredOutputInstructions.trim()) { const originalPrompt = sanitizedRequest.systemPrompt || '' sanitizedRequest.systemPrompt = @@ -69,10 +72,8 @@ export async function executeProviderRequest( } } - // Execute the request using the provider's implementation const response = await provider.executeRequest(sanitizedRequest) - // If we received a StreamingExecution or ReadableStream, just pass it through if (isStreamingExecution(response)) { logger.info('Provider returned StreamingExecution') return response diff --git a/apps/sim/providers/mistral/index.ts b/apps/sim/providers/mistral/index.ts index e2a194962f..c4d05fdfb2 100644 --- a/apps/sim/providers/mistral/index.ts +++ b/apps/sim/providers/mistral/index.ts @@ -1,6 +1,8 @@ import OpenAI from 'openai' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { createReadableStreamFromMistralStream } from '@/providers/mistral/utils' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -17,40 +19,6 @@ import { executeTool } from '@/tools' const logger = createLogger('MistralProvider') -function createReadableStreamFromMistralStream( - mistralStream: any, - onComplete?: (content: string, usage?: any) => void -): ReadableStream { - let fullContent = '' - let usageData: any = null - - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of mistralStream) { - if (chunk.usage) { - usageData = chunk.usage - } - - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - fullContent += content - controller.enqueue(new TextEncoder().encode(content)) - } - } - - if (onComplete) { - onComplete(fullContent, usageData) - } - - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - /** * Mistral AI provider configuration */ @@ -288,7 +256,6 @@ export const mistralProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 let modelTime = firstResponseTime let toolsTime = 0 @@ -307,14 +274,14 @@ export const mistralProvider: ProviderConfig = { checkForForcedToolUsage(currentResponse, originalToolChoice) - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { break } logger.info( - `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})` + `Processing 
${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` ) const toolsStartTime = Date.now() diff --git a/apps/sim/providers/mistral/utils.ts b/apps/sim/providers/mistral/utils.ts new file mode 100644 index 0000000000..f33f517d02 --- /dev/null +++ b/apps/sim/providers/mistral/utils.ts @@ -0,0 +1,39 @@ +/** + * Creates a ReadableStream from a Mistral AI streaming response + * @param mistralStream - The Mistral AI stream object + * @param onComplete - Optional callback when streaming completes + * @returns A ReadableStream that yields text chunks + */ +export function createReadableStreamFromMistralStream( + mistralStream: any, + onComplete?: (content: string, usage?: any) => void +): ReadableStream { + let fullContent = '' + let usageData: any = null + + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of mistralStream) { + if (chunk.usage) { + usageData = chunk.usage + } + + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + + if (onComplete) { + onComplete(fullContent, usageData) + } + + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} diff --git a/apps/sim/providers/models.ts b/apps/sim/providers/models.ts index aac7c30b40..4183fc720e 100644 --- a/apps/sim/providers/models.ts +++ b/apps/sim/providers/models.ts @@ -19,6 +19,7 @@ import { OllamaIcon, OpenAIIcon, OpenRouterIcon, + VertexIcon, VllmIcon, xAIIcon, } from '@/components/icons' @@ -130,7 +131,7 @@ export const PROVIDER_DEFINITIONS: Record = { }, capabilities: { reasoningEffort: { - values: ['none', 'low', 'medium', 'high'], + values: ['none', 'minimal', 'low', 'medium', 'high', 'xhigh'], }, verbosity: { values: ['low', 'medium', 'high'], @@ -283,7 +284,11 @@ export const PROVIDER_DEFINITIONS: Record = { output: 60, updatedAt: '2025-06-17', }, - capabilities: {}, + capabilities: { + reasoningEffort: { + values: ['low', 'medium', 'high'], + }, + }, contextWindow: 200000, }, { @@ -294,7 +299,11 @@ export const PROVIDER_DEFINITIONS: Record = { output: 8, updatedAt: '2025-06-17', }, - capabilities: {}, + capabilities: { + reasoningEffort: { + values: ['low', 'medium', 'high'], + }, + }, contextWindow: 128000, }, { @@ -305,7 +314,11 @@ export const PROVIDER_DEFINITIONS: Record = { output: 4.4, updatedAt: '2025-06-17', }, - capabilities: {}, + capabilities: { + reasoningEffort: { + values: ['low', 'medium', 'high'], + }, + }, contextWindow: 128000, }, { @@ -383,7 +396,7 @@ export const PROVIDER_DEFINITIONS: Record = { }, capabilities: { reasoningEffort: { - values: ['none', 'low', 'medium', 'high'], + values: ['none', 'minimal', 'low', 'medium', 'high', 'xhigh'], }, verbosity: { values: ['low', 'medium', 'high'], @@ -536,7 +549,11 @@ export const PROVIDER_DEFINITIONS: Record = { output: 40, updatedAt: '2025-06-15', }, - capabilities: {}, + capabilities: { + reasoningEffort: { + values: ['low', 'medium', 'high'], + }, + }, contextWindow: 128000, }, { @@ -547,7 +564,11 @@ export const PROVIDER_DEFINITIONS: Record = { output: 4.4, updatedAt: '2025-06-15', }, - capabilities: {}, + capabilities: { + reasoningEffort: { + values: ['low', 'medium', 'high'], + }, + }, contextWindow: 128000, }, { @@ -708,9 +729,22 @@ export const PROVIDER_DEFINITIONS: Record = { id: 'gemini-3-pro-preview', pricing: { input: 2.0, - cachedInput: 1.0, + cachedInput: 0.2, output: 12.0, - updatedAt: '2025-11-18', + updatedAt: '2025-12-17', + }, 
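+      // Pricing is USD per million tokens; cachedInput is the input rate applied to
+      // cached prompt tokens (calculateCost divides by 1,000,000 for per-token costs).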
+ capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, + { + id: 'gemini-3-flash-preview', + pricing: { + input: 0.5, + cachedInput: 0.05, + output: 3.0, + updatedAt: '2025-12-17', }, capabilities: { temperature: { min: 0, max: 2 }, @@ -756,6 +790,132 @@ export const PROVIDER_DEFINITIONS: Record = { }, contextWindow: 1048576, }, + { + id: 'gemini-2.0-flash', + pricing: { + input: 0.1, + output: 0.4, + updatedAt: '2025-12-17', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, + { + id: 'gemini-2.0-flash-lite', + pricing: { + input: 0.075, + output: 0.3, + updatedAt: '2025-12-17', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, + ], + }, + vertex: { + id: 'vertex', + name: 'Vertex AI', + description: "Google's Vertex AI platform for Gemini models", + defaultModel: 'vertex/gemini-2.5-pro', + modelPatterns: [/^vertex\//], + icon: VertexIcon, + capabilities: { + toolUsageControl: true, + }, + models: [ + { + id: 'vertex/gemini-3-pro-preview', + pricing: { + input: 2.0, + cachedInput: 0.2, + output: 12.0, + updatedAt: '2025-12-17', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, + { + id: 'vertex/gemini-3-flash-preview', + pricing: { + input: 0.5, + cachedInput: 0.05, + output: 3.0, + updatedAt: '2025-12-17', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, + { + id: 'vertex/gemini-2.5-pro', + pricing: { + input: 1.25, + cachedInput: 0.125, + output: 10.0, + updatedAt: '2025-12-02', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1048576, + }, + { + id: 'vertex/gemini-2.5-flash', + pricing: { + input: 0.3, + cachedInput: 0.03, + output: 2.5, + updatedAt: '2025-12-02', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1048576, + }, + { + id: 'vertex/gemini-2.5-flash-lite', + pricing: { + input: 0.1, + cachedInput: 0.01, + output: 0.4, + updatedAt: '2025-12-02', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1048576, + }, + { + id: 'vertex/gemini-2.0-flash', + pricing: { + input: 0.1, + output: 0.4, + updatedAt: '2025-12-17', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, + { + id: 'vertex/gemini-2.0-flash-lite', + pricing: { + input: 0.075, + output: 0.3, + updatedAt: '2025-12-17', + }, + capabilities: { + temperature: { min: 0, max: 2 }, + }, + contextWindow: 1000000, + }, ], }, deepseek: { @@ -1708,6 +1868,20 @@ export function getModelsWithReasoningEffort(): string[] { return models } +/** + * Get the reasoning effort values for a specific model + * Returns the valid options for that model, or null if the model doesn't support reasoning effort + */ +export function getReasoningEffortValuesForModel(modelId: string): string[] | null { + for (const provider of Object.values(PROVIDER_DEFINITIONS)) { + const model = provider.models.find((m) => m.id.toLowerCase() === modelId.toLowerCase()) + if (model?.capabilities.reasoningEffort) { + return model.capabilities.reasoningEffort.values + } + } + return null +} + /** * Get all models that support verbosity */ @@ -1722,3 +1896,17 @@ export function getModelsWithVerbosity(): string[] { } return models } + +/** + * Get the verbosity values for a specific model + * Returns the valid options for that model, or null if the model doesn't support verbosity + */ +export function 
getVerbosityValuesForModel(modelId: string): string[] | null { + for (const provider of Object.values(PROVIDER_DEFINITIONS)) { + const model = provider.models.find((m) => m.id.toLowerCase() === modelId.toLowerCase()) + if (model?.capabilities.verbosity) { + return model.capabilities.verbosity.values + } + } + return null +} diff --git a/apps/sim/providers/ollama/index.ts b/apps/sim/providers/ollama/index.ts index 0118e53ffc..acdafa91a2 100644 --- a/apps/sim/providers/ollama/index.ts +++ b/apps/sim/providers/ollama/index.ts @@ -2,7 +2,9 @@ import OpenAI from 'openai' import { env } from '@/lib/core/config/env' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' import type { ModelsObject } from '@/providers/ollama/types' +import { createReadableStreamFromOllamaStream } from '@/providers/ollama/utils' import type { ProviderConfig, ProviderRequest, @@ -16,46 +18,6 @@ import { executeTool } from '@/tools' const logger = createLogger('OllamaProvider') const OLLAMA_HOST = env.OLLAMA_URL || 'http://localhost:11434' -/** - * Helper function to convert an Ollama stream to a standard ReadableStream - * and collect completion metrics - */ -function createReadableStreamFromOllamaStream( - ollamaStream: any, - onComplete?: (content: string, usage?: any) => void -): ReadableStream { - let fullContent = '' - let usageData: any = null - - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of ollamaStream) { - // Check for usage data in the final chunk - if (chunk.usage) { - usageData = chunk.usage - } - - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - fullContent += content - controller.enqueue(new TextEncoder().encode(content)) - } - } - - // Once stream is complete, call the completion callback with the final content and usage - if (onComplete) { - onComplete(fullContent, usageData) - } - - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - export const ollamaProvider: ProviderConfig = { id: 'ollama', name: 'Ollama', @@ -334,7 +296,6 @@ export const ollamaProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track time spent in model vs tools let modelTime = firstResponseTime @@ -351,7 +312,7 @@ export const ollamaProvider: ProviderConfig = { }, ] - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { @@ -359,7 +320,7 @@ export const ollamaProvider: ProviderConfig = { } logger.info( - `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})` + `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` ) // Track time for tool calls in this batch diff --git a/apps/sim/providers/ollama/utils.ts b/apps/sim/providers/ollama/utils.ts new file mode 100644 index 0000000000..fc012f366f --- /dev/null +++ b/apps/sim/providers/ollama/utils.ts @@ -0,0 +1,37 @@ +/** + * Helper function to convert an Ollama stream to a standard ReadableStream + * and collect completion metrics + */ +export function createReadableStreamFromOllamaStream( + ollamaStream: any, + onComplete?: (content: string, 
usage?: any) => void +): ReadableStream { + let fullContent = '' + let usageData: any = null + + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of ollamaStream) { + if (chunk.usage) { + usageData = chunk.usage + } + + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + + if (onComplete) { + onComplete(fullContent, usageData) + } + + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} diff --git a/apps/sim/providers/openai/index.ts b/apps/sim/providers/openai/index.ts index b925dc7d1f..3758fea1f5 100644 --- a/apps/sim/providers/openai/index.ts +++ b/apps/sim/providers/openai/index.ts @@ -1,7 +1,9 @@ import OpenAI from 'openai' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' +import { createReadableStreamFromOpenAIStream } from '@/providers/openai/utils' import type { ProviderConfig, ProviderRequest, @@ -17,46 +19,6 @@ import { executeTool } from '@/tools' const logger = createLogger('OpenAIProvider') -/** - * Helper function to convert an OpenAI stream to a standard ReadableStream - * and collect completion metrics - */ -function createReadableStreamFromOpenAIStream( - openaiStream: any, - onComplete?: (content: string, usage?: any) => void -): ReadableStream { - let fullContent = '' - let usageData: any = null - - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of openaiStream) { - // Check for usage data in the final chunk - if (chunk.usage) { - usageData = chunk.usage - } - - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - fullContent += content - controller.enqueue(new TextEncoder().encode(content)) - } - } - - // Once stream is complete, call the completion callback with the final content and usage - if (onComplete) { - onComplete(fullContent, usageData) - } - - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - /** * OpenAI provider configuration */ @@ -319,7 +281,6 @@ export const openaiProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Prevent infinite loops // Track time spent in model vs tools let modelTime = firstResponseTime @@ -342,7 +303,7 @@ export const openaiProvider: ProviderConfig = { // Check if a forced tool was used in the first response checkForForcedToolUsage(currentResponse, originalToolChoice) - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { @@ -350,7 +311,7 @@ export const openaiProvider: ProviderConfig = { } logger.info( - `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})` + `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` ) // Track time for tool calls in this batch diff --git a/apps/sim/providers/openai/utils.ts b/apps/sim/providers/openai/utils.ts new file mode 100644 index 0000000000..1f35bf6c31 --- /dev/null +++ b/apps/sim/providers/openai/utils.ts @@ -0,0 +1,37 @@ +/** + 
* Helper function to convert an OpenAI stream to a standard ReadableStream + * and collect completion metrics + */ +export function createReadableStreamFromOpenAIStream( + openaiStream: any, + onComplete?: (content: string, usage?: any) => void +): ReadableStream { + let fullContent = '' + let usageData: any = null + + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of openaiStream) { + if (chunk.usage) { + usageData = chunk.usage + } + + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + + if (onComplete) { + onComplete(fullContent, usageData) + } + + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} diff --git a/apps/sim/providers/openrouter/index.ts b/apps/sim/providers/openrouter/index.ts index 979b5783ac..00fb33db0f 100644 --- a/apps/sim/providers/openrouter/index.ts +++ b/apps/sim/providers/openrouter/index.ts @@ -1,56 +1,23 @@ import OpenAI from 'openai' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' +import { + checkForForcedToolUsage, + createReadableStreamFromOpenAIStream, +} from '@/providers/openrouter/utils' import type { ProviderConfig, ProviderRequest, ProviderResponse, TimeSegment, } from '@/providers/types' -import { - prepareToolExecution, - prepareToolsWithUsageControl, - trackForcedToolUsage, -} from '@/providers/utils' +import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils' import { executeTool } from '@/tools' const logger = createLogger('OpenRouterProvider') -function createReadableStreamFromOpenAIStream( - openaiStream: any, - onComplete?: (content: string, usage?: any) => void -): ReadableStream { - let fullContent = '' - let usageData: any = null - - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of openaiStream) { - if (chunk.usage) { - usageData = chunk.usage - } - - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - fullContent += content - controller.enqueue(new TextEncoder().encode(content)) - } - } - - if (onComplete) { - onComplete(fullContent, usageData) - } - - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - export const openRouterProvider: ProviderConfig = { id: 'openrouter', name: 'OpenRouter', @@ -227,7 +194,6 @@ export const openRouterProvider: ProviderConfig = { const toolResults = [] as any[] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 let modelTime = firstResponseTime let toolsTime = 0 let hasUsedForcedTool = false @@ -241,28 +207,16 @@ export const openRouterProvider: ProviderConfig = { }, ] - const checkForForcedToolUsage = ( - response: any, - toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any } - ) => { - if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) { - const toolCallsResponse = response.choices[0].message.tool_calls - const result = trackForcedToolUsage( - toolCallsResponse, - toolChoice, - logger, - 'openrouter', - forcedTools, - usedForcedTools - ) - hasUsedForcedTool = result.hasUsedForcedTool - usedForcedTools = result.usedForcedTools - } - } - - checkForForcedToolUsage(currentResponse, originalToolChoice) + 
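+      // checkForForcedToolUsage now lives in providers/openrouter/utils and returns its
+      // tracking state rather than mutating closure variables, so the caller copies the
+      // result back into hasUsedForcedTool / usedForcedTools after each call.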
const forcedToolResult = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = forcedToolResult.hasUsedForcedTool + usedForcedTools = forcedToolResult.usedForcedTools - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { break @@ -359,7 +313,14 @@ export const openRouterProvider: ProviderConfig = { const nextModelStartTime = Date.now() currentResponse = await client.chat.completions.create(nextPayload) - checkForForcedToolUsage(currentResponse, nextPayload.tool_choice) + const nextForcedToolResult = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = nextForcedToolResult.hasUsedForcedTool + usedForcedTools = nextForcedToolResult.usedForcedTools const nextModelEndTime = Date.now() const thisModelTime = nextModelEndTime - nextModelStartTime timeSegments.push({ diff --git a/apps/sim/providers/openrouter/utils.ts b/apps/sim/providers/openrouter/utils.ts new file mode 100644 index 0000000000..fc9a4254d4 --- /dev/null +++ b/apps/sim/providers/openrouter/utils.ts @@ -0,0 +1,78 @@ +import { createLogger } from '@/lib/logs/console/logger' +import { trackForcedToolUsage } from '@/providers/utils' + +const logger = createLogger('OpenRouterProvider') + +/** + * Creates a ReadableStream from an OpenAI-compatible stream response + * @param openaiStream - The OpenAI stream to convert + * @param onComplete - Optional callback when streaming is complete with content and usage data + * @returns ReadableStream that emits text chunks + */ +export function createReadableStreamFromOpenAIStream( + openaiStream: any, + onComplete?: (content: string, usage?: any) => void +): ReadableStream { + let fullContent = '' + let usageData: any = null + + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of openaiStream) { + if (chunk.usage) { + usageData = chunk.usage + } + + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + + if (onComplete) { + onComplete(fullContent, usageData) + } + + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} + +/** + * Checks if a forced tool was used in the response and updates tracking + * @param response - The API response containing tool calls + * @param toolChoice - The tool choice configuration (string or object) + * @param forcedTools - Array of forced tool names + * @param usedForcedTools - Array of already used forced tools + * @returns Object with hasUsedForcedTool flag and updated usedForcedTools array + */ +export function checkForForcedToolUsage( + response: any, + toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }, + forcedTools: string[], + usedForcedTools: string[] +): { hasUsedForcedTool: boolean; usedForcedTools: string[] } { + let hasUsedForcedTool = false + let updatedUsedForcedTools = usedForcedTools + + if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) { + const toolCallsResponse = response.choices[0].message.tool_calls + const result = trackForcedToolUsage( + toolCallsResponse, + toolChoice, + logger, + 'openrouter', + forcedTools, + updatedUsedForcedTools + ) + hasUsedForcedTool = 
result.hasUsedForcedTool + updatedUsedForcedTools = result.usedForcedTools + } + + return { hasUsedForcedTool, usedForcedTools: updatedUsedForcedTools } +} diff --git a/apps/sim/providers/types.ts b/apps/sim/providers/types.ts index 6c2fd1f00a..4ada41589a 100644 --- a/apps/sim/providers/types.ts +++ b/apps/sim/providers/types.ts @@ -5,6 +5,7 @@ export type ProviderId = | 'azure-openai' | 'anthropic' | 'google' + | 'vertex' | 'deepseek' | 'xai' | 'cerebras' @@ -163,6 +164,9 @@ export interface ProviderRequest { // Azure OpenAI specific parameters azureEndpoint?: string azureApiVersion?: string + // Vertex AI specific parameters + vertexProject?: string + vertexLocation?: string // GPT-5 specific parameters reasoningEffort?: string verbosity?: string diff --git a/apps/sim/providers/utils.test.ts b/apps/sim/providers/utils.test.ts index 4fa9132141..9085908c29 100644 --- a/apps/sim/providers/utils.test.ts +++ b/apps/sim/providers/utils.test.ts @@ -383,6 +383,17 @@ describe('Model Capabilities', () => { expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-mini') expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5-nano') + // Should contain gpt-5.2 models + expect(MODELS_WITH_REASONING_EFFORT).toContain('gpt-5.2') + expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/gpt-5.2') + + // Should contain o-series reasoning models (reasoning_effort added Dec 17, 2024) + expect(MODELS_WITH_REASONING_EFFORT).toContain('o1') + expect(MODELS_WITH_REASONING_EFFORT).toContain('o3') + expect(MODELS_WITH_REASONING_EFFORT).toContain('o4-mini') + expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/o3') + expect(MODELS_WITH_REASONING_EFFORT).toContain('azure/o4-mini') + // Should NOT contain non-reasoning GPT-5 models expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-5-chat-latest') expect(MODELS_WITH_REASONING_EFFORT).not.toContain('azure/gpt-5-chat-latest') @@ -390,7 +401,6 @@ describe('Model Capabilities', () => { // Should NOT contain other models expect(MODELS_WITH_REASONING_EFFORT).not.toContain('gpt-4o') expect(MODELS_WITH_REASONING_EFFORT).not.toContain('claude-sonnet-4-0') - expect(MODELS_WITH_REASONING_EFFORT).not.toContain('o1') }) it.concurrent('should have correct models in MODELS_WITH_VERBOSITY', () => { @@ -409,19 +419,37 @@ describe('Model Capabilities', () => { expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-mini') expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5-nano') + // Should contain gpt-5.2 models + expect(MODELS_WITH_VERBOSITY).toContain('gpt-5.2') + expect(MODELS_WITH_VERBOSITY).toContain('azure/gpt-5.2') + // Should NOT contain non-reasoning GPT-5 models expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-5-chat-latest') expect(MODELS_WITH_VERBOSITY).not.toContain('azure/gpt-5-chat-latest') + // Should NOT contain o-series models (they support reasoning_effort but not verbosity) + expect(MODELS_WITH_VERBOSITY).not.toContain('o1') + expect(MODELS_WITH_VERBOSITY).not.toContain('o3') + expect(MODELS_WITH_VERBOSITY).not.toContain('o4-mini') + // Should NOT contain other models expect(MODELS_WITH_VERBOSITY).not.toContain('gpt-4o') expect(MODELS_WITH_VERBOSITY).not.toContain('claude-sonnet-4-0') - expect(MODELS_WITH_VERBOSITY).not.toContain('o1') }) - it.concurrent('should have same models in both reasoning effort and verbosity arrays', () => { - // GPT-5 models that support reasoning effort should also support verbosity and vice versa - expect(MODELS_WITH_REASONING_EFFORT.sort()).toEqual(MODELS_WITH_VERBOSITY.sort()) + it.concurrent('should have GPT-5 models in 
both reasoning effort and verbosity arrays', () => { + // GPT-5 series models support both reasoning effort and verbosity + const gpt5ModelsWithReasoningEffort = MODELS_WITH_REASONING_EFFORT.filter( + (m) => m.includes('gpt-5') && !m.includes('chat-latest') + ) + const gpt5ModelsWithVerbosity = MODELS_WITH_VERBOSITY.filter( + (m) => m.includes('gpt-5') && !m.includes('chat-latest') + ) + expect(gpt5ModelsWithReasoningEffort.sort()).toEqual(gpt5ModelsWithVerbosity.sort()) + + // o-series models have reasoning effort but NOT verbosity + expect(MODELS_WITH_REASONING_EFFORT).toContain('o1') + expect(MODELS_WITH_VERBOSITY).not.toContain('o1') }) }) }) diff --git a/apps/sim/providers/utils.ts b/apps/sim/providers/utils.ts index d1cbe1b819..179df6e0b7 100644 --- a/apps/sim/providers/utils.ts +++ b/apps/sim/providers/utils.ts @@ -21,6 +21,8 @@ import { getModelsWithVerbosity, getProviderModels as getProviderModelsFromDefinitions, getProvidersWithToolUsageControl, + getReasoningEffortValuesForModel as getReasoningEffortValuesForModelFromDefinitions, + getVerbosityValuesForModel as getVerbosityValuesForModelFromDefinitions, PROVIDER_DEFINITIONS, supportsTemperature as supportsTemperatureFromDefinitions, supportsToolUsageControl as supportsToolUsageControlFromDefinitions, @@ -30,6 +32,7 @@ import { ollamaProvider } from '@/providers/ollama' import { openaiProvider } from '@/providers/openai' import { openRouterProvider } from '@/providers/openrouter' import type { ProviderConfig, ProviderId, ProviderToolConfig } from '@/providers/types' +import { vertexProvider } from '@/providers/vertex' import { vllmProvider } from '@/providers/vllm' import { xAIProvider } from '@/providers/xai' import { useCustomToolsStore } from '@/stores/custom-tools/store' @@ -67,6 +70,11 @@ export const providers: Record< models: getProviderModelsFromDefinitions('google'), modelPatterns: PROVIDER_DEFINITIONS.google.modelPatterns, }, + vertex: { + ...vertexProvider, + models: getProviderModelsFromDefinitions('vertex'), + modelPatterns: PROVIDER_DEFINITIONS.vertex.modelPatterns, + }, deepseek: { ...deepseekProvider, models: getProviderModelsFromDefinitions('deepseek'), @@ -274,16 +282,12 @@ export function getProviderIcon(model: string): React.ComponentType<{ className? 
} export function generateStructuredOutputInstructions(responseFormat: any): string { - // Handle null/undefined input if (!responseFormat) return '' - // If using the new JSON Schema format, don't add additional instructions - // This is necessary because providers now handle the schema directly if (responseFormat.schema || (responseFormat.type === 'object' && responseFormat.properties)) { return '' } - // Handle legacy format with fields array if (!responseFormat.fields) return '' function generateFieldStructure(field: any): string { @@ -335,10 +339,8 @@ Each metric should be an object containing 'score' (number) and 'reasoning' (str } export function extractAndParseJSON(content: string): any { - // First clean up the string const trimmed = content.trim() - // Find the first '{' and last '}' const firstBrace = trimmed.indexOf('{') const lastBrace = trimmed.lastIndexOf('}') @@ -346,17 +348,15 @@ export function extractAndParseJSON(content: string): any { throw new Error('No JSON object found in content') } - // Extract just the JSON part const jsonStr = trimmed.slice(firstBrace, lastBrace + 1) try { return JSON.parse(jsonStr) } catch (_error) { - // If parsing fails, try to clean up common issues const cleaned = jsonStr - .replace(/\n/g, ' ') // Remove newlines - .replace(/\s+/g, ' ') // Normalize whitespace - .replace(/,\s*([}\]])/g, '$1') // Remove trailing commas + .replace(/\n/g, ' ') + .replace(/\s+/g, ' ') + .replace(/,\s*([}\]])/g, '$1') try { return JSON.parse(cleaned) @@ -386,10 +386,10 @@ export function transformCustomTool(customTool: any): ProviderToolConfig { } return { - id: `custom_${customTool.id}`, // Prefix with 'custom_' to identify custom tools + id: `custom_${customTool.id}`, name: schema.function.name, description: schema.function.description || '', - params: {}, // This will be derived from parameters + params: {}, parameters: { type: schema.function.parameters.type, properties: schema.function.parameters.properties, @@ -402,10 +402,8 @@ export function transformCustomTool(customTool: any): ProviderToolConfig { * Gets all available custom tools as provider tool configs */ export function getCustomTools(): ProviderToolConfig[] { - // Get custom tools from the store const customTools = useCustomToolsStore.getState().getAllTools() - // Transform each custom tool into a provider tool config return customTools.map(transformCustomTool) } @@ -427,20 +425,16 @@ export async function transformBlockTool( ): Promise { const { selectedOperation, getAllBlocks, getTool, getToolAsync } = options - // Get the block definition const blockDef = getAllBlocks().find((b: any) => b.type === block.type) if (!blockDef) { logger.warn(`Block definition not found for type: ${block.type}`) return null } - // If the block has multiple operations, use the selected one or the first one let toolId: string | null = null if ((blockDef.tools?.access?.length || 0) > 1) { - // If we have an operation dropdown in the block and a selected operation if (selectedOperation && blockDef.tools?.config?.tool) { - // Use the block's tool selection function to get the right tool try { toolId = blockDef.tools.config.tool({ ...block.params, @@ -455,11 +449,9 @@ export async function transformBlockTool( return null } } else { - // Default to first tool if no operation specified toolId = blockDef.tools.access[0] } } else { - // Single tool case toolId = blockDef.tools?.access?.[0] || null } @@ -468,14 +460,11 @@ export async function transformBlockTool( return null } - // Get the tool config - check if it's a custom 
tool that needs async fetching let toolConfig: any if (toolId.startsWith('custom_') && getToolAsync) { - // Use the async version for custom tools toolConfig = await getToolAsync(toolId) } else { - // Use the synchronous version for built-in tools toolConfig = getTool(toolId) } @@ -484,16 +473,12 @@ export async function transformBlockTool( return null } - // Import the new tool parameter utilities const { createLLMToolSchema } = await import('@/tools/params') - // Get user-provided parameters from the block const userProvidedParams = block.params || {} - // Create LLM schema that excludes user-provided parameters const llmSchema = await createLLMToolSchema(toolConfig, userProvidedParams) - // Return formatted tool config return { id: toolConfig.id, name: toolConfig.name, @@ -521,15 +506,12 @@ export function calculateCost( inputMultiplier?: number, outputMultiplier?: number ) { - // First check if it's an embedding model let pricing = getEmbeddingModelPricing(model) - // If not found, check chat models if (!pricing) { pricing = getModelPricingFromDefinitions(model) } - // If no pricing found, return default pricing if (!pricing) { const defaultPricing = { input: 1.0, @@ -545,8 +527,6 @@ export function calculateCost( } } - // Calculate costs in USD - // Convert from "per million tokens" to "per token" by dividing by 1,000,000 const inputCost = promptTokens * (useCachedInput && pricing.cachedInput @@ -559,7 +539,7 @@ export function calculateCost( const finalTotalCost = finalInputCost + finalOutputCost return { - input: Number.parseFloat(finalInputCost.toFixed(8)), // Use 8 decimal places for small costs + input: Number.parseFloat(finalInputCost.toFixed(8)), output: Number.parseFloat(finalOutputCost.toFixed(8)), total: Number.parseFloat(finalTotalCost.toFixed(8)), pricing, @@ -997,6 +977,22 @@ export function supportsToolUsageControl(provider: string): boolean { return supportsToolUsageControlFromDefinitions(provider) } +/** + * Get reasoning effort values for a specific model + * Returns the valid options for that model, or null if the model doesn't support reasoning effort + */ +export function getReasoningEffortValuesForModel(model: string): string[] | null { + return getReasoningEffortValuesForModelFromDefinitions(model) +} + +/** + * Get verbosity values for a specific model + * Returns the valid options for that model, or null if the model doesn't support verbosity + */ +export function getVerbosityValuesForModel(model: string): string[] | null { + return getVerbosityValuesForModelFromDefinitions(model) +} + /** * Prepare tool execution parameters, separating tool parameters from system parameters */ diff --git a/apps/sim/providers/vertex/index.ts b/apps/sim/providers/vertex/index.ts new file mode 100644 index 0000000000..0a25d304da --- /dev/null +++ b/apps/sim/providers/vertex/index.ts @@ -0,0 +1,899 @@ +import { env } from '@/lib/core/config/env' +import { createLogger } from '@/lib/logs/console/logger' +import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' +import { + cleanSchemaForGemini, + convertToGeminiFormat, + extractFunctionCall, + extractTextContent, +} from '@/providers/google/utils' +import { getProviderDefaultModel, getProviderModels } from '@/providers/models' +import type { + ProviderConfig, + ProviderRequest, + ProviderResponse, + TimeSegment, +} from '@/providers/types' +import { + prepareToolExecution, + prepareToolsWithUsageControl, + trackForcedToolUsage, +} from '@/providers/utils' +import { 
buildVertexEndpoint, createReadableStreamFromVertexStream } from '@/providers/vertex/utils' +import { executeTool } from '@/tools' + +const logger = createLogger('VertexProvider') + +/** + * Vertex AI provider configuration + */ +export const vertexProvider: ProviderConfig = { + id: 'vertex', + name: 'Vertex AI', + description: "Google's Vertex AI platform for Gemini models", + version: '1.0.0', + models: getProviderModels('vertex'), + defaultModel: getProviderDefaultModel('vertex'), + + executeRequest: async ( + request: ProviderRequest + ): Promise => { + const vertexProject = env.VERTEX_PROJECT || request.vertexProject + const vertexLocation = env.VERTEX_LOCATION || request.vertexLocation || 'us-central1' + + if (!vertexProject) { + throw new Error( + 'Vertex AI project is required. Please provide it via VERTEX_PROJECT environment variable or vertexProject parameter.' + ) + } + + if (!request.apiKey) { + throw new Error( + 'Access token is required for Vertex AI. Run `gcloud auth print-access-token` to get one, or use a service account.' + ) + } + + logger.info('Preparing Vertex AI request', { + model: request.model || 'vertex/gemini-2.5-pro', + hasSystemPrompt: !!request.systemPrompt, + hasMessages: !!request.messages?.length, + hasTools: !!request.tools?.length, + toolCount: request.tools?.length || 0, + hasResponseFormat: !!request.responseFormat, + streaming: !!request.stream, + project: vertexProject, + location: vertexLocation, + }) + + const providerStartTime = Date.now() + const providerStartTimeISO = new Date(providerStartTime).toISOString() + + try { + const { contents, tools, systemInstruction } = convertToGeminiFormat(request) + + const requestedModel = (request.model || 'vertex/gemini-2.5-pro').replace('vertex/', '') + + const payload: any = { + contents, + generationConfig: {}, + } + + if (request.temperature !== undefined && request.temperature !== null) { + payload.generationConfig.temperature = request.temperature + } + + if (request.maxTokens !== undefined) { + payload.generationConfig.maxOutputTokens = request.maxTokens + } + + if (systemInstruction) { + payload.systemInstruction = systemInstruction + } + + if (request.responseFormat && !tools?.length) { + const responseFormatSchema = request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + payload.generationConfig.responseMimeType = 'application/json' + payload.generationConfig.responseSchema = cleanSchema + + logger.info('Using Vertex AI native structured output format', { + hasSchema: !!cleanSchema, + mimeType: 'application/json', + }) + } else if (request.responseFormat && tools?.length) { + logger.warn( + 'Vertex AI does not support structured output (responseFormat) with function calling (tools). Structured output will be ignored.' 
+ ) + } + + let preparedTools: ReturnType | null = null + + if (tools?.length) { + preparedTools = prepareToolsWithUsageControl(tools, request.tools, logger, 'google') + const { tools: filteredTools, toolConfig } = preparedTools + + if (filteredTools?.length) { + payload.tools = [ + { + functionDeclarations: filteredTools, + }, + ] + + if (toolConfig) { + payload.toolConfig = toolConfig + } + + logger.info('Vertex AI request with tools:', { + toolCount: filteredTools.length, + model: requestedModel, + tools: filteredTools.map((t) => t.name), + hasToolConfig: !!toolConfig, + toolConfig: toolConfig, + }) + } + } + + const initialCallTime = Date.now() + const shouldStream = !!(request.stream && !tools?.length) + + const endpoint = buildVertexEndpoint( + vertexProject, + vertexLocation, + requestedModel, + shouldStream + ) + + if (request.stream && tools?.length) { + logger.info('Streaming disabled for initial request due to tools presence', { + toolCount: tools.length, + willStreamAfterTools: true, + }) + } + + const response = await fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${request.apiKey}`, + }, + body: JSON.stringify(payload), + }) + + if (!response.ok) { + const responseText = await response.text() + logger.error('Vertex AI API error details:', { + status: response.status, + statusText: response.statusText, + responseBody: responseText, + }) + throw new Error(`Vertex AI API error: ${response.status} ${response.statusText}`) + } + + const firstResponseTime = Date.now() - initialCallTime + + if (shouldStream) { + logger.info('Handling Vertex AI streaming response') + + const streamingResult: StreamingExecution = { + stream: null as any, + execution: { + success: true, + output: { + content: '', + model: request.model, + tokens: { + prompt: 0, + completion: 0, + total: 0, + }, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: firstResponseTime, + modelTime: firstResponseTime, + toolsTime: 0, + firstResponseTime, + iterations: 1, + timeSegments: [ + { + type: 'model', + name: 'Initial streaming response', + startTime: initialCallTime, + endTime: initialCallTime + firstResponseTime, + duration: firstResponseTime, + }, + ], + }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: firstResponseTime, + }, + isStreaming: true, + }, + } + + streamingResult.stream = createReadableStreamFromVertexStream( + response, + (content, usage) => { + streamingResult.execution.output.content = content + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingResult.execution.output.providerTiming) { + streamingResult.execution.output.providerTiming.endTime = streamEndTimeISO + streamingResult.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + + if (streamingResult.execution.output.providerTiming.timeSegments?.[0]) { + streamingResult.execution.output.providerTiming.timeSegments[0].endTime = + streamEndTime + streamingResult.execution.output.providerTiming.timeSegments[0].duration = + streamEndTime - providerStartTime + } + } + + if (usage) { + streamingResult.execution.output.tokens = { + prompt: usage.promptTokenCount || 0, + completion: usage.candidatesTokenCount || 0, + total: + usage.totalTokenCount || + (usage.promptTokenCount || 0) + (usage.candidatesTokenCount || 0), + } + } + } + ) + + return streamingResult + } + + let 
geminiResponse = await response.json() + + if (payload.generationConfig?.responseSchema) { + const candidate = geminiResponse.candidates?.[0] + if (candidate?.content?.parts?.[0]?.text) { + const text = candidate.content.parts[0].text + try { + JSON.parse(text) + logger.info('Successfully received structured JSON output') + } catch (_e) { + logger.warn('Failed to parse structured output as JSON') + } + } + } + + let content = '' + let tokens = { + prompt: 0, + completion: 0, + total: 0, + } + const toolCalls = [] + const toolResults = [] + let iterationCount = 0 + + const originalToolConfig = preparedTools?.toolConfig + const forcedTools = preparedTools?.forcedTools || [] + let usedForcedTools: string[] = [] + let hasUsedForcedTool = false + let currentToolConfig = originalToolConfig + + const checkForForcedToolUsage = (functionCall: { name: string; args: any }) => { + if (currentToolConfig && forcedTools.length > 0) { + const toolCallsForTracking = [{ name: functionCall.name, arguments: functionCall.args }] + const result = trackForcedToolUsage( + toolCallsForTracking, + currentToolConfig, + logger, + 'google', + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = result.hasUsedForcedTool + usedForcedTools = result.usedForcedTools + + if (result.nextToolConfig) { + currentToolConfig = result.nextToolConfig + logger.info('Updated tool config for next iteration', { + hasNextToolConfig: !!currentToolConfig, + usedForcedTools: usedForcedTools, + }) + } + } + } + + let modelTime = firstResponseTime + let toolsTime = 0 + + const timeSegments: TimeSegment[] = [ + { + type: 'model', + name: 'Initial response', + startTime: initialCallTime, + endTime: initialCallTime + firstResponseTime, + duration: firstResponseTime, + }, + ] + + try { + const candidate = geminiResponse.candidates?.[0] + + if (candidate?.finishReason === 'UNEXPECTED_TOOL_CALL') { + logger.warn( + 'Vertex AI returned UNEXPECTED_TOOL_CALL - model attempted to call a tool that was not provided', + { + finishReason: candidate.finishReason, + hasContent: !!candidate?.content, + hasParts: !!candidate?.content?.parts, + } + ) + content = extractTextContent(candidate) + } + + const functionCall = extractFunctionCall(candidate) + + if (functionCall) { + logger.info(`Received function call from Vertex AI: ${functionCall.name}`) + + while (iterationCount < MAX_TOOL_ITERATIONS) { + const latestResponse = geminiResponse.candidates?.[0] + const latestFunctionCall = extractFunctionCall(latestResponse) + + if (!latestFunctionCall) { + content = extractTextContent(latestResponse) + break + } + + logger.info( + `Processing function call: ${latestFunctionCall.name} (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` + ) + + const toolsStartTime = Date.now() + + try { + const toolName = latestFunctionCall.name + const toolArgs = latestFunctionCall.args || {} + + const tool = request.tools?.find((t) => t.id === toolName) + if (!tool) { + logger.warn(`Tool ${toolName} not found in registry, skipping`) + break + } + + const toolCallStartTime = Date.now() + + const { toolParams, executionParams } = prepareToolExecution(tool, toolArgs, request) + const result = await executeTool(toolName, executionParams, true) + const toolCallEndTime = Date.now() + const toolCallDuration = toolCallEndTime - toolCallStartTime + + timeSegments.push({ + type: 'tool', + name: toolName, + startTime: toolCallStartTime, + endTime: toolCallEndTime, + duration: toolCallDuration, + }) + + let resultContent: any + if (result.success) { + toolResults.push(result.output) 
+ resultContent = result.output + } else { + resultContent = { + error: true, + message: result.error || 'Tool execution failed', + tool: toolName, + } + } + + toolCalls.push({ + name: toolName, + arguments: toolParams, + startTime: new Date(toolCallStartTime).toISOString(), + endTime: new Date(toolCallEndTime).toISOString(), + duration: toolCallDuration, + result: resultContent, + success: result.success, + }) + + const simplifiedMessages = [ + ...(contents.filter((m) => m.role === 'user').length > 0 + ? [contents.filter((m) => m.role === 'user')[0]] + : [contents[0]]), + { + role: 'model', + parts: [ + { + functionCall: { + name: latestFunctionCall.name, + args: latestFunctionCall.args, + }, + }, + ], + }, + { + role: 'user', + parts: [ + { + text: `Function ${latestFunctionCall.name} result: ${JSON.stringify(resultContent)}`, + }, + ], + }, + ] + + const thisToolsTime = Date.now() - toolsStartTime + toolsTime += thisToolsTime + + checkForForcedToolUsage(latestFunctionCall) + + const nextModelStartTime = Date.now() + + try { + if (request.stream) { + const streamingPayload = { + ...payload, + contents: simplifiedMessages, + } + + const allForcedToolsUsed = + forcedTools.length > 0 && usedForcedTools.length === forcedTools.length + + if (allForcedToolsUsed && request.responseFormat) { + streamingPayload.tools = undefined + streamingPayload.toolConfig = undefined + + const responseFormatSchema = + request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + if (!streamingPayload.generationConfig) { + streamingPayload.generationConfig = {} + } + streamingPayload.generationConfig.responseMimeType = 'application/json' + streamingPayload.generationConfig.responseSchema = cleanSchema + + logger.info('Using structured output for final response after tool execution') + } else { + if (currentToolConfig) { + streamingPayload.toolConfig = currentToolConfig + } else { + streamingPayload.toolConfig = { functionCallingConfig: { mode: 'AUTO' } } + } + } + + const checkPayload = { + ...streamingPayload, + } + checkPayload.stream = undefined + + const checkEndpoint = buildVertexEndpoint( + vertexProject, + vertexLocation, + requestedModel, + false + ) + + const checkResponse = await fetch(checkEndpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${request.apiKey}`, + }, + body: JSON.stringify(checkPayload), + }) + + if (!checkResponse.ok) { + const errorBody = await checkResponse.text() + logger.error('Error in Vertex AI check request:', { + status: checkResponse.status, + statusText: checkResponse.statusText, + responseBody: errorBody, + }) + throw new Error( + `Vertex AI API check error: ${checkResponse.status} ${checkResponse.statusText}` + ) + } + + const checkResult = await checkResponse.json() + const checkCandidate = checkResult.candidates?.[0] + const checkFunctionCall = extractFunctionCall(checkCandidate) + + if (checkFunctionCall) { + logger.info( + 'Function call detected in follow-up, handling in non-streaming mode', + { + functionName: checkFunctionCall.name, + } + ) + + geminiResponse = checkResult + + if (checkResult.usageMetadata) { + tokens.prompt += checkResult.usageMetadata.promptTokenCount || 0 + tokens.completion += checkResult.usageMetadata.candidatesTokenCount || 0 + tokens.total += + (checkResult.usageMetadata.promptTokenCount || 0) + + (checkResult.usageMetadata.candidatesTokenCount || 0) + } + + const nextModelEndTime = Date.now() + const thisModelTime = 
nextModelEndTime - nextModelStartTime + modelTime += thisModelTime + + timeSegments.push({ + type: 'model', + name: `Model response (iteration ${iterationCount + 1})`, + startTime: nextModelStartTime, + endTime: nextModelEndTime, + duration: thisModelTime, + }) + + iterationCount++ + continue + } + + logger.info('No function call detected, proceeding with streaming response') + + // Apply structured output for the final response if responseFormat is specified + // This works regardless of whether tools were forced or auto + if (request.responseFormat) { + streamingPayload.tools = undefined + streamingPayload.toolConfig = undefined + + const responseFormatSchema = + request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + if (!streamingPayload.generationConfig) { + streamingPayload.generationConfig = {} + } + streamingPayload.generationConfig.responseMimeType = 'application/json' + streamingPayload.generationConfig.responseSchema = cleanSchema + + logger.info( + 'Using structured output for final streaming response after tool execution' + ) + } + + const streamEndpoint = buildVertexEndpoint( + vertexProject, + vertexLocation, + requestedModel, + true + ) + + const streamingResponse = await fetch(streamEndpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${request.apiKey}`, + }, + body: JSON.stringify(streamingPayload), + }) + + if (!streamingResponse.ok) { + const errorBody = await streamingResponse.text() + logger.error('Error in Vertex AI streaming follow-up request:', { + status: streamingResponse.status, + statusText: streamingResponse.statusText, + responseBody: errorBody, + }) + throw new Error( + `Vertex AI API streaming error: ${streamingResponse.status} ${streamingResponse.statusText}` + ) + } + + const nextModelEndTime = Date.now() + const thisModelTime = nextModelEndTime - nextModelStartTime + modelTime += thisModelTime + + timeSegments.push({ + type: 'model', + name: 'Final streaming response after tool calls', + startTime: nextModelStartTime, + endTime: nextModelEndTime, + duration: thisModelTime, + }) + + const streamingExecution: StreamingExecution = { + stream: null as any, + execution: { + success: true, + output: { + content: '', + model: request.model, + tokens, + toolCalls: + toolCalls.length > 0 + ? 
{ + list: toolCalls, + count: toolCalls.length, + } + : undefined, + toolResults, + providerTiming: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + modelTime, + toolsTime, + firstResponseTime, + iterations: iterationCount + 1, + timeSegments, + }, + }, + logs: [], + metadata: { + startTime: providerStartTimeISO, + endTime: new Date().toISOString(), + duration: Date.now() - providerStartTime, + }, + isStreaming: true, + }, + } + + streamingExecution.stream = createReadableStreamFromVertexStream( + streamingResponse, + (content, usage) => { + streamingExecution.execution.output.content = content + + const streamEndTime = Date.now() + const streamEndTimeISO = new Date(streamEndTime).toISOString() + + if (streamingExecution.execution.output.providerTiming) { + streamingExecution.execution.output.providerTiming.endTime = + streamEndTimeISO + streamingExecution.execution.output.providerTiming.duration = + streamEndTime - providerStartTime + } + + if (usage) { + const existingTokens = streamingExecution.execution.output.tokens || { + prompt: 0, + completion: 0, + total: 0, + } + streamingExecution.execution.output.tokens = { + prompt: (existingTokens.prompt || 0) + (usage.promptTokenCount || 0), + completion: + (existingTokens.completion || 0) + (usage.candidatesTokenCount || 0), + total: + (existingTokens.total || 0) + + (usage.totalTokenCount || + (usage.promptTokenCount || 0) + (usage.candidatesTokenCount || 0)), + } + } + } + ) + + return streamingExecution + } + + const nextPayload = { + ...payload, + contents: simplifiedMessages, + } + + const allForcedToolsUsed = + forcedTools.length > 0 && usedForcedTools.length === forcedTools.length + + if (allForcedToolsUsed && request.responseFormat) { + nextPayload.tools = undefined + nextPayload.toolConfig = undefined + + const responseFormatSchema = + request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + if (!nextPayload.generationConfig) { + nextPayload.generationConfig = {} + } + nextPayload.generationConfig.responseMimeType = 'application/json' + nextPayload.generationConfig.responseSchema = cleanSchema + + logger.info( + 'Using structured output for final non-streaming response after tool execution' + ) + } else { + if (currentToolConfig) { + nextPayload.toolConfig = currentToolConfig + } + } + + const nextEndpoint = buildVertexEndpoint( + vertexProject, + vertexLocation, + requestedModel, + false + ) + + const nextResponse = await fetch(nextEndpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${request.apiKey}`, + }, + body: JSON.stringify(nextPayload), + }) + + if (!nextResponse.ok) { + const errorBody = await nextResponse.text() + logger.error('Error in Vertex AI follow-up request:', { + status: nextResponse.status, + statusText: nextResponse.statusText, + responseBody: errorBody, + iterationCount, + }) + break + } + + geminiResponse = await nextResponse.json() + + const nextModelEndTime = Date.now() + const thisModelTime = nextModelEndTime - nextModelStartTime + + timeSegments.push({ + type: 'model', + name: `Model response (iteration ${iterationCount + 1})`, + startTime: nextModelStartTime, + endTime: nextModelEndTime, + duration: thisModelTime, + }) + + modelTime += thisModelTime + + const nextCandidate = geminiResponse.candidates?.[0] + const nextFunctionCall = extractFunctionCall(nextCandidate) + + if (!nextFunctionCall) { + // If 
responseFormat is specified, make one final request with structured output + if (request.responseFormat) { + const finalPayload = { + ...payload, + contents: nextPayload.contents, + tools: undefined, + toolConfig: undefined, + } + + const responseFormatSchema = + request.responseFormat.schema || request.responseFormat + const cleanSchema = cleanSchemaForGemini(responseFormatSchema) + + if (!finalPayload.generationConfig) { + finalPayload.generationConfig = {} + } + finalPayload.generationConfig.responseMimeType = 'application/json' + finalPayload.generationConfig.responseSchema = cleanSchema + + logger.info('Making final request with structured output after tool execution') + + const finalEndpoint = buildVertexEndpoint( + vertexProject, + vertexLocation, + requestedModel, + false + ) + + const finalResponse = await fetch(finalEndpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + Authorization: `Bearer ${request.apiKey}`, + }, + body: JSON.stringify(finalPayload), + }) + + if (finalResponse.ok) { + const finalResult = await finalResponse.json() + const finalCandidate = finalResult.candidates?.[0] + content = extractTextContent(finalCandidate) + + if (finalResult.usageMetadata) { + tokens.prompt += finalResult.usageMetadata.promptTokenCount || 0 + tokens.completion += finalResult.usageMetadata.candidatesTokenCount || 0 + tokens.total += + (finalResult.usageMetadata.promptTokenCount || 0) + + (finalResult.usageMetadata.candidatesTokenCount || 0) + } + } else { + logger.warn( + 'Failed to get structured output, falling back to regular response' + ) + content = extractTextContent(nextCandidate) + } + } else { + content = extractTextContent(nextCandidate) + } + break + } + + iterationCount++ + } catch (error) { + logger.error('Error in Vertex AI follow-up request:', { + error: error instanceof Error ? error.message : String(error), + iterationCount, + }) + break + } + } catch (error) { + logger.error('Error processing function call:', { + error: error instanceof Error ? error.message : String(error), + functionName: latestFunctionCall?.name || 'unknown', + }) + break + } + } + } else { + content = extractTextContent(candidate) + } + } catch (error) { + logger.error('Error processing Vertex AI response:', { + error: error instanceof Error ? error.message : String(error), + iterationCount, + }) + + if (!content && toolCalls.length > 0) { + content = `Tool call(s) executed: ${toolCalls.map((t) => t.name).join(', ')}. Results are available in the tool results.` + } + } + + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + if (geminiResponse.usageMetadata) { + tokens = { + prompt: geminiResponse.usageMetadata.promptTokenCount || 0, + completion: geminiResponse.usageMetadata.candidatesTokenCount || 0, + total: + (geminiResponse.usageMetadata.promptTokenCount || 0) + + (geminiResponse.usageMetadata.candidatesTokenCount || 0), + } + } + + return { + content, + model: request.model, + tokens, + toolCalls: toolCalls.length > 0 ? toolCalls : undefined, + toolResults: toolResults.length > 0 ? 
toolResults : undefined, + timing: { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + modelTime: modelTime, + toolsTime: toolsTime, + firstResponseTime: firstResponseTime, + iterations: iterationCount + 1, + timeSegments: timeSegments, + }, + } + } catch (error) { + const providerEndTime = Date.now() + const providerEndTimeISO = new Date(providerEndTime).toISOString() + const totalDuration = providerEndTime - providerStartTime + + logger.error('Error in Vertex AI request:', { + error: error instanceof Error ? error.message : String(error), + duration: totalDuration, + }) + + const enhancedError = new Error(error instanceof Error ? error.message : String(error)) + // @ts-ignore - Adding timing property to the error + enhancedError.timing = { + startTime: providerStartTimeISO, + endTime: providerEndTimeISO, + duration: totalDuration, + } + + throw enhancedError + } + }, +} diff --git a/apps/sim/providers/vertex/utils.ts b/apps/sim/providers/vertex/utils.ts new file mode 100644 index 0000000000..70ac83e329 --- /dev/null +++ b/apps/sim/providers/vertex/utils.ts @@ -0,0 +1,233 @@ +import { createLogger } from '@/lib/logs/console/logger' +import { extractFunctionCall, extractTextContent } from '@/providers/google/utils' + +const logger = createLogger('VertexUtils') + +/** + * Creates a ReadableStream from Vertex AI's Gemini stream response + */ +export function createReadableStreamFromVertexStream( + response: Response, + onComplete?: ( + content: string, + usage?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number } + ) => void +): ReadableStream { + const reader = response.body?.getReader() + if (!reader) { + throw new Error('Failed to get reader from response body') + } + + return new ReadableStream({ + async start(controller) { + try { + let buffer = '' + let fullContent = '' + let usageData: { + promptTokenCount?: number + candidatesTokenCount?: number + totalTokenCount?: number + } | null = null + + while (true) { + const { done, value } = await reader.read() + if (done) { + if (buffer.trim()) { + try { + const data = JSON.parse(buffer.trim()) + if (data.usageMetadata) { + usageData = data.usageMetadata + } + const candidate = data.candidates?.[0] + if (candidate?.content?.parts) { + const functionCall = extractFunctionCall(candidate) + if (functionCall) { + logger.debug( + 'Function call detected in final buffer, ending stream to execute tool', + { + functionName: functionCall.name, + } + ) + if (onComplete) onComplete(fullContent, usageData || undefined) + controller.close() + return + } + const content = extractTextContent(candidate) + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + } catch (e) { + if (buffer.trim().startsWith('[')) { + try { + const dataArray = JSON.parse(buffer.trim()) + if (Array.isArray(dataArray)) { + for (const item of dataArray) { + if (item.usageMetadata) { + usageData = item.usageMetadata + } + const candidate = item.candidates?.[0] + if (candidate?.content?.parts) { + const functionCall = extractFunctionCall(candidate) + if (functionCall) { + logger.debug( + 'Function call detected in array item, ending stream to execute tool', + { + functionName: functionCall.name, + } + ) + if (onComplete) onComplete(fullContent, usageData || undefined) + controller.close() + return + } + const content = extractTextContent(candidate) + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + 
} + } + } catch (arrayError) { + // Buffer is not valid JSON array + } + } + } + } + if (onComplete) onComplete(fullContent, usageData || undefined) + controller.close() + break + } + + const text = new TextDecoder().decode(value) + buffer += text + + let searchIndex = 0 + while (searchIndex < buffer.length) { + const openBrace = buffer.indexOf('{', searchIndex) + if (openBrace === -1) break + + let braceCount = 0 + let inString = false + let escaped = false + let closeBrace = -1 + + for (let i = openBrace; i < buffer.length; i++) { + const char = buffer[i] + + if (!inString) { + if (char === '"' && !escaped) { + inString = true + } else if (char === '{') { + braceCount++ + } else if (char === '}') { + braceCount-- + if (braceCount === 0) { + closeBrace = i + break + } + } + } else { + if (char === '"' && !escaped) { + inString = false + } + } + + escaped = char === '\\' && !escaped + } + + if (closeBrace !== -1) { + const jsonStr = buffer.substring(openBrace, closeBrace + 1) + + try { + const data = JSON.parse(jsonStr) + + if (data.usageMetadata) { + usageData = data.usageMetadata + } + + const candidate = data.candidates?.[0] + + if (candidate?.finishReason === 'UNEXPECTED_TOOL_CALL') { + logger.warn( + 'Vertex AI returned UNEXPECTED_TOOL_CALL - model attempted to call a tool that was not provided', + { + finishReason: candidate.finishReason, + hasContent: !!candidate?.content, + hasParts: !!candidate?.content?.parts, + } + ) + const textContent = extractTextContent(candidate) + if (textContent) { + fullContent += textContent + controller.enqueue(new TextEncoder().encode(textContent)) + } + if (onComplete) onComplete(fullContent, usageData || undefined) + controller.close() + return + } + + if (candidate?.content?.parts) { + const functionCall = extractFunctionCall(candidate) + if (functionCall) { + logger.debug( + 'Function call detected in stream, ending stream to execute tool', + { + functionName: functionCall.name, + } + ) + if (onComplete) onComplete(fullContent, usageData || undefined) + controller.close() + return + } + const content = extractTextContent(candidate) + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + } catch (e) { + logger.error('Error parsing JSON from stream', { + error: e instanceof Error ? e.message : String(e), + jsonPreview: jsonStr.substring(0, 200), + }) + } + + buffer = buffer.substring(closeBrace + 1) + searchIndex = 0 + } else { + break + } + } + } + } catch (e) { + logger.error('Error reading Vertex AI stream', { + error: e instanceof Error ? e.message : String(e), + }) + controller.error(e) + } + }, + async cancel() { + await reader.cancel() + }, + }) +} + +/** + * Build Vertex AI endpoint URL + */ +export function buildVertexEndpoint( + project: string, + location: string, + model: string, + isStreaming: boolean +): string { + const action = isStreaming ? 
'streamGenerateContent' : 'generateContent' + + if (location === 'global') { + return `https://aiplatform.googleapis.com/v1/projects/${project}/locations/global/publishers/google/models/${model}:${action}` + } + + return `https://${location}-aiplatform.googleapis.com/v1/projects/${project}/locations/${location}/publishers/google/models/${model}:${action}` +} diff --git a/apps/sim/providers/vllm/index.ts b/apps/sim/providers/vllm/index.ts index bd6805be78..14acdc0e4d 100644 --- a/apps/sim/providers/vllm/index.ts +++ b/apps/sim/providers/vllm/index.ts @@ -2,6 +2,7 @@ import OpenAI from 'openai' import { env } from '@/lib/core/config/env' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -14,50 +15,13 @@ import { prepareToolsWithUsageControl, trackForcedToolUsage, } from '@/providers/utils' +import { createReadableStreamFromVLLMStream } from '@/providers/vllm/utils' import { useProvidersStore } from '@/stores/providers/store' import { executeTool } from '@/tools' const logger = createLogger('VLLMProvider') const VLLM_VERSION = '1.0.0' -/** - * Helper function to convert a vLLM stream to a standard ReadableStream - * and collect completion metrics - */ -function createReadableStreamFromVLLMStream( - vllmStream: any, - onComplete?: (content: string, usage?: any) => void -): ReadableStream { - let fullContent = '' - let usageData: any = null - - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of vllmStream) { - if (chunk.usage) { - usageData = chunk.usage - } - - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - fullContent += content - controller.enqueue(new TextEncoder().encode(content)) - } - } - - if (onComplete) { - onComplete(fullContent, usageData) - } - - controller.close() - } catch (error) { - controller.error(error) - } - }, - }) -} - export const vllmProvider: ProviderConfig = { id: 'vllm', name: 'vLLM', @@ -341,7 +305,6 @@ export const vllmProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 let modelTime = firstResponseTime let toolsTime = 0 @@ -360,14 +323,14 @@ export const vllmProvider: ProviderConfig = { checkForForcedToolUsage(currentResponse, originalToolChoice) - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { break } logger.info( - `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_ITERATIONS})` + `Processing ${toolCallsInResponse.length} tool calls (iteration ${iterationCount + 1}/${MAX_TOOL_ITERATIONS})` ) const toolsStartTime = Date.now() diff --git a/apps/sim/providers/vllm/utils.ts b/apps/sim/providers/vllm/utils.ts new file mode 100644 index 0000000000..56afadf0d0 --- /dev/null +++ b/apps/sim/providers/vllm/utils.ts @@ -0,0 +1,37 @@ +/** + * Helper function to convert a vLLM stream to a standard ReadableStream + * and collect completion metrics + */ +export function createReadableStreamFromVLLMStream( + vllmStream: any, + onComplete?: (content: string, usage?: any) => void +): ReadableStream { + let fullContent = '' + let usageData: any = null + + return new 
ReadableStream({ + async start(controller) { + try { + for await (const chunk of vllmStream) { + if (chunk.usage) { + usageData = chunk.usage + } + + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + fullContent += content + controller.enqueue(new TextEncoder().encode(content)) + } + } + + if (onComplete) { + onComplete(fullContent, usageData) + } + + controller.close() + } catch (error) { + controller.error(error) + } + }, + }) +} diff --git a/apps/sim/providers/xai/index.ts b/apps/sim/providers/xai/index.ts index cfa73baf27..f1faa6480d 100644 --- a/apps/sim/providers/xai/index.ts +++ b/apps/sim/providers/xai/index.ts @@ -1,6 +1,7 @@ import OpenAI from 'openai' import { createLogger } from '@/lib/logs/console/logger' import type { StreamingExecution } from '@/executor/types' +import { MAX_TOOL_ITERATIONS } from '@/providers' import { getProviderDefaultModel, getProviderModels } from '@/providers/models' import type { ProviderConfig, @@ -8,37 +9,16 @@ import type { ProviderResponse, TimeSegment, } from '@/providers/types' +import { prepareToolExecution, prepareToolsWithUsageControl } from '@/providers/utils' import { - prepareToolExecution, - prepareToolsWithUsageControl, - trackForcedToolUsage, -} from '@/providers/utils' + checkForForcedToolUsage, + createReadableStreamFromXAIStream, + createResponseFormatPayload, +} from '@/providers/xai/utils' import { executeTool } from '@/tools' const logger = createLogger('XAIProvider') -/** - * Helper to wrap XAI (OpenAI-compatible) streaming into a browser-friendly - * ReadableStream of raw assistant text chunks. - */ -function createReadableStreamFromXAIStream(xaiStream: any): ReadableStream { - return new ReadableStream({ - async start(controller) { - try { - for await (const chunk of xaiStream) { - const content = chunk.choices[0]?.delta?.content || '' - if (content) { - controller.enqueue(new TextEncoder().encode(content)) - } - } - controller.close() - } catch (err) { - controller.error(err) - } - }, - }) -} - export const xAIProvider: ProviderConfig = { id: 'xai', name: 'xAI', @@ -115,27 +95,6 @@ export const xAIProvider: ProviderConfig = { if (request.temperature !== undefined) basePayload.temperature = request.temperature if (request.maxTokens !== undefined) basePayload.max_tokens = request.maxTokens - // Function to create response format configuration - const createResponseFormatPayload = (messages: any[] = allMessages) => { - const payload = { - ...basePayload, - messages, - } - - if (request.responseFormat) { - payload.response_format = { - type: 'json_schema', - json_schema: { - name: request.responseFormat.name || 'structured_response', - schema: request.responseFormat.schema || request.responseFormat, - strict: request.responseFormat.strict !== false, - }, - } - } - - return payload - } - // Handle tools and tool usage control let preparedTools: ReturnType | null = null @@ -154,7 +113,7 @@ export const xAIProvider: ProviderConfig = { // Use response format payload if needed, otherwise use base payload const streamingPayload = request.responseFormat - ? createResponseFormatPayload() + ? 
createResponseFormatPayload(basePayload, allMessages, request.responseFormat) : { ...basePayload, stream: true } if (!request.responseFormat) { @@ -243,7 +202,11 @@ export const xAIProvider: ProviderConfig = { originalToolChoice = toolChoice } else if (request.responseFormat) { // Only add response format if there are no tools - const responseFormatPayload = createResponseFormatPayload() + const responseFormatPayload = createResponseFormatPayload( + basePayload, + allMessages, + request.responseFormat + ) Object.assign(initialPayload, responseFormatPayload) } @@ -260,7 +223,6 @@ export const xAIProvider: ProviderConfig = { const toolResults = [] const currentMessages = [...allMessages] let iterationCount = 0 - const MAX_ITERATIONS = 10 // Track if a forced tool has been used let hasUsedForcedTool = false @@ -280,33 +242,20 @@ export const xAIProvider: ProviderConfig = { }, ] - // Helper function to check for forced tool usage in responses - const checkForForcedToolUsage = ( - response: any, - toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any } - ) => { - if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) { - const toolCallsResponse = response.choices[0].message.tool_calls - const result = trackForcedToolUsage( - toolCallsResponse, - toolChoice, - logger, - 'xai', - forcedTools, - usedForcedTools - ) - hasUsedForcedTool = result.hasUsedForcedTool - usedForcedTools = result.usedForcedTools - } - } - // Check if a forced tool was used in the first response if (originalToolChoice) { - checkForForcedToolUsage(currentResponse, originalToolChoice) + const result = checkForForcedToolUsage( + currentResponse, + originalToolChoice, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = result.hasUsedForcedTool + usedForcedTools = result.usedForcedTools } try { - while (iterationCount < MAX_ITERATIONS) { + while (iterationCount < MAX_TOOL_ITERATIONS) { // Check for tool calls const toolCallsInResponse = currentResponse.choices[0]?.message?.tool_calls if (!toolCallsInResponse || toolCallsInResponse.length === 0) { @@ -432,7 +381,12 @@ export const xAIProvider: ProviderConfig = { } else { // All forced tools have been used, check if we need response format for final response if (request.responseFormat) { - nextPayload = createResponseFormatPayload(currentMessages) + nextPayload = createResponseFormatPayload( + basePayload, + allMessages, + request.responseFormat, + currentMessages + ) } else { nextPayload = { ...basePayload, @@ -446,7 +400,12 @@ export const xAIProvider: ProviderConfig = { // Normal tool processing - check if this might be the final response if (request.responseFormat) { // Use response format for what might be the final response - nextPayload = createResponseFormatPayload(currentMessages) + nextPayload = createResponseFormatPayload( + basePayload, + allMessages, + request.responseFormat, + currentMessages + ) } else { nextPayload = { ...basePayload, @@ -464,7 +423,14 @@ export const xAIProvider: ProviderConfig = { // Check if any forced tools were used in this response if (nextPayload.tool_choice && typeof nextPayload.tool_choice === 'object') { - checkForForcedToolUsage(currentResponse, nextPayload.tool_choice) + const result = checkForForcedToolUsage( + currentResponse, + nextPayload.tool_choice, + forcedTools, + usedForcedTools + ) + hasUsedForcedTool = result.hasUsedForcedTool + usedForcedTools = result.usedForcedTools } const nextModelEndTime = Date.now() @@ -509,7 +475,12 @@ export const xAIProvider: 
ProviderConfig = { if (request.responseFormat) { // Use response format, no tools finalStreamingPayload = { - ...createResponseFormatPayload(currentMessages), + ...createResponseFormatPayload( + basePayload, + allMessages, + request.responseFormat, + currentMessages + ), stream: true, } } else { diff --git a/apps/sim/providers/xai/utils.ts b/apps/sim/providers/xai/utils.ts new file mode 100644 index 0000000000..c5ee067e56 --- /dev/null +++ b/apps/sim/providers/xai/utils.ts @@ -0,0 +1,83 @@ +import { createLogger } from '@/lib/logs/console/logger' +import { trackForcedToolUsage } from '@/providers/utils' + +const logger = createLogger('XAIProvider') + +/** + * Helper to wrap XAI (OpenAI-compatible) streaming into a browser-friendly + * ReadableStream of raw assistant text chunks. + */ +export function createReadableStreamFromXAIStream(xaiStream: any): ReadableStream { + return new ReadableStream({ + async start(controller) { + try { + for await (const chunk of xaiStream) { + const content = chunk.choices[0]?.delta?.content || '' + if (content) { + controller.enqueue(new TextEncoder().encode(content)) + } + } + controller.close() + } catch (err) { + controller.error(err) + } + }, + }) +} + +/** + * Creates a response format payload for XAI API requests. + */ +export function createResponseFormatPayload( + basePayload: any, + allMessages: any[], + responseFormat: any, + currentMessages?: any[] +) { + const payload = { + ...basePayload, + messages: currentMessages || allMessages, + } + + if (responseFormat) { + payload.response_format = { + type: 'json_schema', + json_schema: { + name: responseFormat.name || 'structured_response', + schema: responseFormat.schema || responseFormat, + strict: responseFormat.strict !== false, + }, + } + } + + return payload +} + +/** + * Helper function to check for forced tool usage in responses. 
+ */ +export function checkForForcedToolUsage( + response: any, + toolChoice: string | { type: string; function?: { name: string }; name?: string; any?: any }, + forcedTools: string[], + usedForcedTools: string[] +): { hasUsedForcedTool: boolean; usedForcedTools: string[] } { + let hasUsedForcedTool = false + let updatedUsedForcedTools = usedForcedTools + + if (typeof toolChoice === 'object' && response.choices[0]?.message?.tool_calls) { + const toolCallsResponse = response.choices[0].message.tool_calls + const result = trackForcedToolUsage( + toolCallsResponse, + toolChoice, + logger, + 'xai', + forcedTools, + updatedUsedForcedTools + ) + hasUsedForcedTool = result.hasUsedForcedTool + updatedUsedForcedTools = result.usedForcedTools + } + + return { hasUsedForcedTool, usedForcedTools: updatedUsedForcedTools } +} diff --git a/apps/sim/tools/llm/chat.ts b/apps/sim/tools/llm/chat.ts index 536400734f..7af74232db 100644 --- a/apps/sim/tools/llm/chat.ts +++ b/apps/sim/tools/llm/chat.ts @@ -13,6 +13,8 @@ interface LLMChatParams { maxTokens?: number azureEndpoint?: string azureApiVersion?: string + vertexProject?: string + vertexLocation?: string } interface LLMChatResponse extends ToolResponse { @@ -77,6 +79,18 @@ export const llmChatTool: ToolConfig = { visibility: 'hidden', description: 'Azure OpenAI API version', }, + vertexProject: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'Google Cloud project ID for Vertex AI', + }, + vertexLocation: { + type: 'string', + required: false, + visibility: 'hidden', + description: 'Google Cloud location for Vertex AI (defaults to us-central1)', + }, }, request: { @@ -98,6 +112,8 @@ export const llmChatTool: ToolConfig = { maxTokens: params.maxTokens, azureEndpoint: params.azureEndpoint, azureApiVersion: params.azureApiVersion, + vertexProject: params.vertexProject, + vertexLocation: params.vertexLocation, } }, }, diff --git a/bun.lock b/bun.lock index c5863930ca..e13beed623 100644 --- a/bun.lock +++ b/bun.lock @@ -1,5 +1,6 @@ { "lockfileVersion": 1, + "configVersion": 0, "workspaces": { "": { "name": "simstudio", @@ -266,12 +267,12 @@ "sharp", ], "overrides": { - "react": "19.2.1", - "react-dom": "19.2.1", - "next": "16.1.0-canary.21", "@next/env": "16.1.0-canary.21", "drizzle-orm": "^0.44.5", + "next": "16.1.0-canary.21", "postgres": "^3.4.5", + "react": "19.2.1", + "react-dom": "19.2.1", }, "packages": { "@adobe/css-tools": ["@adobe/css-tools@4.4.4", "", {}, "sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg=="], From c4a6d11cc08fb11e3aceb712893a23729c5c1adf Mon Sep 17 00:00:00 2001 From: Waleed Date: Wed, 17 Dec 2025 15:29:25 -0800 Subject: [PATCH 15/15] fix(condition): used isolated vms for condition block RCE (#2432) * fix(condition): used isolated vms for condition block RCE * ack PR comment * one more * remove inputForm from sched, update loop condition to also use isolated vm * hide servicenow --- apps/sim/blocks/blocks/schedule.ts | 9 --- apps/sim/blocks/blocks/servicenow.ts | 1 + .../condition/condition-handler.test.ts | 70 ++++++++++++------- .../handlers/condition/condition-handler.ts | 38 +++++++--- apps/sim/executor/orchestrators/loop.ts | 56 +++++++++++---- apps/sim/executor/orchestrators/node.ts | 11 +-- apps/sim/lib/execution/isolated-vm.ts | 33 ++++++++- 7 files changed, 154 insertions(+), 64 deletions(-) diff --git a/apps/sim/blocks/blocks/schedule.ts b/apps/sim/blocks/blocks/schedule.ts index 1b6de427c5..edf21e2df3 100644 --- a/apps/sim/blocks/blocks/schedule.ts 
+++ b/apps/sim/blocks/blocks/schedule.ts @@ -155,15 +155,6 @@ export const ScheduleBlock: BlockConfig = { condition: { field: 'scheduleType', value: ['minutes', 'hourly'], not: true }, }, - { - id: 'inputFormat', - title: 'Input Format', - type: 'input-format', - description: - 'Define input parameters that will be available when the schedule triggers. Use Value to set default values for scheduled executions.', - mode: 'trigger', - }, - { id: 'scheduleSave', type: 'schedule-save', diff --git a/apps/sim/blocks/blocks/servicenow.ts b/apps/sim/blocks/blocks/servicenow.ts index 110323dc1f..b18ef6be94 100644 --- a/apps/sim/blocks/blocks/servicenow.ts +++ b/apps/sim/blocks/blocks/servicenow.ts @@ -8,6 +8,7 @@ export const ServiceNowBlock: BlockConfig = { name: 'ServiceNow', description: 'Create, read, update, delete, and bulk import ServiceNow records', authMode: AuthMode.OAuth, + hideFromToolbar: true, longDescription: 'Integrate ServiceNow into your workflow. Can create, read, update, and delete records in any ServiceNow table (incidents, tasks, users, etc.). Supports bulk import operations for data migration and ETL.', docsLink: 'https://docs.sim.ai/tools/servicenow', diff --git a/apps/sim/executor/handlers/condition/condition-handler.test.ts b/apps/sim/executor/handlers/condition/condition-handler.test.ts index 07805b7517..fbdedc739e 100644 --- a/apps/sim/executor/handlers/condition/condition-handler.test.ts +++ b/apps/sim/executor/handlers/condition/condition-handler.test.ts @@ -1,11 +1,47 @@ -import '@/executor/__test-utils__/mock-dependencies' - import { beforeEach, describe, expect, it, vi } from 'vitest' import { BlockType } from '@/executor/constants' import { ConditionBlockHandler } from '@/executor/handlers/condition/condition-handler' import type { BlockState, ExecutionContext } from '@/executor/types' import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types' +vi.mock('@/lib/logs/console/logger', () => ({ + createLogger: vi.fn(() => ({ + info: vi.fn(), + error: vi.fn(), + warn: vi.fn(), + debug: vi.fn(), + })), +})) + +vi.mock('@/lib/core/utils/request', () => ({ + generateRequestId: vi.fn(() => 'test-request-id'), +})) + +vi.mock('@/lib/execution/isolated-vm', () => ({ + executeInIsolatedVM: vi.fn(), +})) + +import { executeInIsolatedVM } from '@/lib/execution/isolated-vm' + +const mockExecuteInIsolatedVM = executeInIsolatedVM as ReturnType + +function simulateIsolatedVMExecution( + code: string, + contextVariables: Record +): { result: unknown; stdout: string; error?: { message: string; name: string } } { + try { + const fn = new Function(...Object.keys(contextVariables), code) + const result = fn(...Object.values(contextVariables)) + return { result, stdout: '' } + } catch (error: any) { + return { + result: null, + stdout: '', + error: { message: error.message, name: error.name || 'Error' }, + } + } +} + describe('ConditionBlockHandler', () => { let handler: ConditionBlockHandler let mockBlock: SerializedBlock @@ -18,7 +54,6 @@ describe('ConditionBlockHandler', () => { let mockPathTracker: any beforeEach(() => { - // Define blocks first mockSourceBlock = { id: 'source-block-1', metadata: { id: 'source', name: 'Source Block' }, @@ -33,7 +68,7 @@ describe('ConditionBlockHandler', () => { metadata: { id: BlockType.CONDITION, name: 'Test Condition' }, position: { x: 50, y: 50 }, config: { tool: BlockType.CONDITION, params: {} }, - inputs: { conditions: 'json' }, // Corrected based on previous step + inputs: { conditions: 'json' }, outputs: {}, enabled: true, } @@ 
-56,7 +91,6 @@ describe('ConditionBlockHandler', () => { enabled: true, } - // Then define workflow using the block objects mockWorkflow = { blocks: [mockSourceBlock, mockBlock, mockTargetBlock1, mockTargetBlock2], connections: [ @@ -84,7 +118,6 @@ describe('ConditionBlockHandler', () => { handler = new ConditionBlockHandler(mockPathTracker, mockResolver) - // Define mock context *after* workflow and blocks are set up mockContext = { workflowId: 'test-workflow-id', blockStates: new Map([ @@ -99,7 +132,7 @@ describe('ConditionBlockHandler', () => { ]), blockLogs: [], metadata: { duration: 0 }, - environmentVariables: {}, // Now set the context's env vars + environmentVariables: {}, decisions: { router: new Map(), condition: new Map() }, loopExecutions: new Map(), executedBlocks: new Set([mockSourceBlock.id]), @@ -108,11 +141,11 @@ describe('ConditionBlockHandler', () => { completedLoops: new Set(), } - // Reset mocks using vi vi.clearAllMocks() - // Default mock implementations - Removed as it's in the shared mock now - // mockResolver.resolveBlockReferences.mockImplementation((value) => value) + mockExecuteInIsolatedVM.mockImplementation(async ({ code, contextVariables }) => { + return simulateIsolatedVMExecution(code, contextVariables) + }) }) it('should handle condition blocks', () => { @@ -141,7 +174,6 @@ describe('ConditionBlockHandler', () => { selectedOption: 'cond1', } - // Mock the full resolution pipeline mockResolver.resolveVariableReferences.mockReturnValue('context.value > 5') mockResolver.resolveBlockReferences.mockReturnValue('context.value > 5') mockResolver.resolveEnvVariables.mockReturnValue('context.value > 5') @@ -182,7 +214,6 @@ describe('ConditionBlockHandler', () => { selectedOption: 'else1', } - // Mock the full resolution pipeline mockResolver.resolveVariableReferences.mockReturnValue('context.value < 0') mockResolver.resolveBlockReferences.mockReturnValue('context.value < 0') mockResolver.resolveEnvVariables.mockReturnValue('context.value < 0') @@ -207,7 +238,7 @@ describe('ConditionBlockHandler', () => { const inputs = { conditions: '{ "invalid json ' } await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow( - /^Invalid conditions format: Unterminated string.*/ + /^Invalid conditions format:/ ) }) @@ -218,7 +249,6 @@ describe('ConditionBlockHandler', () => { ] const inputs = { conditions: JSON.stringify(conditions) } - // Mock the full resolution pipeline mockResolver.resolveVariableReferences.mockReturnValue('{{source-block-1.value}} > 5') mockResolver.resolveBlockReferences.mockReturnValue('10 > 5') mockResolver.resolveEnvVariables.mockReturnValue('10 > 5') @@ -245,7 +275,6 @@ describe('ConditionBlockHandler', () => { ] const inputs = { conditions: JSON.stringify(conditions) } - // Mock the full resolution pipeline for variable resolution mockResolver.resolveVariableReferences.mockReturnValue('"john" !== null') mockResolver.resolveBlockReferences.mockReturnValue('"john" !== null') mockResolver.resolveEnvVariables.mockReturnValue('"john" !== null') @@ -272,7 +301,6 @@ describe('ConditionBlockHandler', () => { ] const inputs = { conditions: JSON.stringify(conditions) } - // Mock the full resolution pipeline for env variable resolution mockResolver.resolveVariableReferences.mockReturnValue('{{POOP}} === "hi"') mockResolver.resolveBlockReferences.mockReturnValue('{{POOP}} === "hi"') mockResolver.resolveEnvVariables.mockReturnValue('"hi" === "hi"') @@ -300,7 +328,6 @@ describe('ConditionBlockHandler', () => { const inputs = { conditions: 
JSON.stringify(conditions) } const resolutionError = new Error('Could not resolve reference: invalid-ref') - // Mock the pipeline to throw at the variable resolution stage mockResolver.resolveVariableReferences.mockImplementation(() => { throw resolutionError }) @@ -317,7 +344,6 @@ describe('ConditionBlockHandler', () => { ] const inputs = { conditions: JSON.stringify(conditions) } - // Mock the full resolution pipeline mockResolver.resolveVariableReferences.mockReturnValue( 'context.nonExistentProperty.doSomething()' ) @@ -325,7 +351,7 @@ describe('ConditionBlockHandler', () => { mockResolver.resolveEnvVariables.mockReturnValue('context.nonExistentProperty.doSomething()') await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow( - /^Evaluation error in condition "if": Evaluation error in condition: Cannot read properties of undefined \(reading 'doSomething'\)\. \(Resolved: context\.nonExistentProperty\.doSomething\(\)\)$/ + /Evaluation error in condition "if".*doSomething/ ) }) @@ -333,7 +359,6 @@ describe('ConditionBlockHandler', () => { const conditions = [{ id: 'cond1', title: 'if', value: 'true' }] const inputs = { conditions: JSON.stringify(conditions) } - // Create a new context with empty blockStates instead of trying to delete from readonly map const contextWithoutSource = { ...mockContext, blockStates: new Map(), @@ -355,7 +380,6 @@ describe('ConditionBlockHandler', () => { mockContext.workflow!.blocks = [mockSourceBlock, mockBlock, mockTargetBlock2] - // Mock the full resolution pipeline mockResolver.resolveVariableReferences.mockReturnValue('true') mockResolver.resolveBlockReferences.mockReturnValue('true') mockResolver.resolveEnvVariables.mockReturnValue('true') @@ -381,7 +405,6 @@ describe('ConditionBlockHandler', () => { }, ] - // Mock the full resolution pipeline mockResolver.resolveVariableReferences .mockReturnValueOnce('false') .mockReturnValueOnce('context.value === 99') @@ -394,12 +417,10 @@ describe('ConditionBlockHandler', () => { const result = await handler.execute(mockContext, mockBlock, inputs) - // Should return success with no path selected (branch ends gracefully) expect((result as any).conditionResult).toBe(false) expect((result as any).selectedPath).toBeNull() expect((result as any).selectedConditionId).toBeNull() expect((result as any).selectedOption).toBeNull() - // Decision should not be set when no condition matches expect(mockContext.decisions.condition.has(mockBlock.id)).toBe(false) }) @@ -410,7 +431,6 @@ describe('ConditionBlockHandler', () => { ] const inputs = { conditions: JSON.stringify(conditions) } - // Mock the full resolution pipeline mockResolver.resolveVariableReferences.mockReturnValue('context.item === "apple"') mockResolver.resolveBlockReferences.mockReturnValue('context.item === "apple"') mockResolver.resolveEnvVariables.mockReturnValue('context.item === "apple"') diff --git a/apps/sim/executor/handlers/condition/condition-handler.ts b/apps/sim/executor/handlers/condition/condition-handler.ts index 452f40da9b..0c8736c5d2 100644 --- a/apps/sim/executor/handlers/condition/condition-handler.ts +++ b/apps/sim/executor/handlers/condition/condition-handler.ts @@ -1,3 +1,5 @@ +import { generateRequestId } from '@/lib/core/utils/request' +import { executeInIsolatedVM } from '@/lib/execution/isolated-vm' import { createLogger } from '@/lib/logs/console/logger' import type { BlockOutput } from '@/blocks/types' import { BlockType, CONDITION, DEFAULTS, EDGE } from '@/executor/constants' @@ -6,6 +8,8 @@ import type { 
SerializedBlock } from '@/serializer/types' const logger = createLogger('ConditionBlockHandler') +const CONDITION_TIMEOUT_MS = 5000 + /** * Evaluates a single condition expression with variable/block reference resolution * Returns true if condition is met, false otherwise @@ -35,11 +39,32 @@ export async function evaluateConditionExpression( } try { - const conditionMet = new Function( - 'context', - `with(context) { return ${resolvedConditionValue} }` - )(evalContext) - return Boolean(conditionMet) + const requestId = generateRequestId() + + const code = `return Boolean(${resolvedConditionValue})` + + const result = await executeInIsolatedVM({ + code, + params: {}, + envVars: {}, + contextVariables: { context: evalContext }, + timeoutMs: CONDITION_TIMEOUT_MS, + requestId, + }) + + if (result.error) { + logger.error(`Failed to evaluate condition: ${result.error.message}`, { + originalCondition: conditionExpression, + resolvedCondition: resolvedConditionValue, + evalContext, + error: result.error, + }) + throw new Error( + `Evaluation error in condition: ${result.error.message}. (Resolved: ${resolvedConditionValue})` + ) + } + + return Boolean(result.result) } catch (evalError: any) { logger.error(`Failed to evaluate condition: ${evalError.message}`, { originalCondition: conditionExpression, @@ -87,7 +112,6 @@ export class ConditionBlockHandler implements BlockHandler { block ) - // Handle case where no condition matched and no else exists - branch ends gracefully if (!selectedConnection || !selectedCondition) { return { ...((sourceOutput as any) || {}), @@ -206,14 +230,12 @@ export class ConditionBlockHandler implements BlockHandler { if (elseConnection) { return { selectedConnection: elseConnection, selectedCondition: elseCondition } } - // Else exists but has no connection - treat as no match, branch ends logger.info(`No condition matched and else has no connection - branch ending`, { blockId: block.id, }) return { selectedConnection: null, selectedCondition: null } } - // No condition matched and no else exists - branch ends gracefully logger.info(`No condition matched and no else block - branch ending`, { blockId: block.id }) return { selectedConnection: null, selectedCondition: null } } diff --git a/apps/sim/executor/orchestrators/loop.ts b/apps/sim/executor/orchestrators/loop.ts index 2e3d6b81e4..2378cded58 100644 --- a/apps/sim/executor/orchestrators/loop.ts +++ b/apps/sim/executor/orchestrators/loop.ts @@ -1,3 +1,5 @@ +import { generateRequestId } from '@/lib/core/utils/request' +import { executeInIsolatedVM } from '@/lib/execution/isolated-vm' import { createLogger } from '@/lib/logs/console/logger' import { buildLoopIndexCondition, DEFAULTS, EDGE } from '@/executor/constants' import type { DAG } from '@/executor/dag/builder' @@ -17,6 +19,8 @@ import type { SerializedLoop } from '@/serializer/types' const logger = createLogger('LoopOrchestrator') +const LOOP_CONDITION_TIMEOUT_MS = 5000 + export type LoopRoute = typeof EDGE.LOOP_CONTINUE | typeof EDGE.LOOP_EXIT export interface LoopContinuationResult { @@ -112,7 +116,10 @@ export class LoopOrchestrator { scope.currentIterationOutputs.set(baseId, output) } - evaluateLoopContinuation(ctx: ExecutionContext, loopId: string): LoopContinuationResult { + async evaluateLoopContinuation( + ctx: ExecutionContext, + loopId: string + ): Promise { const scope = ctx.loopExecutions?.get(loopId) if (!scope) { logger.error('Loop scope not found during continuation evaluation', { loopId }) @@ -123,7 +130,6 @@ export class LoopOrchestrator { } } - // 
Check for cancellation if (ctx.isCancelled) { logger.info('Loop execution cancelled', { loopId, iteration: scope.iteration }) return this.createExitResult(ctx, loopId, scope) @@ -140,7 +146,7 @@ export class LoopOrchestrator { scope.currentIterationOutputs.clear() - if (!this.evaluateCondition(ctx, scope, scope.iteration + 1)) { + if (!(await this.evaluateCondition(ctx, scope, scope.iteration + 1))) { return this.createExitResult(ctx, loopId, scope) } @@ -173,7 +179,11 @@ export class LoopOrchestrator { } } - private evaluateCondition(ctx: ExecutionContext, scope: LoopScope, iteration?: number): boolean { + private async evaluateCondition( + ctx: ExecutionContext, + scope: LoopScope, + iteration?: number + ): Promise { if (!scope.condition) { logger.warn('No condition defined for loop') return false @@ -184,7 +194,7 @@ export class LoopOrchestrator { scope.iteration = iteration } - const result = this.evaluateWhileCondition(ctx, scope.condition, scope) + const result = await this.evaluateWhileCondition(ctx, scope.condition, scope) if (iteration !== undefined) { scope.iteration = currentIteration @@ -223,7 +233,6 @@ export class LoopOrchestrator { const loopNodes = loopConfig.nodes const allLoopNodeIds = new Set([sentinelStartId, sentinelEndId, ...loopNodes]) - // Clear deactivated edges for loop nodes so error/success edges can be re-evaluated if (this.edgeManager) { this.edgeManager.clearDeactivatedEdgesForNodes(allLoopNodeIds) } @@ -263,7 +272,7 @@ export class LoopOrchestrator { * * @returns true if the loop should execute, false if it should be skipped */ - evaluateInitialCondition(ctx: ExecutionContext, loopId: string): boolean { + async evaluateInitialCondition(ctx: ExecutionContext, loopId: string): Promise { const scope = ctx.loopExecutions?.get(loopId) if (!scope) { logger.warn('Loop scope not found for initial condition evaluation', { loopId }) @@ -300,7 +309,7 @@ export class LoopOrchestrator { return false } - const result = this.evaluateWhileCondition(ctx, scope.condition, scope) + const result = await this.evaluateWhileCondition(ctx, scope.condition, scope) logger.info('While loop initial condition evaluation', { loopId, condition: scope.condition, @@ -327,11 +336,11 @@ export class LoopOrchestrator { return undefined } - private evaluateWhileCondition( + private async evaluateWhileCondition( ctx: ExecutionContext, condition: string, scope: LoopScope - ): boolean { + ): Promise { if (!condition) { return false } @@ -343,7 +352,6 @@ export class LoopOrchestrator { workflowVariables: ctx.workflowVariables, }) - // Use generic utility for smart variable reference replacement const evaluatedCondition = replaceValidReferences(condition, (match) => { const resolved = this.resolver.resolveSingleReference(ctx, '', match, scope) logger.info('Resolved variable reference in loop condition', { @@ -352,11 +360,9 @@ export class LoopOrchestrator { resolvedType: typeof resolved, }) if (resolved !== undefined) { - // For booleans and numbers, return as-is (no quotes) if (typeof resolved === 'boolean' || typeof resolved === 'number') { return String(resolved) } - // For strings that represent booleans, return without quotes if (typeof resolved === 'string') { const lower = resolved.toLowerCase().trim() if (lower === 'true' || lower === 'false') { @@ -364,13 +370,33 @@ export class LoopOrchestrator { } return `"${resolved}"` } - // For other types, stringify them return JSON.stringify(resolved) } return match }) - const result = Boolean(new Function(`return (${evaluatedCondition})`)()) + const 
requestId = generateRequestId() + const code = `return Boolean(${evaluatedCondition})` + + const vmResult = await executeInIsolatedVM({ + code, + params: {}, + envVars: {}, + contextVariables: {}, + timeoutMs: LOOP_CONDITION_TIMEOUT_MS, + requestId, + }) + + if (vmResult.error) { + logger.error('Failed to evaluate loop condition', { + condition, + evaluatedCondition, + error: vmResult.error, + }) + return false + } + + const result = Boolean(vmResult.result) logger.info('Loop condition evaluation result', { originalCondition: condition, diff --git a/apps/sim/executor/orchestrators/node.ts b/apps/sim/executor/orchestrators/node.ts index 2157807f3f..26ecb1c0ae 100644 --- a/apps/sim/executor/orchestrators/node.ts +++ b/apps/sim/executor/orchestrators/node.ts @@ -68,7 +68,7 @@ export class NodeExecutionOrchestrator { } if (node.metadata.isSentinel) { - const output = this.handleSentinel(ctx, node) + const output = await this.handleSentinel(ctx, node) const isFinalOutput = node.outgoingEdges.size === 0 return { nodeId, @@ -86,14 +86,17 @@ export class NodeExecutionOrchestrator { } } - private handleSentinel(ctx: ExecutionContext, node: DAGNode): NormalizedBlockOutput { + private async handleSentinel( + ctx: ExecutionContext, + node: DAGNode + ): Promise { const sentinelType = node.metadata.sentinelType const loopId = node.metadata.loopId switch (sentinelType) { case 'start': { if (loopId) { - const shouldExecute = this.loopOrchestrator.evaluateInitialCondition(ctx, loopId) + const shouldExecute = await this.loopOrchestrator.evaluateInitialCondition(ctx, loopId) if (!shouldExecute) { logger.info('While loop initial condition false, skipping loop body', { loopId }) return { @@ -112,7 +115,7 @@ export class NodeExecutionOrchestrator { return { shouldExit: true, selectedRoute: EDGE.LOOP_EXIT } } - const continuationResult = this.loopOrchestrator.evaluateLoopContinuation(ctx, loopId) + const continuationResult = await this.loopOrchestrator.evaluateLoopContinuation(ctx, loopId) if (continuationResult.shouldContinue) { return { diff --git a/apps/sim/lib/execution/isolated-vm.ts b/apps/sim/lib/execution/isolated-vm.ts index 142f59ff33..8cbbec8dba 100644 --- a/apps/sim/lib/execution/isolated-vm.ts +++ b/apps/sim/lib/execution/isolated-vm.ts @@ -204,12 +204,17 @@ async function ensureWorker(): Promise { import('node:child_process').then(({ spawn }) => { worker = spawn('node', [workerPath], { - stdio: ['ignore', 'pipe', 'inherit', 'ipc'], + stdio: ['ignore', 'pipe', 'pipe', 'ipc'], serialization: 'json', }) worker.on('message', handleWorkerMessage) + let stderrData = '' + worker.stderr?.on('data', (data: Buffer) => { + stderrData += data.toString() + }) + const startTimeout = setTimeout(() => { worker?.kill() worker = null @@ -232,20 +237,42 @@ async function ensureWorker(): Promise { } worker.on('message', readyHandler) - worker.on('exit', () => { + worker.on('exit', (code) => { if (workerIdleTimeout) { clearTimeout(workerIdleTimeout) workerIdleTimeout = null } + + const wasStartupFailure = !workerReady && workerReadyPromise + worker = null workerReady = false workerReadyPromise = null + + let errorMessage = 'Worker process exited unexpectedly' + if (stderrData.includes('isolated_vm') || stderrData.includes('MODULE_NOT_FOUND')) { + errorMessage = + 'Code execution requires the isolated-vm native module which failed to load. ' + + 'This usually means the module needs to be rebuilt for your Node.js version. 
' + + 'Please run: cd node_modules/isolated-vm && npm rebuild' + logger.error('isolated-vm module failed to load', { stderr: stderrData }) + } else if (stderrData) { + errorMessage = `Worker process failed: ${stderrData.slice(0, 500)}` + logger.error('Worker process failed', { stderr: stderrData }) + } + + if (wasStartupFailure) { + clearTimeout(startTimeout) + reject(new Error(errorMessage)) + return + } + for (const [id, pending] of pendingExecutions) { clearTimeout(pending.timeout) pending.resolve({ result: null, stdout: '', - error: { message: 'Worker process exited unexpectedly', name: 'WorkerError' }, + error: { message: errorMessage, name: 'WorkerError' }, }) pendingExecutions.delete(id) }