diff --git a/apps/sim/app/api/chat/utils.ts b/apps/sim/app/api/chat/utils.ts index dbf50d5411..2d954c8ddc 100644 --- a/apps/sim/app/api/chat/utils.ts +++ b/apps/sim/app/api/chat/utils.ts @@ -3,8 +3,9 @@ import { type NextRequest, NextResponse } from 'next/server' import { v4 as uuidv4 } from 'uuid' import { env } from '@/lib/env' import { createLogger } from '@/lib/logs/console-logger' -import { persistExecutionLogs } from '@/lib/logs/execution-logger' +import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session' import { buildTraceSpans } from '@/lib/logs/trace-spans' +import { processStreamingBlockLogs } from '@/lib/tokenization' import { decryptSecret } from '@/lib/utils' import { db } from '@/db' import { chat, environment as envTable, userStats, workflow } from '@/db/schema' @@ -252,11 +253,14 @@ export async function executeWorkflowForChat( const deployment = deploymentResult[0] const workflowId = deployment.workflowId + const executionId = uuidv4() + + // Set up enhanced logging for chat execution + const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'chat', requestId) // Check for multi-output configuration in customizations const customizations = (deployment.customizations || {}) as Record let outputBlockIds: string[] = [] - let outputPaths: string[] = [] // Extract output configs from the new schema format if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) { @@ -271,13 +275,11 @@ export async function executeWorkflowForChat( }) outputBlockIds = deployment.outputConfigs.map((config) => config.blockId) - outputPaths = deployment.outputConfigs.map((config) => config.path || '') } else { // Use customizations as fallback outputBlockIds = Array.isArray(customizations.outputBlockIds) ? customizations.outputBlockIds : [] - outputPaths = Array.isArray(customizations.outputPaths) ? 
customizations.outputPaths : [] } // Fall back to customizations if we still have no outputs @@ -287,7 +289,6 @@ export async function executeWorkflowForChat( customizations.outputBlockIds.length > 0 ) { outputBlockIds = customizations.outputBlockIds - outputPaths = customizations.outputPaths || new Array(outputBlockIds.length).fill('') } logger.debug(`[${requestId}] Using ${outputBlockIds.length} output blocks for extraction`) @@ -407,6 +408,13 @@ export async function executeWorkflowForChat( {} as Record> ) + // Start enhanced logging session + await loggingSession.safeStart({ + userId: deployment.userId, + workspaceId: '', // TODO: Get from workflow + variables: workflowVariables, + }) + const stream = new ReadableStream({ async start(controller) { const encoder = new TextEncoder() @@ -458,16 +466,41 @@ export async function executeWorkflowForChat( }, }) - const result = await executor.execute(workflowId) + // Set up enhanced logging on the executor + loggingSession.setupExecutor(executor) + + let result + try { + result = await executor.execute(workflowId) + } catch (error: any) { + logger.error(`[${requestId}] Chat workflow execution failed:`, error) + await loggingSession.safeCompleteWithError({ + endedAt: new Date().toISOString(), + totalDurationMs: 0, + error: { + message: error.message || 'Chat workflow execution failed', + stackTrace: error.stack, + }, + }) + throw error + } if (result && 'success' in result) { - result.logs?.forEach((log: BlockLog) => { - if (streamedContent.has(log.blockId)) { - if (log.output) { - log.output.content = streamedContent.get(log.blockId) + // Update streamed content and apply tokenization + if (result.logs) { + result.logs.forEach((log: BlockLog) => { + if (streamedContent.has(log.blockId)) { + const content = streamedContent.get(log.blockId) + if (log.output) { + log.output.content = content + } } - } - }) + }) + + // Process all logs for streaming tokenization + const processedCount = processStreamingBlockLogs(result.logs, streamedContent) + logger.info(`[CHAT-API] Processed ${processedCount} blocks for streaming tokenization`) + } const { traceSpans, totalDuration } = buildTraceSpans(result) const enrichedResult = { ...result, traceSpans, totalDuration } @@ -481,8 +514,7 @@ export async function executeWorkflowForChat( ;(enrichedResult.metadata as any).conversationId = conversationId } const executionId = uuidv4() - await persistExecutionLogs(workflowId, executionId, enrichedResult, 'chat') - logger.debug(`Persisted logs for deployed chat: ${executionId}`) + logger.debug(`Generated execution ID for deployed chat: ${executionId}`) if (result.success) { try { @@ -506,6 +538,17 @@ export async function executeWorkflowForChat( ) } + // Complete enhanced logging session (for both success and failure) + if (result && 'success' in result) { + const { traceSpans } = buildTraceSpans(result) + await loggingSession.safeComplete({ + endedAt: new Date().toISOString(), + totalDurationMs: result.metadata?.duration || 0, + finalOutput: result.output, + traceSpans, + }) + } + controller.close() }, }) diff --git a/apps/sim/app/api/logs/[executionId]/frozen-canvas/route.ts b/apps/sim/app/api/logs/[executionId]/frozen-canvas/route.ts new file mode 100644 index 0000000000..ab0cd77eeb --- /dev/null +++ b/apps/sim/app/api/logs/[executionId]/frozen-canvas/route.ts @@ -0,0 +1,76 @@ +import { eq } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { createLogger } from '@/lib/logs/console-logger' +import { db } from '@/db' +import { 
workflowExecutionLogs, workflowExecutionSnapshots } from '@/db/schema' + +const logger = createLogger('FrozenCanvasAPI') + +export async function GET( + _request: NextRequest, + { params }: { params: Promise<{ executionId: string }> } +) { + try { + const { executionId } = await params + + logger.debug(`Fetching frozen canvas data for execution: ${executionId}`) + + // Get the workflow execution log to find the snapshot + const [workflowLog] = await db + .select() + .from(workflowExecutionLogs) + .where(eq(workflowExecutionLogs.executionId, executionId)) + .limit(1) + + if (!workflowLog) { + return NextResponse.json({ error: 'Workflow execution not found' }, { status: 404 }) + } + + // Get the workflow state snapshot + const [snapshot] = await db + .select() + .from(workflowExecutionSnapshots) + .where(eq(workflowExecutionSnapshots.id, workflowLog.stateSnapshotId)) + .limit(1) + + if (!snapshot) { + return NextResponse.json({ error: 'Workflow state snapshot not found' }, { status: 404 }) + } + + const response = { + executionId, + workflowId: workflowLog.workflowId, + workflowState: snapshot.stateData, + executionMetadata: { + trigger: workflowLog.trigger, + startedAt: workflowLog.startedAt.toISOString(), + endedAt: workflowLog.endedAt?.toISOString(), + totalDurationMs: workflowLog.totalDurationMs, + blockStats: { + total: workflowLog.blockCount, + success: workflowLog.successCount, + error: workflowLog.errorCount, + skipped: workflowLog.skippedCount, + }, + cost: { + total: workflowLog.totalCost ? Number.parseFloat(workflowLog.totalCost) : null, + input: workflowLog.totalInputCost ? Number.parseFloat(workflowLog.totalInputCost) : null, + output: workflowLog.totalOutputCost + ? Number.parseFloat(workflowLog.totalOutputCost) + : null, + }, + totalTokens: workflowLog.totalTokens, + }, + } + + logger.debug(`Successfully fetched frozen canvas data for execution: ${executionId}`) + logger.debug( + `Workflow state contains ${Object.keys((snapshot.stateData as any)?.blocks || {}).length} blocks` + ) + + return NextResponse.json(response) + } catch (error) { + logger.error('Error fetching frozen canvas data:', error) + return NextResponse.json({ error: 'Failed to fetch frozen canvas data' }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/logs/cleanup/route.ts b/apps/sim/app/api/logs/cleanup/route.ts index a79e4ddc34..4333d3769d 100644 --- a/apps/sim/app/api/logs/cleanup/route.ts +++ b/apps/sim/app/api/logs/cleanup/route.ts @@ -3,9 +3,10 @@ import { and, eq, inArray, lt, sql } from 'drizzle-orm' import { NextResponse } from 'next/server' import { env } from '@/lib/env' import { createLogger } from '@/lib/logs/console-logger' +import { snapshotService } from '@/lib/logs/snapshot-service' import { getS3Client } from '@/lib/uploads/s3/s3-client' import { db } from '@/db' -import { subscription, user, workflow, workflowLogs } from '@/db/schema' +import { subscription, user, workflow, workflowExecutionLogs } from '@/db/schema' export const dynamic = 'force-dynamic' @@ -66,99 +67,143 @@ export async function GET(request: Request) { const workflowIds = workflowsQuery.map((w) => w.id) const results = { - total: 0, - archived: 0, - archiveFailed: 0, - deleted: 0, - deleteFailed: 0, + enhancedLogs: { + total: 0, + archived: 0, + archiveFailed: 0, + deleted: 0, + deleteFailed: 0, + }, + snapshots: { + cleaned: 0, + cleanupFailed: 0, + }, } const startTime = Date.now() const MAX_BATCHES = 10 + // Process enhanced logging cleanup let batchesProcessed = 0 let hasMoreLogs = true + logger.info(`Starting 
enhanced logs cleanup for ${workflowIds.length} workflows`) + while (hasMoreLogs && batchesProcessed < MAX_BATCHES) { - const oldLogs = await db + // Query enhanced execution logs that need cleanup + const oldEnhancedLogs = await db .select({ - id: workflowLogs.id, - workflowId: workflowLogs.workflowId, - executionId: workflowLogs.executionId, - level: workflowLogs.level, - message: workflowLogs.message, - duration: workflowLogs.duration, - trigger: workflowLogs.trigger, - createdAt: workflowLogs.createdAt, - metadata: workflowLogs.metadata, + id: workflowExecutionLogs.id, + workflowId: workflowExecutionLogs.workflowId, + executionId: workflowExecutionLogs.executionId, + stateSnapshotId: workflowExecutionLogs.stateSnapshotId, + level: workflowExecutionLogs.level, + message: workflowExecutionLogs.message, + trigger: workflowExecutionLogs.trigger, + startedAt: workflowExecutionLogs.startedAt, + endedAt: workflowExecutionLogs.endedAt, + totalDurationMs: workflowExecutionLogs.totalDurationMs, + blockCount: workflowExecutionLogs.blockCount, + successCount: workflowExecutionLogs.successCount, + errorCount: workflowExecutionLogs.errorCount, + skippedCount: workflowExecutionLogs.skippedCount, + totalCost: workflowExecutionLogs.totalCost, + totalInputCost: workflowExecutionLogs.totalInputCost, + totalOutputCost: workflowExecutionLogs.totalOutputCost, + totalTokens: workflowExecutionLogs.totalTokens, + metadata: workflowExecutionLogs.metadata, + createdAt: workflowExecutionLogs.createdAt, }) - .from(workflowLogs) + .from(workflowExecutionLogs) .where( and( - inArray(workflowLogs.workflowId, workflowIds), - lt(workflowLogs.createdAt, retentionDate) + inArray(workflowExecutionLogs.workflowId, workflowIds), + lt(workflowExecutionLogs.createdAt, retentionDate) ) ) .limit(BATCH_SIZE) - results.total += oldLogs.length + results.enhancedLogs.total += oldEnhancedLogs.length - for (const log of oldLogs) { + for (const log of oldEnhancedLogs) { const today = new Date().toISOString().split('T')[0] - const logKey = `archived-logs/${today}/${log.id}.json` - const logData = JSON.stringify(log) + // Archive enhanced log with more detailed structure + const enhancedLogKey = `archived-enhanced-logs/${today}/${log.id}.json` + const enhancedLogData = JSON.stringify({ + ...log, + archivedAt: new Date().toISOString(), + logType: 'enhanced', + }) try { await getS3Client().send( new PutObjectCommand({ Bucket: S3_CONFIG.bucket, - Key: logKey, - Body: logData, + Key: enhancedLogKey, + Body: enhancedLogData, ContentType: 'application/json', Metadata: { logId: String(log.id), workflowId: String(log.workflowId), + executionId: String(log.executionId), + logType: 'enhanced', archivedAt: new Date().toISOString(), }, }) ) - results.archived++ + results.enhancedLogs.archived++ try { + // Delete enhanced log (will cascade to workflowExecutionBlocks due to foreign key) const deleteResult = await db - .delete(workflowLogs) - .where(eq(workflowLogs.id, log.id)) - .returning({ id: workflowLogs.id }) + .delete(workflowExecutionLogs) + .where(eq(workflowExecutionLogs.id, log.id)) + .returning({ id: workflowExecutionLogs.id }) if (deleteResult.length > 0) { - results.deleted++ + results.enhancedLogs.deleted++ } else { - results.deleteFailed++ - logger.warn(`Failed to delete log ${log.id} after archiving: No rows deleted`) + results.enhancedLogs.deleteFailed++ + logger.warn( + `Failed to delete enhanced log ${log.id} after archiving: No rows deleted` + ) } } catch (deleteError) { - results.deleteFailed++ - logger.error(`Error deleting log 
${log.id} after archiving:`, { deleteError }) + results.enhancedLogs.deleteFailed++ + logger.error(`Error deleting enhanced log ${log.id} after archiving:`, { deleteError }) } } catch (archiveError) { - results.archiveFailed++ - logger.error(`Failed to archive log ${log.id}:`, { archiveError }) + results.enhancedLogs.archiveFailed++ + logger.error(`Failed to archive enhanced log ${log.id}:`, { archiveError }) } } batchesProcessed++ - hasMoreLogs = oldLogs.length === BATCH_SIZE + hasMoreLogs = oldEnhancedLogs.length === BATCH_SIZE + + logger.info( + `Processed enhanced logs batch ${batchesProcessed}: ${oldEnhancedLogs.length} logs` + ) + } - logger.info(`Processed batch ${batchesProcessed}: ${oldLogs.length} logs`) + // Cleanup orphaned snapshots + try { + const snapshotRetentionDays = Number(env.FREE_PLAN_LOG_RETENTION_DAYS || '7') + 1 // Keep snapshots 1 day longer + const cleanedSnapshots = await snapshotService.cleanupOrphanedSnapshots(snapshotRetentionDays) + results.snapshots.cleaned = cleanedSnapshots + logger.info(`Cleaned up ${cleanedSnapshots} orphaned snapshots`) + } catch (snapshotError) { + results.snapshots.cleanupFailed = 1 + logger.error('Error cleaning up orphaned snapshots:', { snapshotError }) } const timeElapsed = (Date.now() - startTime) / 1000 const reachedLimit = batchesProcessed >= MAX_BATCHES && hasMoreLogs return NextResponse.json({ - message: `Processed ${batchesProcessed} batches (${results.total} logs) in ${timeElapsed.toFixed(2)}s${reachedLimit ? ' (batch limit reached)' : ''}`, + message: `Processed ${batchesProcessed} enhanced log batches (${results.enhancedLogs.total} logs) in ${timeElapsed.toFixed(2)}s${reachedLimit ? ' (batch limit reached)' : ''}`, results, complete: !hasMoreLogs, batchLimitReached: reachedLimit, diff --git a/apps/sim/app/api/logs/enhanced/route.ts b/apps/sim/app/api/logs/enhanced/route.ts new file mode 100644 index 0000000000..a866a3686f --- /dev/null +++ b/apps/sim/app/api/logs/enhanced/route.ts @@ -0,0 +1,499 @@ +import { and, desc, eq, gte, inArray, lte, or, type SQL, sql } from 'drizzle-orm' +import { type NextRequest, NextResponse } from 'next/server' +import { z } from 'zod' +import { getSession } from '@/lib/auth' +import { createLogger } from '@/lib/logs/console-logger' +import { db } from '@/db' +import { workflow, workflowExecutionBlocks, workflowExecutionLogs } from '@/db/schema' + +const logger = createLogger('EnhancedLogsAPI') + +// Helper function to extract block executions from trace spans +function extractBlockExecutionsFromTraceSpans(traceSpans: any[]): any[] { + const blockExecutions: any[] = [] + + function processSpan(span: any) { + if (span.blockId) { + blockExecutions.push({ + id: span.id, + blockId: span.blockId, + blockName: span.name || '', + blockType: span.type, + startedAt: span.startTime, + endedAt: span.endTime, + durationMs: span.duration || 0, + status: span.status || 'success', + errorMessage: span.output?.error || undefined, + inputData: span.input || {}, + outputData: span.output || {}, + cost: span.cost || undefined, + metadata: {}, + }) + } + + // Process children recursively + if (span.children && Array.isArray(span.children)) { + span.children.forEach(processSpan) + } + } + + traceSpans.forEach(processSpan) + return blockExecutions +} + +export const dynamic = 'force-dynamic' +export const revalidate = 0 + +const QueryParamsSchema = z.object({ + includeWorkflow: z.coerce.boolean().optional().default(false), + includeBlocks: z.coerce.boolean().optional().default(false), + limit: 
z.coerce.number().optional().default(100), + offset: z.coerce.number().optional().default(0), + level: z.string().optional(), + workflowIds: z.string().optional(), // Comma-separated list of workflow IDs + folderIds: z.string().optional(), // Comma-separated list of folder IDs + triggers: z.string().optional(), // Comma-separated list of trigger types + startDate: z.string().optional(), + endDate: z.string().optional(), + search: z.string().optional(), +}) + +export async function GET(request: NextRequest) { + const requestId = crypto.randomUUID().slice(0, 8) + + try { + const session = await getSession() + if (!session?.user?.id) { + logger.warn(`[${requestId}] Unauthorized enhanced logs access attempt`) + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const userId = session.user.id + + try { + const { searchParams } = new URL(request.url) + const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries())) + + // Get user's workflows + const userWorkflows = await db + .select({ id: workflow.id, folderId: workflow.folderId }) + .from(workflow) + .where(eq(workflow.userId, userId)) + + const userWorkflowIds = userWorkflows.map((w) => w.id) + + if (userWorkflowIds.length === 0) { + return NextResponse.json({ data: [], total: 0 }, { status: 200 }) + } + + // Build conditions for enhanced logs + let conditions: SQL | undefined = inArray(workflowExecutionLogs.workflowId, userWorkflowIds) + + // Filter by level + if (params.level && params.level !== 'all') { + conditions = and(conditions, eq(workflowExecutionLogs.level, params.level)) + } + + // Filter by specific workflow IDs + if (params.workflowIds) { + const workflowIds = params.workflowIds.split(',').filter(Boolean) + const filteredWorkflowIds = workflowIds.filter((id) => userWorkflowIds.includes(id)) + if (filteredWorkflowIds.length > 0) { + conditions = and( + conditions, + inArray(workflowExecutionLogs.workflowId, filteredWorkflowIds) + ) + } + } + + // Filter by folder IDs + if (params.folderIds) { + const folderIds = params.folderIds.split(',').filter(Boolean) + const workflowsInFolders = userWorkflows + .filter((w) => w.folderId && folderIds.includes(w.folderId)) + .map((w) => w.id) + + if (workflowsInFolders.length > 0) { + conditions = and( + conditions, + inArray(workflowExecutionLogs.workflowId, workflowsInFolders) + ) + } + } + + // Filter by triggers + if (params.triggers) { + const triggers = params.triggers.split(',').filter(Boolean) + if (triggers.length > 0 && !triggers.includes('all')) { + conditions = and(conditions, inArray(workflowExecutionLogs.trigger, triggers)) + } + } + + // Filter by date range + if (params.startDate) { + conditions = and( + conditions, + gte(workflowExecutionLogs.startedAt, new Date(params.startDate)) + ) + } + if (params.endDate) { + conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate))) + } + + // Filter by search query + if (params.search) { + const searchTerm = `%${params.search}%` + conditions = and( + conditions, + or( + sql`${workflowExecutionLogs.message} ILIKE ${searchTerm}`, + sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}` + ) + ) + } + + // Execute the query + const logs = await db + .select() + .from(workflowExecutionLogs) + .where(conditions) + .orderBy(desc(workflowExecutionLogs.startedAt)) + .limit(params.limit) + .offset(params.offset) + + // Get total count for pagination + const countResult = await db + .select({ count: sql`count(*)` }) + .from(workflowExecutionLogs) + 
.where(conditions) + + const count = countResult[0]?.count || 0 + + // Get block executions for all workflow executions + const executionIds = logs.map((log) => log.executionId) + let blockExecutionsByExecution: Record = {} + + if (executionIds.length > 0) { + const blockLogs = await db + .select() + .from(workflowExecutionBlocks) + .where(inArray(workflowExecutionBlocks.executionId, executionIds)) + .orderBy(workflowExecutionBlocks.startedAt) + + // Group block logs by execution ID + blockExecutionsByExecution = blockLogs.reduce( + (acc, blockLog) => { + if (!acc[blockLog.executionId]) { + acc[blockLog.executionId] = [] + } + acc[blockLog.executionId].push({ + id: blockLog.id, + blockId: blockLog.blockId, + blockName: blockLog.blockName || '', + blockType: blockLog.blockType, + startedAt: blockLog.startedAt.toISOString(), + endedAt: blockLog.endedAt?.toISOString() || blockLog.startedAt.toISOString(), + durationMs: blockLog.durationMs || 0, + status: blockLog.status, + errorMessage: blockLog.errorMessage || undefined, + errorStackTrace: blockLog.errorStackTrace || undefined, + inputData: blockLog.inputData, + outputData: blockLog.outputData, + cost: blockLog.costTotal + ? { + input: Number(blockLog.costInput) || 0, + output: Number(blockLog.costOutput) || 0, + total: Number(blockLog.costTotal) || 0, + tokens: { + prompt: blockLog.tokensPrompt || 0, + completion: blockLog.tokensCompletion || 0, + total: blockLog.tokensTotal || 0, + }, + model: blockLog.modelUsed || '', + } + : undefined, + metadata: blockLog.metadata || {}, + }) + return acc + }, + {} as Record + ) + } + + // Create clean trace spans from block executions + const createTraceSpans = (blockExecutions: any[]) => { + return blockExecutions.map((block, index) => { + // For error blocks, include error information in the output + let output = block.outputData + if (block.status === 'error' && block.errorMessage) { + output = { + ...output, + error: block.errorMessage, + stackTrace: block.errorStackTrace, + } + } + + return { + id: block.id, + name: `Block ${block.blockName || block.blockType} (${block.blockType})`, + type: block.blockType, + duration: block.durationMs, + startTime: block.startedAt, + endTime: block.endedAt, + status: block.status === 'success' ? 
'success' : 'error', + blockId: block.blockId, + input: block.inputData, + output, + tokens: block.cost?.tokens?.total || 0, + relativeStartMs: index * 100, + children: [], + toolCalls: [], + } + }) + } + + // Extract cost information from block executions + const extractCostSummary = (blockExecutions: any[]) => { + let totalCost = 0 + let totalInputCost = 0 + let totalOutputCost = 0 + let totalTokens = 0 + let totalPromptTokens = 0 + let totalCompletionTokens = 0 + const models = new Map() + + blockExecutions.forEach((block) => { + if (block.cost) { + totalCost += Number(block.cost.total) || 0 + totalInputCost += Number(block.cost.input) || 0 + totalOutputCost += Number(block.cost.output) || 0 + totalTokens += block.cost.tokens?.total || 0 + totalPromptTokens += block.cost.tokens?.prompt || 0 + totalCompletionTokens += block.cost.tokens?.completion || 0 + + // Track per-model costs + if (block.cost.model) { + if (!models.has(block.cost.model)) { + models.set(block.cost.model, { + input: 0, + output: 0, + total: 0, + tokens: { prompt: 0, completion: 0, total: 0 }, + }) + } + const modelCost = models.get(block.cost.model) + modelCost.input += Number(block.cost.input) || 0 + modelCost.output += Number(block.cost.output) || 0 + modelCost.total += Number(block.cost.total) || 0 + modelCost.tokens.prompt += block.cost.tokens?.prompt || 0 + modelCost.tokens.completion += block.cost.tokens?.completion || 0 + modelCost.tokens.total += block.cost.tokens?.total || 0 + } + } + }) + + return { + total: totalCost, + input: totalInputCost, + output: totalOutputCost, + tokens: { + total: totalTokens, + prompt: totalPromptTokens, + completion: totalCompletionTokens, + }, + models: Object.fromEntries(models), // Convert Map to object for JSON serialization + } + } + + // Transform to clean enhanced log format + const enhancedLogs = logs.map((log) => { + const blockExecutions = blockExecutionsByExecution[log.executionId] || [] + + // Use stored trace spans from metadata if available, otherwise create from block executions + const storedTraceSpans = (log.metadata as any)?.traceSpans + const traceSpans = + storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0 + ? storedTraceSpans + : createTraceSpans(blockExecutions) + + // Use extracted cost summary if available, otherwise use stored values + const costSummary = + blockExecutions.length > 0 + ? extractCostSummary(blockExecutions) + : { + input: Number(log.totalInputCost) || 0, + output: Number(log.totalOutputCost) || 0, + total: Number(log.totalCost) || 0, + tokens: { + total: log.totalTokens || 0, + prompt: (log.metadata as any)?.tokenBreakdown?.prompt || 0, + completion: (log.metadata as any)?.tokenBreakdown?.completion || 0, + }, + models: (log.metadata as any)?.models || {}, + } + + return { + id: log.id, + workflowId: log.workflowId, + executionId: log.executionId, + level: log.level, + message: log.message, + duration: log.totalDurationMs ? 
`${log.totalDurationMs}ms` : null, + trigger: log.trigger, + createdAt: log.startedAt.toISOString(), + metadata: { + totalDuration: log.totalDurationMs, + cost: costSummary, + blockStats: { + total: log.blockCount, + success: log.successCount, + error: log.errorCount, + skipped: log.skippedCount, + }, + traceSpans, + blockExecutions, + enhanced: true, + }, + } + }) + + if (params.includeWorkflow) { + const workflowIds = [...new Set(logs.map((log) => log.workflowId))] + const workflowConditions = inArray(workflow.id, workflowIds) + + const workflowData = await db.select().from(workflow).where(workflowConditions) + const workflowMap = new Map(workflowData.map((w) => [w.id, w])) + + const logsWithWorkflow = enhancedLogs.map((log) => ({ + ...log, + workflow: workflowMap.get(log.workflowId) || null, + })) + + return NextResponse.json( + { + data: logsWithWorkflow, + total: Number(count), + page: Math.floor(params.offset / params.limit) + 1, + pageSize: params.limit, + totalPages: Math.ceil(Number(count) / params.limit), + }, + { status: 200 } + ) + } + + // Include block execution data if requested + if (params.includeBlocks) { + const executionIds = logs.map((log) => log.executionId) + + if (executionIds.length > 0) { + const blockLogs = await db + .select() + .from(workflowExecutionBlocks) + .where(inArray(workflowExecutionBlocks.executionId, executionIds)) + .orderBy(workflowExecutionBlocks.startedAt) + + // Group block logs by execution ID + const blockLogsByExecution = blockLogs.reduce( + (acc, blockLog) => { + if (!acc[blockLog.executionId]) { + acc[blockLog.executionId] = [] + } + acc[blockLog.executionId].push({ + id: blockLog.id, + blockId: blockLog.blockId, + blockName: blockLog.blockName || '', + blockType: blockLog.blockType, + startedAt: blockLog.startedAt.toISOString(), + endedAt: blockLog.endedAt?.toISOString() || blockLog.startedAt.toISOString(), + durationMs: blockLog.durationMs || 0, + status: blockLog.status, + errorMessage: blockLog.errorMessage || undefined, + inputData: blockLog.inputData, + outputData: blockLog.outputData, + cost: blockLog.costTotal + ? 
{ + input: Number(blockLog.costInput) || 0, + output: Number(blockLog.costOutput) || 0, + total: Number(blockLog.costTotal) || 0, + tokens: { + prompt: blockLog.tokensPrompt || 0, + completion: blockLog.tokensCompletion || 0, + total: blockLog.tokensTotal || 0, + }, + model: blockLog.modelUsed || '', + } + : undefined, + }) + return acc + }, + {} as Record + ) + + // For executions with no block logs in the database, + // extract block executions from stored trace spans in metadata + logs.forEach((log) => { + if ( + !blockLogsByExecution[log.executionId] || + blockLogsByExecution[log.executionId].length === 0 + ) { + const storedTraceSpans = (log.metadata as any)?.traceSpans + if (storedTraceSpans && Array.isArray(storedTraceSpans)) { + blockLogsByExecution[log.executionId] = + extractBlockExecutionsFromTraceSpans(storedTraceSpans) + } + } + }) + + // Add block logs to metadata + const logsWithBlocks = enhancedLogs.map((log) => ({ + ...log, + metadata: { + ...log.metadata, + blockExecutions: blockLogsByExecution[log.executionId] || [], + }, + })) + + return NextResponse.json( + { + data: logsWithBlocks, + total: Number(count), + page: Math.floor(params.offset / params.limit) + 1, + pageSize: params.limit, + totalPages: Math.ceil(Number(count) / params.limit), + }, + { status: 200 } + ) + } + } + + // Return basic logs + return NextResponse.json( + { + data: enhancedLogs, + total: Number(count), + page: Math.floor(params.offset / params.limit) + 1, + pageSize: params.limit, + totalPages: Math.ceil(Number(count) / params.limit), + }, + { status: 200 } + ) + } catch (validationError) { + if (validationError instanceof z.ZodError) { + logger.warn(`[${requestId}] Invalid enhanced logs request parameters`, { + errors: validationError.errors, + }) + return NextResponse.json( + { + error: 'Invalid request parameters', + details: validationError.errors, + }, + { status: 400 } + ) + } + throw validationError + } + } catch (error: any) { + logger.error(`[${requestId}] Enhanced logs fetch error`, error) + return NextResponse.json({ error: error.message }, { status: 500 }) + } +} diff --git a/apps/sim/app/api/schedules/execute/route.test.ts b/apps/sim/app/api/schedules/execute/route.test.ts index e05cea9514..3b8ae0dd6c 100644 --- a/apps/sim/app/api/schedules/execute/route.test.ts +++ b/apps/sim/app/api/schedules/execute/route.test.ts @@ -5,7 +5,6 @@ */ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { - createMockRequest, mockExecutionDependencies, mockScheduleExecuteDb, sampleWorkflowState, @@ -23,7 +22,7 @@ describe('Scheduled Workflow Execution API Route', () => { blocks: sampleWorkflowState.blocks, edges: sampleWorkflowState.edges || [], loops: sampleWorkflowState.loops || {}, - parallels: sampleWorkflowState.parallels || {}, + parallels: {}, isFromNormalizedTables: true, }), })) @@ -122,9 +121,8 @@ describe('Scheduled Workflow Execution API Route', () => { })), })) - const req = createMockRequest('GET') const { GET } = await import('./route') - const response = await GET(req) + const response = await GET() expect(response).toBeDefined() const data = await response.json() @@ -136,7 +134,6 @@ describe('Scheduled Workflow Execution API Route', () => { const persistExecutionErrorMock = vi.fn().mockResolvedValue(undefined) vi.doMock('@/lib/logs/execution-logger', () => ({ - persistExecutionLogs: vi.fn().mockResolvedValue(undefined), persistExecutionError: persistExecutionErrorMock, })) @@ -146,9 +143,8 @@ describe('Scheduled Workflow Execution API Route', () => { })), 
})) - const req = createMockRequest('GET') const { GET } = await import('./route') - const response = await GET(req) + const response = await GET() expect(response).toBeDefined() @@ -176,9 +172,8 @@ describe('Scheduled Workflow Execution API Route', () => { return { db: mockDb } }) - const req = createMockRequest('GET') const { GET } = await import('./route') - const response = await GET(req) + const response = await GET() expect(response.status).toBe(200) const data = await response.json() expect(data).toHaveProperty('executedCount', 0) @@ -205,9 +200,8 @@ describe('Scheduled Workflow Execution API Route', () => { return { db: mockDb } }) - const req = createMockRequest('GET') const { GET } = await import('./route') - const response = await GET(req) + const response = await GET() expect(response.status).toBe(500) const data = await response.json() @@ -238,9 +232,8 @@ describe('Scheduled Workflow Execution API Route', () => { ], }) - const req = createMockRequest('GET') const { GET } = await import('./route') - const response = await GET(req) + const response = await GET() expect(response.status).toBe(200) }) @@ -269,9 +262,8 @@ describe('Scheduled Workflow Execution API Route', () => { ], }) - const req = createMockRequest('GET') const { GET } = await import('./route') - const response = await GET(req) + const response = await GET() expect(response.status).toBe(200) const data = await response.json() diff --git a/apps/sim/app/api/schedules/execute/route.ts b/apps/sim/app/api/schedules/execute/route.ts index 84652ee3bd..45dfedb83f 100644 --- a/apps/sim/app/api/schedules/execute/route.ts +++ b/apps/sim/app/api/schedules/execute/route.ts @@ -1,10 +1,10 @@ import { Cron } from 'croner' import { and, eq, lte, not, sql } from 'drizzle-orm' -import { type NextRequest, NextResponse } from 'next/server' +import { NextResponse } from 'next/server' import { v4 as uuidv4 } from 'uuid' import { z } from 'zod' import { createLogger } from '@/lib/logs/console-logger' -import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger' +import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session' import { buildTraceSpans } from '@/lib/logs/trace-spans' import { type BlockState, @@ -17,7 +17,7 @@ import { decryptSecret } from '@/lib/utils' import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers' import { updateWorkflowRunCounts } from '@/lib/workflows/utils' import { db } from '@/db' -import { environment, userStats, workflow, workflowSchedule } from '@/db/schema' +import { environment as environmentTable, userStats, workflow, workflowSchedule } from '@/db/schema' import { Executor } from '@/executor' import { Serializer } from '@/serializer' import { mergeSubblockState } from '@/stores/workflows/server-utils' @@ -58,7 +58,7 @@ const EnvVarsSchema = z.record(z.string()) const runningExecutions = new Set() -export async function GET(req: NextRequest) { +export async function GET() { logger.info(`Scheduled execution triggered at ${new Date().toISOString()}`) const requestId = crypto.randomUUID().slice(0, 8) const now = new Date() @@ -85,6 +85,7 @@ export async function GET(req: NextRequest) { for (const schedule of dueSchedules) { const executionId = uuidv4() + let loggingSession: EnhancedLoggingSession | null = null try { if (runningExecutions.has(schedule.workflowId)) { @@ -118,15 +119,7 @@ export async function GET(req: NextRequest) { } ) - await persistExecutionError( - schedule.workflowId, - executionId, - new Error( - usageCheck.message || - 
'Usage limit exceeded. Please upgrade your plan to continue running scheduled workflows.' - ), - 'schedule' - ) + // Error logging handled by enhanced logging session const retryDelay = 24 * 60 * 60 * 1000 // 24 hour delay for exceeded limits const nextRetryAt = new Date(now.getTime() + retryDelay) @@ -176,8 +169,8 @@ export async function GET(req: NextRequest) { // Retrieve environment variables for this user (if any). const [userEnv] = await db .select() - .from(environment) - .where(eq(environment.userId, workflowRecord.userId)) + .from(environmentTable) + .where(eq(environmentTable.userId, workflowRecord.userId)) .limit(1) if (!userEnv) { @@ -306,6 +299,30 @@ export async function GET(req: NextRequest) { logger.debug(`[${requestId}] No workflow variables found for: ${schedule.workflowId}`) } + // Start enhanced logging + loggingSession = new EnhancedLoggingSession( + schedule.workflowId, + executionId, + 'schedule', + requestId + ) + + // Load the actual workflow state from normalized tables + const enhancedNormalizedData = await loadWorkflowFromNormalizedTables(schedule.workflowId) + + if (!enhancedNormalizedData) { + throw new Error( + `Workflow ${schedule.workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.` + ) + } + + // Start enhanced logging with environment variables + await loggingSession.safeStart({ + userId: workflowRecord.userId, + workspaceId: workflowRecord.workspaceId || '', + variables: variables || {}, + }) + const executor = new Executor( serializedWorkflow, processedBlockStates, @@ -313,6 +330,10 @@ export async function GET(req: NextRequest) { input, workflowVariables ) + + // Set up enhanced logging on the executor + loggingSession.setupExecutor(executor) + const result = await executor.execute(schedule.workflowId) const executionResult = @@ -343,13 +364,16 @@ export async function GET(req: NextRequest) { const { traceSpans, totalDuration } = buildTraceSpans(executionResult) - const enrichedResult = { - ...executionResult, - traceSpans, - totalDuration, - } + // Log individual block executions to enhanced system are automatically + // handled by the logging session - await persistExecutionLogs(schedule.workflowId, executionId, enrichedResult, 'schedule') + // Complete enhanced logging + await loggingSession.safeComplete({ + endedAt: new Date().toISOString(), + totalDurationMs: totalDuration || 0, + finalOutput: executionResult.output || {}, + traceSpans: (traceSpans || []) as any, + }) if (executionResult.success) { logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`) @@ -413,7 +437,18 @@ export async function GET(req: NextRequest) { error ) - await persistExecutionError(schedule.workflowId, executionId, error, 'schedule') + // Error logging handled by enhanced logging session + + if (loggingSession) { + await loggingSession.safeCompleteWithError({ + endedAt: new Date().toISOString(), + totalDurationMs: 0, + error: { + message: error.message || 'Scheduled workflow execution failed', + stackTrace: error.stack, + }, + }) + } let nextRunAt: Date try { diff --git a/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts b/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts index 8293dbed4f..46bfc711a6 100644 --- a/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts +++ b/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts @@ -32,7 +32,6 @@ const executeMock = vi.fn().mockResolvedValue({ endTime: new Date().toISOString(), }, }) -const persistExecutionLogsMock = 
vi.fn().mockResolvedValue(undefined) const persistExecutionErrorMock = vi.fn().mockResolvedValue(undefined) // Mock the DB schema objects @@ -80,7 +79,6 @@ vi.mock('@/executor', () => ({ })) vi.mock('@/lib/logs/execution-logger', () => ({ - persistExecutionLogs: persistExecutionLogsMock, persistExecutionError: persistExecutionErrorMock, })) diff --git a/apps/sim/app/api/workflows/[id]/deploy/route.ts b/apps/sim/app/api/workflows/[id]/deploy/route.ts index 3ea517f142..449e86b708 100644 --- a/apps/sim/app/api/workflows/[id]/deploy/route.ts +++ b/apps/sim/app/api/workflows/[id]/deploy/route.ts @@ -139,7 +139,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ return createErrorResponse(validation.error.message, validation.error.status) } - // Get the workflow to find the user + // Get the workflow to find the user (removed deprecated state column) const workflowData = await db .select({ userId: workflow.userId, diff --git a/apps/sim/app/api/workflows/[id]/execute/route.test.ts b/apps/sim/app/api/workflows/[id]/execute/route.test.ts index dce39e5bab..a2dfbe0f54 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.test.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.test.ts @@ -88,6 +88,7 @@ describe('Workflow Execution API Route', () => { vi.doMock('@/executor', () => ({ Executor: vi.fn().mockImplementation(() => ({ execute: executeMock, + setEnhancedLogger: vi.fn(), })), })) @@ -104,6 +105,14 @@ describe('Workflow Execution API Route', () => { persistExecutionError: vi.fn().mockResolvedValue(undefined), })) + vi.doMock('@/lib/logs/enhanced-execution-logger', () => ({ + enhancedExecutionLogger: { + startWorkflowExecution: vi.fn().mockResolvedValue(undefined), + logBlockExecution: vi.fn().mockResolvedValue(undefined), + completeWorkflowExecution: vi.fn().mockResolvedValue(undefined), + }, + })) + vi.doMock('@/lib/logs/trace-spans', () => ({ buildTraceSpans: vi.fn().mockReturnValue({ traceSpans: [], @@ -395,6 +404,7 @@ describe('Workflow Execution API Route', () => { vi.doMock('@/executor', () => ({ Executor: vi.fn().mockImplementation(() => ({ execute: vi.fn().mockRejectedValue(new Error('Execution failed')), + setEnhancedLogger: vi.fn(), })), })) @@ -418,10 +428,10 @@ describe('Workflow Execution API Route', () => { expect(data).toHaveProperty('error') expect(data.error).toContain('Execution failed') - // Verify error logger was called - const persistExecutionError = (await import('@/lib/logs/execution-logger')) - .persistExecutionError - expect(persistExecutionError).toHaveBeenCalled() + // Verify enhanced logger was called for error completion + const enhancedExecutionLogger = (await import('@/lib/logs/enhanced-execution-logger')) + .enhancedExecutionLogger + expect(enhancedExecutionLogger.completeWorkflowExecution).toHaveBeenCalled() }) /** diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts index 119dd23e52..1520caeec3 100644 --- a/apps/sim/app/api/workflows/[id]/execute/route.ts +++ b/apps/sim/app/api/workflows/[id]/execute/route.ts @@ -3,7 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server' import { v4 as uuidv4 } from 'uuid' import { z } from 'zod' import { createLogger } from '@/lib/logs/console-logger' -import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger' +import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session' import { buildTraceSpans } from '@/lib/logs/trace-spans' import { checkServerSideUsageLimits } 
from '@/lib/usage-monitor' import { decryptSecret } from '@/lib/utils' @@ -14,11 +14,10 @@ import { workflowHasResponseBlock, } from '@/lib/workflows/utils' import { db } from '@/db' -import { environment, userStats } from '@/db/schema' +import { environment as environmentTable, userStats } from '@/db/schema' import { Executor } from '@/executor' import { Serializer } from '@/serializer' import { mergeSubblockState } from '@/stores/workflows/server-utils' -import type { WorkflowState } from '@/stores/workflows/workflow/types' import { validateWorkflowAccess } from '../../middleware' import { createErrorResponse, createSuccessResponse } from '../../utils' @@ -59,6 +58,8 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) { throw new Error('Execution is already running') } + const loggingSession = new EnhancedLoggingSession(workflowId, executionId, 'api', requestId) + // Check if the user has exceeded their usage limits const usageCheck = await checkServerSideUsageLimits(workflow.userId) if (usageCheck.isExceeded) { @@ -92,39 +93,30 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) { logger.debug(`[${requestId}] Loading workflow ${workflowId} from normalized tables`) const normalizedData = await loadWorkflowFromNormalizedTables(workflowId) - let blocks: Record - let edges: any[] - let loops: Record - let parallels: Record - - if (normalizedData) { - // Use normalized data as primary source - ;({ blocks, edges, loops, parallels } = normalizedData) - logger.info(`[${requestId}] Using normalized tables for workflow execution: ${workflowId}`) - } else { - // Fallback to deployed state if available (for legacy workflows) - logger.warn( - `[${requestId}] No normalized data found, falling back to deployed state for workflow: ${workflowId}` + if (!normalizedData) { + throw new Error( + `Workflow ${workflowId} has no normalized data available. Ensure the workflow is properly saved to normalized tables.` ) - - if (!workflow.deployedState) { - throw new Error( - `Workflow ${workflowId} has no deployed state and no normalized data available` - ) - } - - const deployedState = workflow.deployedState as WorkflowState - ;({ blocks, edges, loops, parallels } = deployedState) } + // Use normalized data as primary source + const { blocks, edges, loops, parallels } = normalizedData + logger.info(`[${requestId}] Using normalized tables for workflow execution: ${workflowId}`) + logger.debug(`[${requestId}] Normalized data loaded:`, { + blocksCount: Object.keys(blocks || {}).length, + edgesCount: (edges || []).length, + loopsCount: Object.keys(loops || {}).length, + parallelsCount: Object.keys(parallels || {}).length, + }) + // Use the same execution flow as in scheduled executions const mergedStates = mergeSubblockState(blocks) // Fetch the user's environment variables (if any) const [userEnv] = await db .select() - .from(environment) - .where(eq(environment.userId, workflow.userId)) + .from(environmentTable) + .where(eq(environmentTable.userId, workflow.userId)) .limit(1) if (!userEnv) { @@ -133,9 +125,14 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) { ) } - // Parse and validate environment variables. const variables = EnvVarsSchema.parse(userEnv?.variables ?? 
{}) + await loggingSession.safeStart({ + userId: workflow.userId, + workspaceId: workflow.workspaceId, + variables, + }) + // Replace environment variables in the block states const currentBlockStates = await Object.entries(mergedStates).reduce( async (accPromise, [id, block]) => { @@ -260,6 +257,9 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) { workflowVariables ) + // Set up enhanced logging on the executor + loggingSession.setupExecutor(executor) + const result = await executor.execute(workflowId) // Check if we got a StreamingExecution result (with stream + execution properties) @@ -271,6 +271,9 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) { executionTime: executionResult.metadata?.duration, }) + // Build trace spans from execution result (works for both success and failure) + const { traceSpans, totalDuration } = buildTraceSpans(executionResult) + // Update workflow run counts if execution was successful if (executionResult.success) { await updateWorkflowRunCounts(workflowId) @@ -285,24 +288,26 @@ async function executeWorkflow(workflow: any, requestId: string, input?: any) { .where(eq(userStats.userId, workflow.userId)) } - // Build trace spans from execution logs - const { traceSpans, totalDuration } = buildTraceSpans(executionResult) - - // Add trace spans to the execution result - const enrichedResult = { - ...executionResult, - traceSpans, - totalDuration, - } - - // Log each execution step and the final result - await persistExecutionLogs(workflowId, executionId, enrichedResult, 'api') + await loggingSession.safeComplete({ + endedAt: new Date().toISOString(), + totalDurationMs: totalDuration || 0, + finalOutput: executionResult.output || {}, + traceSpans: (traceSpans || []) as any, + }) return executionResult } catch (error: any) { logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error) - // Log the error - await persistExecutionError(workflowId, executionId, error, 'api') + + await loggingSession.safeCompleteWithError({ + endedAt: new Date().toISOString(), + totalDurationMs: 0, + error: { + message: error.message || 'Workflow execution failed', + stackTrace: error.stack, + }, + }) + throw error } finally { runningExecutions.delete(executionKey) diff --git a/apps/sim/app/api/workflows/[id]/log/route.ts b/apps/sim/app/api/workflows/[id]/log/route.ts index af75a5201d..dee260c3ed 100644 --- a/apps/sim/app/api/workflows/[id]/log/route.ts +++ b/apps/sim/app/api/workflows/[id]/log/route.ts @@ -1,7 +1,7 @@ import type { NextRequest } from 'next/server' -import { v4 as uuidv4 } from 'uuid' import { createLogger } from '@/lib/logs/console-logger' -import { persistExecutionLogs, persistLog } from '@/lib/logs/execution-logger' +import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session' +import { buildTraceSpans } from '@/lib/logs/trace-spans' import { validateWorkflowAccess } from '../../middleware' import { createErrorResponse, createSuccessResponse } from '../../utils' @@ -33,9 +33,25 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{ // Check if this execution is from chat using only the explicit source flag const isChatExecution = result.metadata?.source === 'chat' - // Use persistExecutionLogs which handles tool call extraction - // Use 'chat' trigger type for chat executions, otherwise 'manual' - await persistExecutionLogs(id, executionId, result, isChatExecution ? 
'chat' : 'manual')
+    // Also log to enhanced system
+    const triggerType = isChatExecution ? 'chat' : 'manual'
+    const loggingSession = new EnhancedLoggingSession(id, executionId, triggerType, requestId)
+
+    await loggingSession.safeStart({
+      userId: '', // TODO: Get from session
+      workspaceId: '', // TODO: Get from workflow
+      variables: {},
+    })
+
+    // Build trace spans from execution logs
+    const { traceSpans } = buildTraceSpans(result)
+
+    await loggingSession.safeComplete({
+      endedAt: new Date().toISOString(),
+      totalDurationMs: result.metadata?.duration || 0,
+      finalOutput: result.output || {},
+      traceSpans,
+    })

     return createSuccessResponse({
       message: 'Execution logs persisted successfully',
@@ -52,21 +68,6 @@
       executionId,
     })

-    // Persist each log using the original method
-    for (const log of logs) {
-      await persistLog({
-        id: uuidv4(),
-        workflowId: id,
-        executionId,
-        level: log.level,
-        message: log.message,
-        duration: log.duration,
-        trigger: log.trigger || 'manual',
-        createdAt: new Date(log.createdAt || new Date()),
-        metadata: log.metadata,
-      })
-    }
-
     return createSuccessResponse({ message: 'Logs persisted successfully' })
   } catch (error: any) {
     logger.error(`[${requestId}] Error persisting logs for workflow: ${id}`, error)
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/control-bar/control-bar.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/control-bar/control-bar.tsx
index 738ddeedaa..e8accf86e6 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/control-bar/control-bar.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/control-bar/control-bar.tsx
@@ -36,7 +36,7 @@ export function ControlBar() {
   const fetchLogs = async () => {
     try {
       const queryParams = buildQueryParams(1, 50) // Get first 50 logs for refresh
-      const response = await fetch(`/api/logs?${queryParams}`)
+      const response = await fetch(`/api/logs/enhanced?${queryParams}`)

       if (!response.ok) {
         throw new Error(`Error fetching logs: ${response.statusText}`)
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
new file mode 100644
index 0000000000..281648f3ac
--- /dev/null
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
@@ -0,0 +1,99 @@
+'use client'
+
+import { useState } from 'react'
+import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
+import { Badge } from '@/components/ui/badge'
+import { Button } from '@/components/ui/button'
+import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
+import { cn } from '@/lib/utils'
+import { FrozenCanvas } from './frozen-canvas'
+
+interface FrozenCanvasModalProps {
+  executionId: string
+  workflowName?: string
+  trigger?: string
+  traceSpans?: any[] // TraceSpans data from log metadata
+  isOpen: boolean
+  onClose: () => void
+}
+
+export function FrozenCanvasModal({
+  executionId,
+  workflowName,
+  trigger,
+  traceSpans,
+  isOpen,
+  onClose,
+}: FrozenCanvasModalProps) {
+  const [isFullscreen, setIsFullscreen] = useState(false)
+
+  const toggleFullscreen = () => {
+    setIsFullscreen(!isFullscreen)
+  }
+
+  return (
+    <Dialog open={isOpen} onOpenChange={onClose}>
+      <DialogContent
+        className={cn(
+          'flex flex-col gap-0 p-0',
+          isFullscreen ? 'h-screen max-h-screen w-screen max-w-none' : 'h-[85vh] max-w-6xl'
+        )}
+      >
+        {/* Header */}
+        <DialogHeader className='flex flex-row items-center justify-between space-y-0 border-b px-4 py-3'>
+          <div className='flex items-center gap-2'>
+            <DialogTitle className='flex items-center gap-2'>
+              <Eye className='h-4 w-4' />
+              Logged Workflow State
+            </DialogTitle>
+            {workflowName && <Badge variant='outline'>{workflowName}</Badge>}
+            {trigger && (
+              <Badge variant='secondary' className='capitalize'>
+                {trigger}
+              </Badge>
+            )}
+            <Badge variant='outline' className='font-mono text-xs'>
+              {executionId.slice(0, 8)}...
+            </Badge>
+          </div>
+          <div className='flex items-center gap-1'>
+            <Button variant='ghost' size='icon' onClick={toggleFullscreen}>
+              {isFullscreen ? <Minimize2 className='h-4 w-4' /> : <Maximize2 className='h-4 w-4' />}
+            </Button>
+            <Button variant='ghost' size='icon' onClick={onClose}>
+              <X className='h-4 w-4' />
+            </Button>
+          </div>
+        </DialogHeader>
+
+        {/* Canvas Container */}
+        <div className='flex-1 overflow-hidden'>
+          <FrozenCanvas
+            executionId={executionId}
+            traceSpans={traceSpans}
+            height='100%'
+            width='100%'
+          />
+        </div>
+
+        {/* Footer with instructions */}
+        <div className='border-t px-4 py-2 text-muted-foreground text-xs'>
+          💡 Click on blocks to see their input and output data at execution time. This canvas
+          shows the exact state of the workflow when this execution was captured.
+        </div>
+      </DialogContent>
+    </Dialog>
+ ) +} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx new file mode 100644 index 0000000000..bc2e11c0b0 --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx @@ -0,0 +1,467 @@ +'use client' + +import { useEffect, useState } from 'react' +import { + AlertCircle, + ChevronLeft, + ChevronRight, + Clock, + DollarSign, + Hash, + Loader2, + X, + Zap, +} from 'lucide-react' +import { Badge } from '@/components/ui/badge' +import { Card, CardContent, CardHeader, CardTitle } from '@/components/ui/card' +import { createLogger } from '@/lib/logs/console-logger' +import { cn, redactApiKeys } from '@/lib/utils' +import { WorkflowPreview } from '@/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview' +import type { WorkflowState } from '@/stores/workflows/workflow/types' + +const logger = createLogger('FrozenCanvas') + +function formatExecutionData(executionData: any) { + const { + inputData, + outputData, + cost, + tokens, + durationMs, + status, + blockName, + blockType, + errorMessage, + errorStackTrace, + } = executionData + + return { + blockName: blockName || 'Unknown Block', + blockType: blockType || 'unknown', + status, + duration: durationMs ? `${durationMs}ms` : 'N/A', + input: redactApiKeys(inputData || {}), + output: redactApiKeys(outputData || {}), + errorMessage, + errorStackTrace, + cost: cost + ? { + input: cost.input || 0, + output: cost.output || 0, + total: cost.total || 0, + } + : null, + tokens: tokens + ? { + prompt: tokens.prompt || 0, + completion: tokens.completion || 0, + total: tokens.total || 0, + } + : null, + } +} + +function getCurrentIterationData(blockExecutionData: any) { + if (blockExecutionData.iterations && Array.isArray(blockExecutionData.iterations)) { + const currentIndex = blockExecutionData.currentIteration ?? 0 + return { + executionData: blockExecutionData.iterations[currentIndex], + currentIteration: currentIndex, + totalIterations: blockExecutionData.totalIterations ?? blockExecutionData.iterations.length, + hasMultipleIterations: blockExecutionData.iterations.length > 1, + } + } + + return { + executionData: blockExecutionData, + currentIteration: 0, + totalIterations: 1, + hasMultipleIterations: false, + } +} + +function PinnedLogs({ executionData, onClose }: { executionData: any; onClose: () => void }) { + const [currentIterationIndex, setCurrentIterationIndex] = useState(0) + + const iterationInfo = getCurrentIterationData({ + ...executionData, + currentIteration: currentIterationIndex, + }) + + const formatted = formatExecutionData(iterationInfo.executionData) + + const totalIterations = executionData.iterations?.length || 1 + + const goToPreviousIteration = () => { + if (currentIterationIndex > 0) { + setCurrentIterationIndex(currentIterationIndex - 1) + } + } + + const goToNextIteration = () => { + if (currentIterationIndex < totalIterations - 1) { + setCurrentIterationIndex(currentIterationIndex + 1) + } + } + + useEffect(() => { + setCurrentIterationIndex(0) + }, [executionData]) + + return ( + + +
+    <Card className='absolute right-4 bottom-4 z-50 max-h-[70vh] w-96 overflow-y-auto shadow-lg'>
+      <CardHeader className='space-y-2 pb-2'>
+        <div className='flex items-center justify-between'>
+          <CardTitle className='flex items-center gap-2 text-sm'>
+            <Hash className='h-4 w-4' />
+            {formatted.blockName}
+          </CardTitle>
+          <button
+            type='button'
+            onClick={onClose}
+            className='text-muted-foreground hover:text-foreground'
+          >
+            <X className='h-4 w-4' />
+          </button>
+        </div>
+        <div className='flex items-center justify-between'>
+          <div className='flex items-center gap-2'>
+            <Badge variant='outline' className='text-xs'>
+              {formatted.blockType}
+            </Badge>
+            <Badge variant={formatted.status === 'success' ? 'secondary' : 'destructive'}>
+              {formatted.status}
+            </Badge>
+          </div>
+
+          {/* Iteration Navigation */}
+          {iterationInfo.hasMultipleIterations && (
+            <div className='flex items-center gap-1'>
+              <button
+                type='button'
+                onClick={goToPreviousIteration}
+                disabled={currentIterationIndex === 0}
+              >
+                <ChevronLeft className='h-4 w-4' />
+              </button>
+              <span className='text-xs'>
+                {currentIterationIndex + 1} / {iterationInfo.totalIterations}
+              </span>
+              <button
+                type='button'
+                onClick={goToNextIteration}
+                disabled={currentIterationIndex >= totalIterations - 1}
+              >
+                <ChevronRight className='h-4 w-4' />
+              </button>
+            </div>
+          )}
+        </div>
+      </CardHeader>
+
+      <CardContent className='space-y-3'>
+        <div className='flex items-center gap-4 text-muted-foreground text-xs'>
+          <div className='flex items-center gap-1'>
+            <Clock className='h-3 w-3' />
+            {formatted.duration}
+          </div>
+
+          {formatted.cost && (
+            <div className='flex items-center gap-1'>
+              <DollarSign className='h-3 w-3' />
+              ${formatted.cost.total.toFixed(5)}
+            </div>
+          )}
+
+          {formatted.tokens && (
+            <div className='flex items-center gap-1'>
+              <Zap className='h-3 w-3' />
+              {formatted.tokens.total} tokens
+            </div>
+          )}
+        </div>
+
+        <div>
+          <div className='mb-1 font-medium text-xs'>Input</div>
+          <pre className='max-h-40 overflow-auto rounded bg-muted p-2 text-xs'>
+            {JSON.stringify(formatted.input, null, 2)}
+          </pre>
+        </div>
+
+        <div>
+          <div className='mb-1 font-medium text-xs'>Output</div>
+          <pre className='max-h-40 overflow-auto rounded bg-muted p-2 text-xs'>
+            {JSON.stringify(formatted.output, null, 2)}
+          </pre>
+        </div>
+
+        {formatted.cost && (
+          <div>
+            <div className='mb-1 font-medium text-xs'>Cost Breakdown</div>
+            <div className='space-y-1 text-xs'>
+              <div className='flex justify-between'>
+                <span>Input:</span>
+                <span>${formatted.cost.input.toFixed(5)}</span>
+              </div>
+              <div className='flex justify-between'>
+                <span>Output:</span>
+                <span>${formatted.cost.output.toFixed(5)}</span>
+              </div>
+              <div className='flex justify-between'>
+                <span>Total:</span>
+                <span>${formatted.cost.total.toFixed(5)}</span>
+              </div>
+            </div>
+          </div>
+        )}
+
+        {formatted.tokens && (
+          <div>
+            <div className='mb-1 font-medium text-xs'>Token Usage</div>
+            <div className='space-y-1 text-xs'>
+              <div className='flex justify-between'>
+                <span>Prompt:</span>
+                <span>{formatted.tokens.prompt}</span>
+              </div>
+              <div className='flex justify-between'>
+                <span>Completion:</span>
+                <span>{formatted.tokens.completion}</span>
+              </div>
+              <div className='flex justify-between'>
+                <span>Total:</span>
+                <span>{formatted.tokens.total}</span>
+              </div>
+            </div>
+          </div>
+        )}
+      </CardContent>
+    </Card>
+ ) +} + +interface FrozenCanvasData { + executionId: string + workflowId: string + workflowState: WorkflowState + executionMetadata: { + trigger: string + startedAt: string + endedAt?: string + totalDurationMs?: number + blockStats: { + total: number + success: number + error: number + skipped: number + } + cost: { + total: number | null + input: number | null + output: number | null + } + totalTokens: number | null + } +} + +interface FrozenCanvasProps { + executionId: string + traceSpans?: any[] + className?: string + height?: string | number + width?: string | number +} + +export function FrozenCanvas({ + executionId, + traceSpans, + className, + height = '100%', + width = '100%', +}: FrozenCanvasProps) { + const [data, setData] = useState(null) + const [blockExecutions, setBlockExecutions] = useState>({}) + const [loading, setLoading] = useState(true) + const [error, setError] = useState(null) + + const [pinnedBlockId, setPinnedBlockId] = useState(null) + + // Process traceSpans to create blockExecutions map + useEffect(() => { + if (traceSpans && Array.isArray(traceSpans)) { + const blockExecutionMap: Record = {} + + const workflowSpan = traceSpans[0] + if (workflowSpan?.children && Array.isArray(workflowSpan.children)) { + const traceSpansByBlockId = workflowSpan.children.reduce((acc: any, span: any) => { + if (span.blockId) { + if (!acc[span.blockId]) { + acc[span.blockId] = [] + } + acc[span.blockId].push(span) + } + return acc + }, {}) + + for (const [blockId, spans] of Object.entries(traceSpansByBlockId)) { + const spanArray = spans as any[] + + const iterations = spanArray.map((span: any) => { + // Extract error information from span output if status is error + let errorMessage = null + let errorStackTrace = null + + if (span.status === 'error' && span.output) { + // Error information can be in different formats in the output + if (typeof span.output === 'string') { + errorMessage = span.output + } else if (span.output.error) { + errorMessage = span.output.error + errorStackTrace = span.output.stackTrace || span.output.stack + } else if (span.output.message) { + errorMessage = span.output.message + errorStackTrace = span.output.stackTrace || span.output.stack + } else { + // Fallback: stringify the entire output for error cases + errorMessage = JSON.stringify(span.output) + } + } + + return { + id: span.id, + blockId: span.blockId, + blockName: span.name, + blockType: span.type, + status: span.status, + startedAt: span.startTime, + endedAt: span.endTime, + durationMs: span.duration, + inputData: span.input, + outputData: span.output, + errorMessage, + errorStackTrace, + cost: span.cost || { + input: null, + output: null, + total: null, + }, + tokens: span.tokens || { + prompt: null, + completion: null, + total: null, + }, + modelUsed: span.model || null, + metadata: {}, + } + }) + + blockExecutionMap[blockId] = { + iterations, + currentIteration: 0, + totalIterations: iterations.length, + } + } + } + + setBlockExecutions(blockExecutionMap) + } + }, [traceSpans]) + + useEffect(() => { + const fetchData = async () => { + try { + setLoading(true) + setError(null) + + const response = await fetch(`/api/logs/${executionId}/frozen-canvas`) + if (!response.ok) { + throw new Error(`Failed to fetch frozen canvas data: ${response.statusText}`) + } + + const result = await response.json() + setData(result) + logger.debug(`Loaded frozen canvas data for execution: ${executionId}`) + } catch (err) { + const errorMessage = err instanceof Error ? 
err.message : 'Unknown error' + logger.error('Failed to fetch frozen canvas data:', err) + setError(errorMessage) + } finally { + setLoading(false) + } + } + + fetchData() + }, [executionId]) + + // No need to create a temporary workflow - just use the workflowState directly + + if (loading) { + return ( +
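The traceSpans effect above flattens the workflow span's children into a per-block map. The generic parameters were stripped in this view; the state is presumably `useState<Record<string, BlockExecutionData>>({})`, with shapes roughly as below (type names are illustrative, not exports of this module):

// Illustrative types for the map built from trace spans above.
interface BlockIteration {
  id: string
  blockId: string
  blockName: string
  blockType: string
  status: 'success' | 'error' | 'skipped'
  startedAt: string
  endedAt: string
  durationMs: number
  inputData: unknown
  outputData: unknown
  errorMessage: string | null
  errorStackTrace: string | null
  cost: { input: number | null; output: number | null; total: number | null }
  tokens: { prompt: number | null; completion: number | null; total: number | null }
  modelUsed: string | null
  metadata: Record<string, unknown>
}

interface BlockExecutionData {
  iterations: BlockIteration[]
  currentIteration: number // index into iterations, starts at 0
  totalIterations: number
}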
+
+ + Loading frozen canvas... +
+
+ ) + } + + if (error) { + return ( +
+
+ + Failed to load frozen canvas: {error} +
+
+ ) + } + + if (!data) { + return ( +
+
No execution data available
+
+ ) + } + + // Check if this is a migrated log without real workflow state + const isMigratedLog = (data.workflowState as any)?._migrated === true + if (isMigratedLog) { + return ( +
+
+ + Logged State Not Found +
+
+ This log was migrated from the old logging system. The workflow state at execution time is + not available. +
+
+ Note: {(data.workflowState as any)?._note} +
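The `_migrated`/`_note` checks above imply the migration writes a sentinel in place of a real snapshot. A hedged sketch of such a placeholder (field values are assumptions; the actual note text is not shown in this diff):

// Hypothetical placeholder state stamped by the log migration.
// `_migrated: true` routes the component into the fallback UI above,
// and `_note` is rendered verbatim.
const migratedWorkflowState = {
  _migrated: true,
  _note: '…', // whatever explanation the migration recorded
  blocks: {},
  edges: [],
  loops: {},
  parallels: {},
}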
+
+ ) + } + + return ( + <> +
+ { + if (blockExecutions[blockId]) { + setPinnedBlockId(blockId) + } + }} + /> +
+ + {pinnedBlockId && blockExecutions[pinnedBlockId] && ( + setPinnedBlockId(null)} + /> + )} + + ) +} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/index.ts b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/index.ts new file mode 100644 index 0000000000..2a5f550a3b --- /dev/null +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/index.ts @@ -0,0 +1,2 @@ +export { FrozenCanvas } from './frozen-canvas' +export { FrozenCanvasModal } from './frozen-canvas-modal' diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx index 65d070356b..78371d6e7f 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/sidebar/sidebar.tsx @@ -1,7 +1,7 @@ 'use client' import { useEffect, useMemo, useRef, useState } from 'react' -import { ChevronDown, ChevronUp, X } from 'lucide-react' +import { ChevronDown, ChevronUp, Eye, X } from 'lucide-react' import { Button } from '@/components/ui/button' import { CopyButton } from '@/components/ui/copy-button' import { ScrollArea } from '@/components/ui/scroll-area' @@ -10,6 +10,7 @@ import { redactApiKeys } from '@/lib/utils' import type { WorkflowLog } from '@/app/workspace/[workspaceId]/logs/stores/types' import { formatDate } from '@/app/workspace/[workspaceId]/logs/utils/format-date' import { formatCost } from '@/providers/utils' +import { FrozenCanvasModal } from '../frozen-canvas/frozen-canvas-modal' import { ToolCallsDisplay } from '../tool-calls/tool-calls-display' import { TraceSpansDisplay } from '../trace-spans/trace-spans-display' import LogMarkdownRenderer from './components/markdown-renderer' @@ -153,7 +154,7 @@ const BlockContentDisplay = ({ <> {isJson ? ( -
+              
                 {redactedOutput}
               
) : ( @@ -166,7 +167,7 @@ const BlockContentDisplay = ({ text={JSON.stringify(redactedBlockInput, null, 2)} className='z-10 h-7 w-7' /> -
+            
               {JSON.stringify(redactedBlockInput, null, 2)}
             
@@ -193,6 +194,8 @@ export function Sidebar({ const [isDragging, setIsDragging] = useState(false) const [_currentLogId, setCurrentLogId] = useState(null) const [isTraceExpanded, setIsTraceExpanded] = useState(false) + const [isModelsExpanded, setIsModelsExpanded] = useState(false) + const [isFrozenCanvasOpen, setIsFrozenCanvasOpen] = useState(false) const scrollAreaRef = useRef(null) // Update currentLogId when log changes @@ -238,22 +241,26 @@ export function Sidebar({ // Determine if this is a workflow execution log const isWorkflowExecutionLog = useMemo(() => { if (!log) return false - // Check if message contains "workflow executed" or similar phrases + // Check if message contains workflow execution phrases (success or failure) return ( log.message.toLowerCase().includes('workflow executed') || log.message.toLowerCase().includes('execution completed') || - (log.trigger === 'manual' && log.duration) + log.message.toLowerCase().includes('workflow execution failed') || + log.message.toLowerCase().includes('execution failed') || + (log.trigger === 'manual' && log.duration) || + // Also check if we have enhanced logging metadata with trace spans + (log.metadata?.enhanced && log.metadata?.traceSpans) ) }, [log]) - // Helper to determine if we have trace spans to display - const _hasTraceSpans = useMemo(() => { - return !!(log?.metadata?.traceSpans && log.metadata.traceSpans.length > 0) - }, [log]) - // Helper to determine if we have cost information to display const hasCostInfo = useMemo(() => { - return !!(log?.metadata?.cost && (log.metadata.cost.input || log.metadata.cost.output)) + return !!( + log?.metadata?.cost && + ((log.metadata.cost.input && log.metadata.cost.input > 0) || + (log.metadata.cost.output && log.metadata.cost.output > 0) || + (log.metadata.cost.total && log.metadata.cost.total > 0)) + ) }, [log]) const isWorkflowWithCost = useMemo(() => { @@ -487,6 +494,103 @@ export function Sidebar({ )} + {/* Enhanced Stats - only show for enhanced logs */} + {log.metadata?.enhanced && log.metadata?.blockStats && ( +
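The new panels below key off `log.metadata.enhanced`. For orientation, a sketch of the metadata an enhanced log carries, following the `WorkflowLog` additions in stores/types.ts further down this diff (values are illustrative):

// Illustrative metadata on an enhanced log entry.
const exampleMetadata = {
  enhanced: true,
  traceSpans: [] as unknown[], // built from the execution result
  blockStats: { total: 5, success: 4, error: 1, skipped: 0 },
  cost: {
    input: 0.0012,
    output: 0.0034,
    total: 0.0046,
    tokens: { prompt: 812, completion: 215, total: 1027 },
  },
}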
+

+ Block Execution Stats +

+
+
+ Total Blocks: + {log.metadata.blockStats.total} +
+
+ Successful: + + {log.metadata.blockStats.success} + +
+ {log.metadata.blockStats.error > 0 && ( +
+ Failed: + + {log.metadata.blockStats.error} + +
+ )} + {log.metadata.blockStats.skipped > 0 && ( +
+ Skipped: + + {log.metadata.blockStats.skipped} + +
+ )} +
+
+ )} + + {/* Enhanced Cost - only show for enhanced logs with actual cost data */} + {log.metadata?.enhanced && hasCostInfo && ( +
+

Cost Breakdown

+
+ {(log.metadata?.cost?.total ?? 0) > 0 && ( +
+ Total Cost: + + ${log.metadata?.cost?.total?.toFixed(4)} + +
+ )} + {(log.metadata?.cost?.input ?? 0) > 0 && ( +
+ Input Cost: + + ${log.metadata?.cost?.input?.toFixed(4)} + +
+ )} + {(log.metadata?.cost?.output ?? 0) > 0 && ( +
+ Output Cost: + + ${log.metadata?.cost?.output?.toFixed(4)} + +
+ )} + {(log.metadata?.cost?.tokens?.total ?? 0) > 0 && ( +
+ Total Tokens: + + {log.metadata?.cost?.tokens?.total?.toLocaleString()} + +
+ )} +
+
+ )} + + {/* Frozen Canvas Button - only show for workflow execution logs with execution ID */} + {isWorkflowExecutionLog && log.executionId && ( +
+

Workflow State

+ +

+ See the exact workflow state and block inputs/outputs at execution time +

+
+ )} + {/* Message Content */}

Message

@@ -517,42 +621,94 @@ export function Sidebar({ )} {/* Cost Information (moved to bottom) */} - {hasCostInfo && log.metadata?.cost && ( + {hasCostInfo && (
-

- {isWorkflowWithCost ? 'Total Model Cost' : 'Model Cost'} -

+

Models

- {log.metadata.cost.model && ( -
- Model: - {log.metadata.cost.model} -
- )}
Input: - {formatCost(log.metadata.cost.input || 0)} + + {formatCost(log.metadata?.cost?.input || 0)} +
Output: - {formatCost(log.metadata.cost.output || 0)} + + {formatCost(log.metadata?.cost?.output || 0)} +
Total: - {formatCost(log.metadata.cost.total || 0)} + {formatCost(log.metadata?.cost?.total || 0)}
Tokens: - {log.metadata.cost.tokens?.prompt || 0} in /{' '} - {log.metadata.cost.tokens?.completion || 0} out + {log.metadata?.cost?.tokens?.prompt || 0} in /{' '} + {log.metadata?.cost?.tokens?.completion || 0} out
+ {/* Models Breakdown */} + {log.metadata?.cost?.models && + Object.keys(log.metadata?.cost?.models).length > 0 && ( +
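The breakdown rendered below iterates `log.metadata.cost.models`, the per-model record added to `CostMetadata` in stores/types.ts later in this diff. An illustrative value (the model key is a stand-in):

// Example per-model cost map, shaped per CostMetadata['models'].
const models = {
  'gpt-4o': {
    input: 0.0021,
    output: 0.0057,
    total: 0.0078,
    tokens: { prompt: 640, completion: 380, total: 1020 },
  },
}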
+ + + {isModelsExpanded && ( +
+ {Object.entries(log.metadata?.cost?.models || {}).map( + ([model, cost]: [string, any]) => ( +
+
{model}
+
+
+ Input: + {formatCost(cost.input || 0)} +
+
+ Output: + {formatCost(cost.output || 0)} +
+
+ Total: + + {formatCost(cost.total || 0)} + +
+
+ Tokens: + + {cost.tokens?.prompt || 0} in /{' '} + {cost.tokens?.completion || 0} out + +
+
+
+ ) + )} +
+ )} +
+ )} + {isWorkflowWithCost && (

@@ -568,6 +724,18 @@ export function Sidebar({ )} + + {/* Frozen Canvas Modal */} + {log?.executionId && ( + setIsFrozenCanvasOpen(false)} + /> + )}
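The modal JSX above lost its tags in this view; the wiring is presumably close to the sketch below. `executionId`, `isOpen`, and the `onClose` handler are visible in the mangled hunk; the `traceSpans` prop is an assumption carried over from `FrozenCanvasProps`:

{/* Hedged reconstruction of the stripped modal JSX */}
{log?.executionId && (
  <FrozenCanvasModal
    executionId={log.executionId}
    traceSpans={log.metadata?.traceSpans}
    isOpen={isFrozenCanvasOpen}
    onClose={() => setIsFrozenCanvasOpen(false)}
  />
)}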

) } diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/tool-calls/tool-calls-display.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/tool-calls/tool-calls-display.tsx index 0e9e2fbad7..fdf6ca08c8 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/tool-calls/tool-calls-display.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/tool-calls/tool-calls-display.tsx @@ -111,7 +111,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) { {toolCall.input && (
Input
-
+                
                   
                   {JSON.stringify(toolCall.input, null, 2)}
                 
@@ -122,7 +122,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) { {toolCall.status === 'success' && toolCall.output && (
Output
-
+                
                   
                   {JSON.stringify(toolCall.output, null, 2)}
                 
@@ -132,7 +132,7 @@ function ToolCallItem({ toolCall, index }: ToolCallItemProps) { {toolCall.status === 'error' && toolCall.error && (
Error
-
+                
                   
                   {toolCall.error}
                 
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx index 31695ea0ea..056372083a 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx @@ -27,6 +27,174 @@ interface TraceSpansDisplayProps { onExpansionChange?: (expanded: boolean) => void } +// Transform raw block data into clean, user-friendly format +function transformBlockData(data: any, blockType: string, isInput: boolean) { + if (!data) return null + + // For input data, filter out sensitive information + if (isInput) { + const cleanInput = { ...data } + + // Remove sensitive fields + if (cleanInput.apiKey) { + cleanInput.apiKey = '***' + } + if (cleanInput.azureApiKey) { + cleanInput.azureApiKey = '***' + } + + // Remove null/undefined values for cleaner display + Object.keys(cleanInput).forEach((key) => { + if (cleanInput[key] === null || cleanInput[key] === undefined) { + delete cleanInput[key] + } + }) + + return cleanInput + } + + // For output data, extract meaningful information based on block type + if (data.response) { + const response = data.response + + switch (blockType) { + case 'agent': + return { + content: response.content, + model: data.model, + tokens: data.tokens, + toolCalls: response.toolCalls, + ...(data.cost && { cost: data.cost }), + } + + case 'function': + return { + result: response.result, + stdout: response.stdout, + ...(response.executionTime && { executionTime: `${response.executionTime}ms` }), + } + + case 'api': + return { + data: response.data, + status: response.status, + headers: response.headers, + } + + default: + // For other block types, show the response content + return response + } + } + + return data +} + +// Component to display block input/output data in a clean, readable format +function BlockDataDisplay({ + data, + blockType, + isInput = false, + isError = false, +}: { + data: any + blockType?: string + isInput?: boolean + isError?: boolean +}) { + if (!data) return null + + // Handle different data types + const renderValue = (value: any, key?: string): React.ReactNode => { + if (value === null) return null + if (value === undefined) return undefined + + if (typeof value === 'string') { + return "{value}" + } + + if (typeof value === 'number') { + return {value} + } + + if (typeof value === 'boolean') { + return {value.toString()} + } + + if (Array.isArray(value)) { + if (value.length === 0) return [] + return ( +
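For concreteness, here is roughly what the `transformBlockData` helper above yields for an agent block's raw output (values are illustrative; the shape follows the 'agent' branch of the switch):

// Raw executor output for an agent block, then the cleaned view.
const raw = {
  response: { content: 'Hello!', toolCalls: [] },
  model: 'gpt-4o',
  tokens: { prompt: 12, completion: 3, total: 15 },
  cost: { input: 0.0001, output: 0.0002, total: 0.0003 },
}

const cleaned = transformBlockData(raw, 'agent', false)
// => { content: 'Hello!', model: 'gpt-4o', tokens: { … }, toolCalls: [], cost: { … } }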
+ [ +
+ {value.map((item, index) => ( +
+ {index}: +
{renderValue(item)}
+
+ ))} +
+ ] +
+ ) + } + + if (typeof value === 'object') { + const entries = Object.entries(value) + if (entries.length === 0) return {'{}'} + + return ( +
+ {entries.map(([objKey, objValue]) => ( +
+ + {objKey}: + +
{renderValue(objValue, objKey)}
+
+ ))} +
+ ) + } + + return {String(value)} + } + + // Transform the data for better display + const transformedData = transformBlockData(data, blockType || 'unknown', isInput) + + // Special handling for error output + if (isError && data.error) { + return ( +
+
+
Error
+
{data.error}
+
+ {/* Show other output data if available */} + {transformedData && + Object.keys(transformedData).filter((key) => key !== 'error' && key !== 'success') + .length > 0 && ( +
+ {Object.entries(transformedData) + .filter(([key]) => key !== 'error' && key !== 'success') + .map(([key, value]) => ( +
+ {key}: + {renderValue(value, key)} +
+ ))} +
+ )} +
+ ) + } + + return ( +
{renderValue(transformedData || data)}
+ ) +} + export function TraceSpansDisplay({ traceSpans, totalDuration = 0, @@ -35,6 +203,30 @@ export function TraceSpansDisplay({ // Keep track of expanded spans const [expandedSpans, setExpandedSpans] = useState>(new Set()) + // Function to collect all span IDs recursively (for expand all functionality) + const collectAllSpanIds = (spans: TraceSpan[]): string[] => { + const ids: string[] = [] + + const collectIds = (span: TraceSpan) => { + const spanId = span.id || `span-${span.name}-${span.startTime}` + ids.push(spanId) + + // Process children + if (span.children && span.children.length > 0) { + span.children.forEach(collectIds) + } + } + + spans.forEach(collectIds) + return ids + } + + const allSpanIds = useMemo(() => { + if (!traceSpans || traceSpans.length === 0) return [] + return collectAllSpanIds(traceSpans) + }, [traceSpans]) + + // Early return after all hooks if (!traceSpans || traceSpans.length === 0) { return
No trace data available
} @@ -61,26 +253,6 @@ export function TraceSpansDisplay({ // This ensures parallel spans are represented correctly in the timeline const actualTotalDuration = workflowEndTime - workflowStartTime - // Function to collect all span IDs recursively (for expand all functionality) - const collectAllSpanIds = (spans: TraceSpan[]): string[] => { - const ids: string[] = [] - - const collectIds = (span: TraceSpan) => { - const spanId = span.id || `span-${span.name}-${span.startTime}` - ids.push(spanId) - - // Process children - if (span.children && span.children.length > 0) { - span.children.forEach(collectIds) - } - } - - spans.forEach(collectIds) - return ids - } - - const allSpanIds = useMemo(() => collectAllSpanIds(traceSpans), [traceSpans]) - // Handle span toggling const handleSpanToggle = (spanId: string, expanded: boolean, hasSubItems: boolean) => { const newExpandedSpans = new Set(expandedSpans) @@ -140,11 +312,14 @@ export function TraceSpansDisplay({ )}
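Hoisting `collectAllSpanIds` and the `allSpanIds` memo above the early return (added earlier in this hunk, removed here) is a Rules-of-Hooks fix: React requires the same hooks in the same order on every render, so a `useMemo` sitting after a conditional `return` throws as soon as `traceSpans` comes back empty. In sketch form (assumes `import { useMemo } from 'react'`):

// Anti-pattern: the hook is skipped whenever items is empty,
// so the hook count changes between renders and React errors out.
function Bad({ items }: { items: string[] }) {
  if (items.length === 0) return null
  const ids = useMemo(() => items.map((i) => i.toUpperCase()), [items])
  return <div>{ids.join(', ')}</div>
}

// The pattern this diff adopts: run every hook first, bail out afterwards.
function Good({ items }: { items: string[] }) {
  const ids = useMemo(() => items.map((i) => i.toUpperCase()), [items])
  if (items.length === 0) return null
  return <div>{ids.join(', ')}</div>
}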
-
+
{traceSpans.map((span, index) => { - const hasSubItems = + const hasSubItems = Boolean( (span.children && span.children.length > 0) || - (span.toolCalls && span.toolCalls.length > 0) + (span.toolCalls && span.toolCalls.length > 0) || + span.input || + span.output + ) return (
+ {/* Children and tool calls */} + {expanded && ( +
+ {/* Block Input/Output Data */} + {(span.input || span.output) && ( +
+ {/* Input Data */} + {span.input && ( +
+

Input

+
+ +
+
+ )} + + {/* Output Data */} + {span.output && ( +
+

+ {span.status === 'error' ? 'Error Details' : 'Output'} +

+
+ +
+
+ )} +
+ )} +
+ )} + {/* Children and tool calls */} {expanded && (
@@ -437,9 +649,12 @@ function TraceSpanItem({ {hasChildren && (
{span.children?.map((childSpan, index) => { - const childHasSubItems = + const childHasSubItems = Boolean( (childSpan.children && childSpan.children.length > 0) || - (childSpan.toolCalls && childSpan.toolCalls.length > 0) + (childSpan.toolCalls && childSpan.toolCalls.length > 0) || + childSpan.input || + childSpan.output + ) return ( { - switch (level.toLowerCase()) { - case 'error': - return 'bg-destructive/20 text-destructive error-badge' - case 'warn': - return 'bg-warning/20 text-warning' - default: - return 'bg-secondary text-secondary-foreground' - } -} - -const getTriggerBadgeStyles = (trigger: string) => { - switch (trigger.toLowerCase()) { - case 'manual': - return 'bg-secondary text-secondary-foreground' - case 'api': - return 'bg-blue-100 dark:bg-blue-950/40 text-blue-700 dark:text-blue-400' - case 'webhook': - return 'bg-orange-100 dark:bg-orange-950/40 text-orange-700 dark:text-orange-400' - case 'schedule': - return 'bg-green-100 dark:bg-green-950/40 text-green-700 dark:text-green-400' - case 'chat': - return 'bg-purple-100 dark:bg-purple-950/40 text-purple-700 dark:text-purple-400' - default: - return 'bg-gray-100 dark:bg-gray-800 text-gray-700 dark:text-gray-400' - } -} - const selectedRowAnimation = ` @keyframes borderPulse { 0% { border-left-color: hsl(var(--primary) / 0.3) } @@ -87,28 +59,6 @@ export default function Logs() { const isSidebarCollapsed = mode === 'expanded' ? !isExpanded : mode === 'collapsed' || mode === 'hover' - const executionGroups = useMemo(() => { - const groups: Record = {} - - // Group logs by executionId - logs.forEach((log) => { - if (log.executionId) { - if (!groups[log.executionId]) { - groups[log.executionId] = [] - } - groups[log.executionId].push(log) - } - }) - - Object.keys(groups).forEach((executionId) => { - groups[executionId].sort( - (a, b) => new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime() - ) - }) - - return groups - }, [logs]) - const handleLogClick = (log: WorkflowLog) => { setSelectedLog(log) const index = logs.findIndex((l) => l.id === log.id) @@ -134,6 +84,8 @@ export default function Logs() { const handleCloseSidebar = () => { setIsSidebarOpen(false) + setSelectedLog(null) + setSelectedLogIndex(-1) } useEffect(() => { @@ -155,7 +107,7 @@ export default function Logs() { } const queryParams = buildQueryParams(pageNum, LOGS_PER_PAGE) - const response = await fetch(`/api/logs?${queryParams}`) + const response = await fetch(`/api/logs/enhanced?${queryParams}`) if (!response.ok) { throw new Error(`Error fetching logs: ${response.statusText}`) @@ -203,7 +155,7 @@ export default function Logs() { try { setLoading(true) const queryParams = buildQueryParams(1, LOGS_PER_PAGE) - const response = await fetch(`/api/logs?${queryParams}`) + const response = await fetch(`/api/logs/enhanced?${queryParams}`) if (!response.ok) { throw new Error(`Error fetching logs: ${response.statusText}`) @@ -353,46 +305,16 @@ export default function Logs() {
{/* Table container */}
- {/* Table header - fixed */} -
- - - - - - - - - - - - - - - - - - - - - -
- Time - - Status - - Workflow - - id - - Trigger - - Message - - Duration -
+ {/* Simple header */} +
+
+
Time
+
Status
+
Workflow
+
Trigger
+
Cost
+
Duration
+
{/* Table body - scrollable */} @@ -419,163 +341,106 @@ export default function Logs() {
) : ( - - - - - - - - - - - - {logs.map((log) => { - const formattedDate = formatDate(log.createdAt) - const isSelected = selectedLog?.id === log.id - const _isWorkflowExecutionLog = - log.executionId && executionGroups[log.executionId].length === 1 - - return ( - handleLogClick(log)} - > - {/* Time column */} - + - {/* Level column */} - - - {/* Workflow column */} - - - {/* ID column - hidden on small screens */} - - - {/* Trigger column - hidden on medium screens and below */} - - - {/* Message column */} - + - {/* Duration column */} - - - ) - })} - - {/* Infinite scroll loader */} - {hasMore && ( - - - - )} - - {/* Footer status indicator - useful for development */} - - - - -
-
-
- {formattedDate.formatted} - - • - - - {new Date(log.createdAt).toLocaleDateString('en-US', { - month: 'short', - day: 'numeric', - year: 'numeric', - })} - -
-
- {formattedDate.relative} -
+
+ {logs.map((log) => { + const formattedDate = formatDate(log.createdAt) + const isSelected = selectedLog?.id === log.id + + return ( +
handleLogClick(log)} + > +
+ {/* Time */} +
+
{formattedDate.formatted}
+
+ {formattedDate.relative}
-
+ {/* Status */} +
- {log.level} + + {log.level === 'error' ? 'Failed' : 'Success'} +
-
- {log.workflow && ( -
- {log.workflow.name} -
- )} -
-
- {log.executionId ? `#${log.executionId.substring(0, 4)}` : '—'} +
+ + {/* Workflow */} +
+
+ {log.workflow?.name || 'Unknown Workflow'}
-
- {log.trigger && ( -
- {log.trigger} -
- )} -
-
+
{log.message}
-
+ {/* Trigger */} +
- {log.duration || '—'} + {log.trigger || '—'} +
+
+ + {/* Cost */} +
+
+ {log.metadata?.enhanced && log.metadata?.cost?.total ? ( + + ${log.metadata.cost.total.toFixed(4)} + + ) : ( + + )}
-
-
- {isFetchingMore && ( -
- - Loading more logs... -
- )}
-
-
- Showing {logs.length} logs -
- {isFetchingMore ? ( -
- ) : hasMore ? ( - - ) : ( - End of logs - )} + + {/* Duration */} +
+
+ {log.duration || '—'} +
-
+
+ ) + })} + + {/* Infinite scroll loader */} + {hasMore && ( +
+
+ {isFetchingMore ? ( + <> + + Loading more... + + ) : ( + Scroll to load more + )} +
+
+ )} +
)}
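The use-workflow-execution change further down swaps the manual content patch for `processStreamingBlockLogs(result.logs, streamedContent)`. Judging from its call sites, the helper takes the block logs plus the map of streamed text and returns the number of blocks it tokenized; a usage sketch with the signature inferred, not quoted from lib/tokenization:

import { processStreamingBlockLogs } from '@/lib/tokenization'

// blockId -> text that was streamed for that block during execution.
const streamedContent = new Map<string, string>([['block-1', 'streamed answer']])

// Inferred: (logs: BlockLog[], streamed: Map<string, string>) => number
// `result` and `logger` come from the surrounding hook.
const processedCount = processStreamingBlockLogs(result.logs ?? [], streamedContent)
logger.info(`Processed ${processedCount} blocks for streaming tokenization`)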
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/stores/types.ts b/apps/sim/app/workspace/[workspaceId]/logs/stores/types.ts index c6266fd396..0108ada3dc 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/stores/types.ts +++ b/apps/sim/app/workspace/[workspaceId]/logs/stores/types.ts @@ -22,7 +22,19 @@ export interface ToolCallMetadata { } export interface CostMetadata { - model?: string + models?: Record< + string, + { + input: number + output: number + total: number + tokens?: { + prompt?: number + completion?: number + total?: number + } + } + > input?: number output?: number total?: number @@ -53,6 +65,7 @@ export interface TraceSpan { relativeStartMs?: number // Time in ms from the start of the parent span blockId?: string // Added to track the original block ID for relationship mapping input?: Record // Added to store input data for this span + output?: Record // Added to store output data for this span } export interface WorkflowLog { @@ -70,6 +83,29 @@ export interface WorkflowLog { totalDuration?: number cost?: CostMetadata blockInput?: Record + enhanced?: boolean + blockStats?: { + total: number + success: number + error: number + skipped: number + } + blockExecutions?: Array<{ + id: string + blockId: string + blockName: string + blockType: string + startedAt: string + endedAt: string + durationMs: number + status: 'success' | 'error' | 'skipped' + errorMessage?: string + errorStackTrace?: string + inputData: any + outputData: any + cost?: CostMetadata + metadata: any + }> } } diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts index 620fe14e2c..55710599b0 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts @@ -2,6 +2,7 @@ import { useCallback, useState } from 'react' import { v4 as uuidv4 } from 'uuid' import { createLogger } from '@/lib/logs/console-logger' import { buildTraceSpans } from '@/lib/logs/trace-spans' +import { processStreamingBlockLogs } from '@/lib/tokenization' import type { BlockOutput } from '@/blocks/types' import { Executor } from '@/executor' import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types' @@ -211,15 +212,22 @@ export function useWorkflowExecution() { result.metadata = { duration: 0, startTime: new Date().toISOString() } } ;(result.metadata as any).source = 'chat' - result.logs?.forEach((log: BlockLog) => { - if (streamedContent.has(log.blockId)) { - const content = streamedContent.get(log.blockId) || '' - if (log.output) { - log.output.content = content + // Update streamed content and apply tokenization + if (result.logs) { + result.logs.forEach((log: BlockLog) => { + if (streamedContent.has(log.blockId)) { + const content = streamedContent.get(log.blockId) || '' + if (log.output) { + log.output.content = content + } + useConsoleStore.getState().updateConsole(log.blockId, content) } - useConsoleStore.getState().updateConsole(log.blockId, content) - } - }) + }) + + // Process all logs for streaming tokenization + const processedCount = processStreamingBlockLogs(result.logs, streamedContent) + logger.info(`Processed ${processedCount} blocks for streaming tokenization`) + } controller.enqueue( encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`) diff --git 
a/apps/sim/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview.tsx b/apps/sim/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview.tsx index ff6a7c2e97..23a29d78b9 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview.tsx +++ b/apps/sim/app/workspace/[workspaceId]/w/components/workflow-preview/workflow-preview.tsx @@ -33,6 +33,7 @@ interface WorkflowPreviewProps { isPannable?: boolean defaultPosition?: { x: number; y: number } defaultZoom?: number + onNodeClick?: (blockId: string, mousePosition: { x: number; y: number }) => void } // Define node types - the components now handle preview mode internally @@ -55,6 +56,7 @@ export function WorkflowPreview({ isPannable = false, defaultPosition, defaultZoom, + onNodeClick, }: WorkflowPreviewProps) { const blocksStructure = useMemo( () => ({ @@ -256,6 +258,14 @@ export function WorkflowPreview({ elementsSelectable={false} nodesDraggable={false} nodesConnectable={false} + onNodeClick={ + onNodeClick + ? (event, node) => { + logger.debug('Node clicked:', { nodeId: node.id, event }) + onNodeClick(node.id, { x: event.clientX, y: event.clientY }) + } + : undefined + } > diff --git a/apps/sim/components/ui/tag-dropdown.tsx b/apps/sim/components/ui/tag-dropdown.tsx index 035ab9534d..323ad831cf 100644 --- a/apps/sim/components/ui/tag-dropdown.tsx +++ b/apps/sim/components/ui/tag-dropdown.tsx @@ -202,6 +202,16 @@ export const TagDropdown: React.FC = ({ } } + // Check for invalid blocks before serialization to prevent race conditions + const hasInvalidBlocks = Object.values(blocks).some((block) => !block || !block.type) + if (hasInvalidBlocks) { + return { + tags: [], + variableInfoMap: {}, + blockTagGroups: [], + } + } + // Create serialized workflow for BlockPathCalculator const serializer = new Serializer() const serializedWorkflow = serializer.serializeWorkflow(blocks, edges, loops, parallels) diff --git a/apps/sim/components/ui/tooltip.tsx b/apps/sim/components/ui/tooltip.tsx index 44af2ea25a..a042fa403f 100644 --- a/apps/sim/components/ui/tooltip.tsx +++ b/apps/sim/components/ui/tooltip.tsx @@ -21,7 +21,7 @@ const TooltipContent = React.forwardRef< ref={ref} sideOffset={sideOffset} className={cn( - 'fade-in-0 zoom-in-95 data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 animate-in overflow-hidden rounded-md bg-black px-3 py-1.5 text-white text-xs shadow-md data-[state=closed]:animate-out dark:bg-white dark:text-black', + 'fade-in-0 zoom-in-95 data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-[60] animate-in overflow-hidden rounded-md bg-black px-3 py-1.5 text-white text-xs shadow-md data-[state=closed]:animate-out dark:bg-white dark:text-black', className )} {...props} diff --git a/apps/sim/executor/handlers/workflow/workflow-handler.ts b/apps/sim/executor/handlers/workflow/workflow-handler.ts index 3459b782d7..6c4e593812 100644 --- a/apps/sim/executor/handlers/workflow/workflow-handler.ts +++ b/apps/sim/executor/handlers/workflow/workflow-handler.ts @@ -159,7 +159,7 @@ export class WorkflowBlockHandler implements BlockHandler { logger.info(`Loaded child workflow: ${workflowData.name} (${workflowId})`) - // Extract the 
workflow state + // Extract the workflow state (API returns normalized data in state field) const workflowState = workflowData.state if (!workflowState || !workflowState.blocks) { @@ -167,7 +167,7 @@ export class WorkflowBlockHandler implements BlockHandler { return null } - // Use blocks directly since DB format should match UI format + // Use blocks directly since API returns data from normalized tables const serializedWorkflow = this.serializer.serializeWorkflow( workflowState.blocks, workflowState.edges || [], diff --git a/apps/sim/lib/logs/enhanced-execution-logger.test.ts b/apps/sim/lib/logs/enhanced-execution-logger.test.ts new file mode 100644 index 0000000000..dbc154b54f --- /dev/null +++ b/apps/sim/lib/logs/enhanced-execution-logger.test.ts @@ -0,0 +1,34 @@ +import { beforeEach, describe, expect, test } from 'vitest' +import { EnhancedExecutionLogger } from './enhanced-execution-logger' + +describe('EnhancedExecutionLogger', () => { + let logger: EnhancedExecutionLogger + + beforeEach(() => { + logger = new EnhancedExecutionLogger() + }) + + describe('class instantiation', () => { + test('should create logger instance', () => { + expect(logger).toBeDefined() + expect(logger).toBeInstanceOf(EnhancedExecutionLogger) + }) + }) + + describe('getTriggerPrefix', () => { + test('should return correct prefixes for trigger types', () => { + // Access the private method for testing + const getTriggerPrefix = (logger as any).getTriggerPrefix.bind(logger) + + expect(getTriggerPrefix('api')).toBe('API') + expect(getTriggerPrefix('webhook')).toBe('Webhook') + expect(getTriggerPrefix('schedule')).toBe('Scheduled') + expect(getTriggerPrefix('manual')).toBe('Manual') + expect(getTriggerPrefix('chat')).toBe('Chat') + expect(getTriggerPrefix('unknown' as any)).toBe('Unknown') + }) + }) + + // Note: Database integration tests would require proper mocking setup + // For now, we're testing the basic functionality without database calls +}) diff --git a/apps/sim/lib/logs/enhanced-execution-logger.ts b/apps/sim/lib/logs/enhanced-execution-logger.ts new file mode 100644 index 0000000000..561e6570fa --- /dev/null +++ b/apps/sim/lib/logs/enhanced-execution-logger.ts @@ -0,0 +1,396 @@ +import { eq } from 'drizzle-orm' +import { v4 as uuidv4 } from 'uuid' +import { db } from '@/db' +import { workflowExecutionBlocks, workflowExecutionLogs } from '@/db/schema' +import { createLogger } from './console-logger' +import { snapshotService } from './snapshot-service' +import type { + BlockExecutionLog, + BlockInputData, + BlockOutputData, + CostBreakdown, + ExecutionEnvironment, + ExecutionTrigger, + ExecutionLoggerService as IExecutionLoggerService, + TraceSpan, + WorkflowExecutionLog, + WorkflowExecutionSnapshot, + WorkflowState, +} from './types' + +const logger = createLogger('EnhancedExecutionLogger') + +export class EnhancedExecutionLogger implements IExecutionLoggerService { + async startWorkflowExecution(params: { + workflowId: string + executionId: string + trigger: ExecutionTrigger + environment: ExecutionEnvironment + workflowState: WorkflowState + }): Promise<{ + workflowLog: WorkflowExecutionLog + snapshot: WorkflowExecutionSnapshot + }> { + const { workflowId, executionId, trigger, environment, workflowState } = params + + logger.debug(`Starting workflow execution ${executionId} for workflow ${workflowId}`) + + const snapshotResult = await snapshotService.createSnapshotWithDeduplication( + workflowId, + workflowState + ) + + const startTime = new Date() + + const [workflowLog] = await db + 
.insert(workflowExecutionLogs) + .values({ + id: uuidv4(), + workflowId, + executionId, + stateSnapshotId: snapshotResult.snapshot.id, + level: 'info', + message: `${this.getTriggerPrefix(trigger.type)} execution started`, + trigger: trigger.type, + startedAt: startTime, + endedAt: null, + totalDurationMs: null, + blockCount: 0, + successCount: 0, + errorCount: 0, + skippedCount: 0, + totalCost: null, + totalInputCost: null, + totalOutputCost: null, + totalTokens: null, + metadata: { + environment, + trigger, + }, + }) + .returning() + + logger.debug(`Created workflow log ${workflowLog.id} for execution ${executionId}`) + + return { + workflowLog: { + id: workflowLog.id, + workflowId: workflowLog.workflowId, + executionId: workflowLog.executionId, + stateSnapshotId: workflowLog.stateSnapshotId, + level: workflowLog.level as 'info' | 'error', + message: workflowLog.message, + trigger: workflowLog.trigger as ExecutionTrigger['type'], + startedAt: workflowLog.startedAt.toISOString(), + endedAt: workflowLog.endedAt?.toISOString() || workflowLog.startedAt.toISOString(), + totalDurationMs: workflowLog.totalDurationMs || 0, + blockCount: workflowLog.blockCount, + successCount: workflowLog.successCount, + errorCount: workflowLog.errorCount, + skippedCount: workflowLog.skippedCount, + totalCost: Number(workflowLog.totalCost) || 0, + totalInputCost: Number(workflowLog.totalInputCost) || 0, + totalOutputCost: Number(workflowLog.totalOutputCost) || 0, + totalTokens: workflowLog.totalTokens || 0, + metadata: workflowLog.metadata as WorkflowExecutionLog['metadata'], + createdAt: workflowLog.createdAt.toISOString(), + }, + snapshot: snapshotResult.snapshot, + } + } + + async logBlockExecution(params: { + executionId: string + workflowId: string + blockId: string + blockName: string + blockType: string + input: BlockInputData + output: BlockOutputData + timing: { + startedAt: string + endedAt: string + durationMs: number + } + status: BlockExecutionLog['status'] + error?: { + message: string + stackTrace?: string + } + cost?: CostBreakdown + metadata?: BlockExecutionLog['metadata'] + }): Promise { + const { + executionId, + workflowId, + blockId, + blockName, + blockType, + input, + output, + timing, + status, + error, + cost, + metadata, + } = params + + logger.debug(`Logging block execution ${blockId} for execution ${executionId}`) + + const blockLogId = uuidv4() + + const [blockLog] = await db + .insert(workflowExecutionBlocks) + .values({ + id: blockLogId, + executionId, + workflowId, + blockId, + blockName, + blockType, + startedAt: new Date(timing.startedAt), + endedAt: new Date(timing.endedAt), + durationMs: timing.durationMs, + status, + errorMessage: error?.message || null, + errorStackTrace: error?.stackTrace || null, + inputData: input, + outputData: output, + costInput: cost?.input ? cost.input.toString() : null, + costOutput: cost?.output ? cost.output.toString() : null, + costTotal: cost?.total ? 
cost.total.toString() : null, + tokensPrompt: cost?.tokens?.prompt || null, + tokensCompletion: cost?.tokens?.completion || null, + tokensTotal: cost?.tokens?.total || null, + modelUsed: cost?.model || null, + metadata: metadata || {}, + }) + .returning() + + logger.debug(`Created block log ${blockLog.id} for block ${blockId}`) + + return { + id: blockLog.id, + executionId: blockLog.executionId, + workflowId: blockLog.workflowId, + blockId: blockLog.blockId, + blockName: blockLog.blockName || '', + blockType: blockLog.blockType, + startedAt: blockLog.startedAt.toISOString(), + endedAt: blockLog.endedAt?.toISOString() || timing.endedAt, + durationMs: blockLog.durationMs || timing.durationMs, + status: blockLog.status as BlockExecutionLog['status'], + errorMessage: blockLog.errorMessage || undefined, + errorStackTrace: blockLog.errorStackTrace || undefined, + inputData: input, + outputData: output, + cost: cost || null, + metadata: (blockLog.metadata as BlockExecutionLog['metadata']) || {}, + createdAt: blockLog.createdAt.toISOString(), + } + } + + async completeWorkflowExecution(params: { + executionId: string + endedAt: string + totalDurationMs: number + blockStats: { + total: number + success: number + error: number + skipped: number + } + costSummary: { + totalCost: number + totalInputCost: number + totalOutputCost: number + totalTokens: number + totalPromptTokens: number + totalCompletionTokens: number + models: Record< + string, + { + input: number + output: number + total: number + tokens: { prompt: number; completion: number; total: number } + } + > + } + finalOutput: BlockOutputData + traceSpans?: TraceSpan[] + }): Promise { + const { + executionId, + endedAt, + totalDurationMs, + blockStats, + costSummary, + finalOutput, + traceSpans, + } = params + + logger.debug(`Completing workflow execution ${executionId}`) + + const level = blockStats.error > 0 ? 'error' : 'info' + const message = + blockStats.error > 0 + ? 
`Workflow execution failed: ${blockStats.error} error(s), ${blockStats.success} success(es)` + : `Workflow execution completed: ${blockStats.success} block(s) executed successfully` + + const [updatedLog] = await db + .update(workflowExecutionLogs) + .set({ + level, + message, + endedAt: new Date(endedAt), + totalDurationMs, + blockCount: blockStats.total, + successCount: blockStats.success, + errorCount: blockStats.error, + skippedCount: blockStats.skipped, + totalCost: costSummary.totalCost.toString(), + totalInputCost: costSummary.totalInputCost.toString(), + totalOutputCost: costSummary.totalOutputCost.toString(), + totalTokens: costSummary.totalTokens, + metadata: { + traceSpans, + finalOutput, + tokenBreakdown: { + prompt: costSummary.totalPromptTokens, + completion: costSummary.totalCompletionTokens, + total: costSummary.totalTokens, + }, + models: costSummary.models, + }, + }) + .where(eq(workflowExecutionLogs.executionId, executionId)) + .returning() + + if (!updatedLog) { + throw new Error(`Workflow log not found for execution ${executionId}`) + } + + logger.debug(`Completed workflow execution ${executionId}`) + + return { + id: updatedLog.id, + workflowId: updatedLog.workflowId, + executionId: updatedLog.executionId, + stateSnapshotId: updatedLog.stateSnapshotId, + level: updatedLog.level as 'info' | 'error', + message: updatedLog.message, + trigger: updatedLog.trigger as ExecutionTrigger['type'], + startedAt: updatedLog.startedAt.toISOString(), + endedAt: updatedLog.endedAt?.toISOString() || endedAt, + totalDurationMs: updatedLog.totalDurationMs || totalDurationMs, + blockCount: updatedLog.blockCount, + successCount: updatedLog.successCount, + errorCount: updatedLog.errorCount, + skippedCount: updatedLog.skippedCount, + totalCost: Number(updatedLog.totalCost) || 0, + totalInputCost: Number(updatedLog.totalInputCost) || 0, + totalOutputCost: Number(updatedLog.totalOutputCost) || 0, + totalTokens: updatedLog.totalTokens || 0, + metadata: updatedLog.metadata as WorkflowExecutionLog['metadata'], + createdAt: updatedLog.createdAt.toISOString(), + } + } + + async getBlockExecutionsForWorkflow(executionId: string): Promise { + const blockLogs = await db + .select() + .from(workflowExecutionBlocks) + .where(eq(workflowExecutionBlocks.executionId, executionId)) + .orderBy(workflowExecutionBlocks.startedAt) + + return blockLogs.map((log) => ({ + id: log.id, + executionId: log.executionId, + workflowId: log.workflowId, + blockId: log.blockId, + blockName: log.blockName || '', + blockType: log.blockType, + startedAt: log.startedAt.toISOString(), + endedAt: log.endedAt?.toISOString() || log.startedAt.toISOString(), + durationMs: log.durationMs || 0, + status: log.status as BlockExecutionLog['status'], + errorMessage: log.errorMessage || undefined, + errorStackTrace: log.errorStackTrace || undefined, + inputData: log.inputData as BlockInputData, + outputData: log.outputData as BlockOutputData, + cost: log.costTotal + ? 
{ + input: Number(log.costInput) || 0, + output: Number(log.costOutput) || 0, + total: Number(log.costTotal) || 0, + tokens: { + prompt: log.tokensPrompt || 0, + completion: log.tokensCompletion || 0, + total: log.tokensTotal || 0, + }, + model: log.modelUsed || '', + pricing: { + input: 0, + output: 0, + updatedAt: new Date().toISOString(), + }, + } + : null, + metadata: (log.metadata as BlockExecutionLog['metadata']) || {}, + createdAt: log.createdAt.toISOString(), + })) + } + + async getWorkflowExecution(executionId: string): Promise { + const [workflowLog] = await db + .select() + .from(workflowExecutionLogs) + .where(eq(workflowExecutionLogs.executionId, executionId)) + .limit(1) + + if (!workflowLog) return null + + return { + id: workflowLog.id, + workflowId: workflowLog.workflowId, + executionId: workflowLog.executionId, + stateSnapshotId: workflowLog.stateSnapshotId, + level: workflowLog.level as 'info' | 'error', + message: workflowLog.message, + trigger: workflowLog.trigger as ExecutionTrigger['type'], + startedAt: workflowLog.startedAt.toISOString(), + endedAt: workflowLog.endedAt?.toISOString() || workflowLog.startedAt.toISOString(), + totalDurationMs: workflowLog.totalDurationMs || 0, + blockCount: workflowLog.blockCount, + successCount: workflowLog.successCount, + errorCount: workflowLog.errorCount, + skippedCount: workflowLog.skippedCount, + totalCost: Number(workflowLog.totalCost) || 0, + totalInputCost: Number(workflowLog.totalInputCost) || 0, + totalOutputCost: Number(workflowLog.totalOutputCost) || 0, + totalTokens: workflowLog.totalTokens || 0, + metadata: workflowLog.metadata as WorkflowExecutionLog['metadata'], + createdAt: workflowLog.createdAt.toISOString(), + } + } + + private getTriggerPrefix(triggerType: ExecutionTrigger['type']): string { + switch (triggerType) { + case 'api': + return 'API' + case 'webhook': + return 'Webhook' + case 'schedule': + return 'Scheduled' + case 'manual': + return 'Manual' + case 'chat': + return 'Chat' + default: + return 'Unknown' + } + } +} + +export const enhancedExecutionLogger = new EnhancedExecutionLogger() diff --git a/apps/sim/lib/logs/enhanced-logging-factory.ts b/apps/sim/lib/logs/enhanced-logging-factory.ts new file mode 100644 index 0000000000..72cde27efd --- /dev/null +++ b/apps/sim/lib/logs/enhanced-logging-factory.ts @@ -0,0 +1,197 @@ +import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers' +import type { ExecutionEnvironment, ExecutionTrigger, WorkflowState } from './types' + +export function createTriggerObject( + type: ExecutionTrigger['type'], + additionalData?: Record +): ExecutionTrigger { + return { + type, + source: type, + timestamp: new Date().toISOString(), + ...(additionalData && { data: additionalData }), + } +} + +export function createEnvironmentObject( + workflowId: string, + executionId: string, + userId?: string, + workspaceId?: string, + variables?: Record +): ExecutionEnvironment { + return { + variables: variables || {}, + workflowId, + executionId, + userId: userId || '', + workspaceId: workspaceId || '', + } +} + +export async function loadWorkflowStateForExecution(workflowId: string): Promise { + const normalizedData = await loadWorkflowFromNormalizedTables(workflowId) + + if (!normalizedData) { + throw new Error( + `Workflow ${workflowId} has no normalized data available. 
Ensure the workflow is properly saved to normalized tables.` + ) + } + + return { + blocks: normalizedData.blocks || {}, + edges: normalizedData.edges || [], + loops: normalizedData.loops || {}, + parallels: normalizedData.parallels || {}, + } +} + +export function calculateBlockStats(traceSpans: any[]): { + total: number + success: number + error: number + skipped: number +} { + if (!traceSpans || traceSpans.length === 0) { + return { total: 0, success: 0, error: 0, skipped: 0 } + } + + // Recursively collect all block spans from the trace span tree + const collectBlockSpans = (spans: any[]): any[] => { + const blocks: any[] = [] + + for (const span of spans) { + // Check if this span is an actual workflow block + if ( + span.type && + span.type !== 'workflow' && + span.type !== 'provider' && + span.type !== 'model' && + span.blockId + ) { + blocks.push(span) + } + + // Recursively check children + if (span.children && Array.isArray(span.children)) { + blocks.push(...collectBlockSpans(span.children)) + } + } + + return blocks + } + + const blockSpans = collectBlockSpans(traceSpans) + + const total = blockSpans.length + const success = blockSpans.filter((span) => span.status === 'success').length + const error = blockSpans.filter((span) => span.status === 'error').length + const skipped = blockSpans.filter((span) => span.status === 'skipped').length + + return { total, success, error, skipped } +} + +export function calculateCostSummary(traceSpans: any[]): { + totalCost: number + totalInputCost: number + totalOutputCost: number + totalTokens: number + totalPromptTokens: number + totalCompletionTokens: number + models: Record< + string, + { + input: number + output: number + total: number + tokens: { prompt: number; completion: number; total: number } + } + > +} { + if (!traceSpans || traceSpans.length === 0) { + return { + totalCost: 0, + totalInputCost: 0, + totalOutputCost: 0, + totalTokens: 0, + totalPromptTokens: 0, + totalCompletionTokens: 0, + models: {}, + } + } + + // Recursively collect all spans with cost information from the trace span tree + const collectCostSpans = (spans: any[]): any[] => { + const costSpans: any[] = [] + + for (const span of spans) { + if (span.cost) { + costSpans.push(span) + } + + if (span.children && Array.isArray(span.children)) { + costSpans.push(...collectCostSpans(span.children)) + } + } + + return costSpans + } + + const costSpans = collectCostSpans(traceSpans) + + let totalCost = 0 + let totalInputCost = 0 + let totalOutputCost = 0 + let totalTokens = 0 + let totalPromptTokens = 0 + let totalCompletionTokens = 0 + const models: Record< + string, + { + input: number + output: number + total: number + tokens: { prompt: number; completion: number; total: number } + } + > = {} + + for (const span of costSpans) { + totalCost += span.cost.total || 0 + totalInputCost += span.cost.input || 0 + totalOutputCost += span.cost.output || 0 + // Tokens are at span.tokens, not span.cost.tokens + totalTokens += span.tokens?.total || 0 + totalPromptTokens += span.tokens?.prompt || 0 + totalCompletionTokens += span.tokens?.completion || 0 + + // Aggregate model-specific costs - model is at span.model, not span.cost.model + if (span.model) { + const model = span.model + if (!models[model]) { + models[model] = { + input: 0, + output: 0, + total: 0, + tokens: { prompt: 0, completion: 0, total: 0 }, + } + } + models[model].input += span.cost.input || 0 + models[model].output += span.cost.output || 0 + models[model].total += span.cost.total || 0 + // Tokens are at 
span.tokens, not span.cost.tokens + models[model].tokens.prompt += span.tokens?.prompt || 0 + models[model].tokens.completion += span.tokens?.completion || 0 + models[model].tokens.total += span.tokens?.total || 0 + } + } + + return { + totalCost, + totalInputCost, + totalOutputCost, + totalTokens, + totalPromptTokens, + totalCompletionTokens, + models, + } +} diff --git a/apps/sim/lib/logs/enhanced-logging-session.ts b/apps/sim/lib/logs/enhanced-logging-session.ts new file mode 100644 index 0000000000..7d0bc174eb --- /dev/null +++ b/apps/sim/lib/logs/enhanced-logging-session.ts @@ -0,0 +1,199 @@ +import { createLogger } from '@/lib/logs/console-logger' +import { enhancedExecutionLogger } from './enhanced-execution-logger' +import { + calculateBlockStats, + calculateCostSummary, + createEnvironmentObject, + createTriggerObject, + loadWorkflowStateForExecution, +} from './enhanced-logging-factory' +import type { ExecutionEnvironment, ExecutionTrigger, WorkflowState } from './types' + +const logger = createLogger('EnhancedLoggingSession') + +export interface SessionStartParams { + userId?: string + workspaceId?: string + variables?: Record + triggerData?: Record +} + +export interface SessionCompleteParams { + endedAt?: string + totalDurationMs?: number + finalOutput?: any + traceSpans?: any[] +} + +export class EnhancedLoggingSession { + private workflowId: string + private executionId: string + private triggerType: ExecutionTrigger['type'] + private requestId?: string + private trigger?: ExecutionTrigger + private environment?: ExecutionEnvironment + private workflowState?: WorkflowState + private enhancedLogger = enhancedExecutionLogger + + constructor( + workflowId: string, + executionId: string, + triggerType: ExecutionTrigger['type'], + requestId?: string + ) { + this.workflowId = workflowId + this.executionId = executionId + this.triggerType = triggerType + this.requestId = requestId + } + + async start(params: SessionStartParams = {}): Promise { + const { userId, workspaceId, variables, triggerData } = params + + try { + this.trigger = createTriggerObject(this.triggerType, triggerData) + this.environment = createEnvironmentObject( + this.workflowId, + this.executionId, + userId, + workspaceId, + variables + ) + this.workflowState = await loadWorkflowStateForExecution(this.workflowId) + + await enhancedExecutionLogger.startWorkflowExecution({ + workflowId: this.workflowId, + executionId: this.executionId, + trigger: this.trigger, + environment: this.environment, + workflowState: this.workflowState, + }) + + if (this.requestId) { + logger.debug( + `[${this.requestId}] Started enhanced logging for execution ${this.executionId}` + ) + } + } catch (error) { + if (this.requestId) { + logger.error(`[${this.requestId}] Failed to start enhanced logging:`, error) + } + throw error + } + } + + /** + * Set up enhanced logging on an executor instance + * Note: Enhanced logging now works through trace spans only, no direct executor integration needed + */ + setupExecutor(executor: any): void { + // No longer setting enhanced logger on executor - trace spans handle everything + if (this.requestId) { + logger.debug( + `[${this.requestId}] Enhanced logging session ready for execution ${this.executionId}` + ) + } + } + + async complete(params: SessionCompleteParams = {}): Promise { + const { endedAt, totalDurationMs, finalOutput, traceSpans } = params + + try { + const blockStats = calculateBlockStats(traceSpans || []) + const costSummary = calculateCostSummary(traceSpans || []) + + await 
enhancedExecutionLogger.completeWorkflowExecution({ + executionId: this.executionId, + endedAt: endedAt || new Date().toISOString(), + totalDurationMs: totalDurationMs || 0, + blockStats, + costSummary, + finalOutput: finalOutput || {}, + traceSpans: traceSpans || [], + }) + + if (this.requestId) { + logger.debug( + `[${this.requestId}] Completed enhanced logging for execution ${this.executionId}` + ) + } + } catch (error) { + if (this.requestId) { + logger.error(`[${this.requestId}] Failed to complete enhanced logging:`, error) + } + } + } + + async completeWithError(error?: any): Promise { + try { + const blockStats = { total: 0, success: 0, error: 1, skipped: 0 } + const costSummary = { + totalCost: 0, + totalInputCost: 0, + totalOutputCost: 0, + totalTokens: 0, + totalPromptTokens: 0, + totalCompletionTokens: 0, + models: {}, + } + + await enhancedExecutionLogger.completeWorkflowExecution({ + executionId: this.executionId, + endedAt: new Date().toISOString(), + totalDurationMs: 0, + blockStats, + costSummary, + finalOutput: null, + traceSpans: [], + }) + + if (this.requestId) { + logger.debug( + `[${this.requestId}] Completed enhanced logging with error for execution ${this.executionId}` + ) + } + } catch (enhancedError) { + if (this.requestId) { + logger.error( + `[${this.requestId}] Failed to complete enhanced logging for error:`, + enhancedError + ) + } + } + } + + async safeStart(params: SessionStartParams = {}): Promise { + try { + await this.start(params) + return true + } catch (error) { + if (this.requestId) { + logger.error( + `[${this.requestId}] Enhanced logging start failed, continuing execution:`, + error + ) + } + return false + } + } + + async safeComplete(params: SessionCompleteParams = {}): Promise { + try { + await this.complete(params) + } catch (error) { + if (this.requestId) { + logger.error(`[${this.requestId}] Enhanced logging completion failed:`, error) + } + } + } + + async safeCompleteWithError(error?: any): Promise { + try { + await this.completeWithError(error) + } catch (enhancedError) { + if (this.requestId) { + logger.error(`[${this.requestId}] Enhanced logging error completion failed:`, enhancedError) + } + } + } +} diff --git a/apps/sim/lib/logs/snapshot-service.test.ts b/apps/sim/lib/logs/snapshot-service.test.ts new file mode 100644 index 0000000000..b788936f08 --- /dev/null +++ b/apps/sim/lib/logs/snapshot-service.test.ts @@ -0,0 +1,219 @@ +import { beforeEach, describe, expect, test } from 'vitest' +import { SnapshotService } from './snapshot-service' +import type { WorkflowState } from './types' + +describe('SnapshotService', () => { + let service: SnapshotService + + beforeEach(() => { + service = new SnapshotService() + }) + + describe('computeStateHash', () => { + test('should generate consistent hashes for identical states', () => { + const state: WorkflowState = { + blocks: { + block1: { + id: 'block1', + name: 'Test Agent', + type: 'agent', + position: { x: 100, y: 200 }, + + subBlocks: {}, + outputs: {}, + enabled: true, + horizontalHandles: true, + isWide: false, + advancedMode: false, + height: 0, + }, + }, + edges: [{ id: 'edge1', source: 'block1', target: 'block2' }], + loops: {}, + parallels: {}, + } + + const hash1 = service.computeStateHash(state) + const hash2 = service.computeStateHash(state) + + expect(hash1).toBe(hash2) + expect(hash1).toHaveLength(64) // SHA-256 hex string + }) + + test('should ignore position changes', () => { + const baseState: WorkflowState = { + blocks: { + block1: { + id: 'block1', + name: 'Test Agent', + 
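Stepping back, the session class above is meant to bracket a single execution: snapshot and persist at start, aggregate trace spans at completion, and never let logging failures break the run (the safe* wrappers swallow errors). A minimal lifecycle sketch; `runWorkflow` and the identifiers are stand-ins for whatever the caller uses:

import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
import { buildTraceSpans } from '@/lib/logs/trace-spans'

const session = new EnhancedLoggingSession(workflowId, executionId, 'manual', requestId)
const startedAt = Date.now()

// Resolves false (rather than throwing) if logging setup fails;
// the workflow run proceeds either way.
await session.safeStart({ userId, workspaceId, variables: {} })

try {
  const result = await runWorkflow() // hypothetical stand-in for the executor call
  const { traceSpans } = buildTraceSpans(result)
  await session.safeComplete({
    endedAt: new Date().toISOString(),
    totalDurationMs: Date.now() - startedAt,
    finalOutput: result.output,
    traceSpans,
  })
} catch (error) {
  await session.safeCompleteWithError(error)
  throw error
}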
type: 'agent', + position: { x: 100, y: 200 }, + + subBlocks: {}, + outputs: {}, + enabled: true, + horizontalHandles: true, + isWide: false, + advancedMode: false, + height: 0, + }, + }, + edges: [], + loops: {}, + parallels: {}, + } + + const stateWithDifferentPosition: WorkflowState = { + ...baseState, + blocks: { + block1: { + ...baseState.blocks.block1, + position: { x: 500, y: 600 }, // Different position + }, + }, + } + + const hash1 = service.computeStateHash(baseState) + const hash2 = service.computeStateHash(stateWithDifferentPosition) + + expect(hash1).toBe(hash2) + }) + + test('should detect meaningful changes', () => { + const baseState: WorkflowState = { + blocks: { + block1: { + id: 'block1', + name: 'Test Agent', + type: 'agent', + position: { x: 100, y: 200 }, + + subBlocks: {}, + outputs: {}, + enabled: true, + horizontalHandles: true, + isWide: false, + advancedMode: false, + height: 0, + }, + }, + edges: [], + loops: {}, + parallels: {}, + } + + const stateWithDifferentPrompt: WorkflowState = { + ...baseState, + blocks: { + block1: { + ...baseState.blocks.block1, + // Different block state - we can change outputs to make it different + outputs: { response: { content: 'different result' } as Record }, + }, + }, + } + + const hash1 = service.computeStateHash(baseState) + const hash2 = service.computeStateHash(stateWithDifferentPrompt) + + expect(hash1).not.toBe(hash2) + }) + + test('should handle edge order consistently', () => { + const state1: WorkflowState = { + blocks: {}, + edges: [ + { id: 'edge1', source: 'a', target: 'b' }, + { id: 'edge2', source: 'b', target: 'c' }, + ], + loops: {}, + parallels: {}, + } + + const state2: WorkflowState = { + blocks: {}, + edges: [ + { id: 'edge2', source: 'b', target: 'c' }, // Different order + { id: 'edge1', source: 'a', target: 'b' }, + ], + loops: {}, + parallels: {}, + } + + const hash1 = service.computeStateHash(state1) + const hash2 = service.computeStateHash(state2) + + expect(hash1).toBe(hash2) // Should be same despite different order + }) + + test('should handle empty states', () => { + const emptyState: WorkflowState = { + blocks: {}, + edges: [], + loops: {}, + parallels: {}, + } + + const hash = service.computeStateHash(emptyState) + expect(hash).toHaveLength(64) + }) + + test('should handle complex nested structures', () => { + const complexState: WorkflowState = { + blocks: { + block1: { + id: 'block1', + name: 'Complex Agent', + type: 'agent', + position: { x: 100, y: 200 }, + + subBlocks: { + prompt: { + id: 'prompt', + type: 'short-input', + value: 'Test prompt', + }, + model: { + id: 'model', + type: 'short-input', + value: 'gpt-4', + }, + }, + outputs: { + response: { content: 'Agent response' } as Record, + }, + enabled: true, + horizontalHandles: true, + isWide: false, + advancedMode: true, + height: 200, + }, + }, + edges: [{ id: 'edge1', source: 'block1', target: 'block2', sourceHandle: 'output' }], + loops: { + loop1: { + id: 'loop1', + nodes: ['block1'], + iterations: 10, + loopType: 'for', + }, + }, + parallels: { + parallel1: { + id: 'parallel1', + nodes: ['block1'], + count: 3, + parallelType: 'count', + }, + }, + } + + const hash = service.computeStateHash(complexState) + expect(hash).toHaveLength(64) + + // Should be consistent + const hash2 = service.computeStateHash(complexState) + expect(hash).toBe(hash2) + }) + }) +}) diff --git a/apps/sim/lib/logs/snapshot-service.ts b/apps/sim/lib/logs/snapshot-service.ts new file mode 100644 index 0000000000..d4943f9f5e --- /dev/null +++ 
b/apps/sim/lib/logs/snapshot-service.ts @@ -0,0 +1,236 @@ +import { createHash } from 'crypto' +import { and, eq, lt } from 'drizzle-orm' +import { v4 as uuidv4 } from 'uuid' +import { db } from '@/db' +import { workflowExecutionSnapshots } from '@/db/schema' +import { createLogger } from './console-logger' +import type { + SnapshotService as ISnapshotService, + SnapshotCreationResult, + WorkflowExecutionSnapshot, + WorkflowExecutionSnapshotInsert, + WorkflowState, +} from './types' + +const logger = createLogger('SnapshotService') + +export class SnapshotService implements ISnapshotService { + async createSnapshot( + workflowId: string, + state: WorkflowState + ): Promise { + const result = await this.createSnapshotWithDeduplication(workflowId, state) + return result.snapshot + } + + async createSnapshotWithDeduplication( + workflowId: string, + state: WorkflowState + ): Promise { + // Hash the position-less state for deduplication (functional equivalence) + const stateHash = this.computeStateHash(state) + + const existingSnapshot = await this.getSnapshotByHash(workflowId, stateHash) + if (existingSnapshot) { + logger.debug(`Reusing existing snapshot for workflow ${workflowId} with hash ${stateHash}`) + return { + snapshot: existingSnapshot, + isNew: false, + } + } + + // Store the FULL state (including positions) so we can recreate the exact workflow + // Even though we hash without positions, we want to preserve the complete state + const snapshotData: WorkflowExecutionSnapshotInsert = { + id: uuidv4(), + workflowId, + stateHash, + stateData: state, // Full state with positions, subblock values, etc. + } + + const [newSnapshot] = await db + .insert(workflowExecutionSnapshots) + .values(snapshotData) + .returning() + + logger.debug(`Created new snapshot for workflow ${workflowId} with hash ${stateHash}`) + logger.debug(`Stored full state with ${Object.keys(state.blocks || {}).length} blocks`) + return { + snapshot: { + ...newSnapshot, + stateData: newSnapshot.stateData as WorkflowState, + createdAt: newSnapshot.createdAt.toISOString(), + }, + isNew: true, + } + } + + async getSnapshot(id: string): Promise { + const [snapshot] = await db + .select() + .from(workflowExecutionSnapshots) + .where(eq(workflowExecutionSnapshots.id, id)) + .limit(1) + + if (!snapshot) return null + + return { + ...snapshot, + stateData: snapshot.stateData as WorkflowState, + createdAt: snapshot.createdAt.toISOString(), + } + } + + async getSnapshotByHash( + workflowId: string, + hash: string + ): Promise { + const [snapshot] = await db + .select() + .from(workflowExecutionSnapshots) + .where( + and( + eq(workflowExecutionSnapshots.workflowId, workflowId), + eq(workflowExecutionSnapshots.stateHash, hash) + ) + ) + .limit(1) + + if (!snapshot) return null + + return { + ...snapshot, + stateData: snapshot.stateData as WorkflowState, + createdAt: snapshot.createdAt.toISOString(), + } + } + + computeStateHash(state: WorkflowState): string { + const normalizedState = this.normalizeStateForHashing(state) + const stateString = this.normalizedStringify(normalizedState) + return createHash('sha256').update(stateString).digest('hex') + } + + async cleanupOrphanedSnapshots(olderThanDays: number): Promise { + const cutoffDate = new Date() + cutoffDate.setDate(cutoffDate.getDate() - olderThanDays) + + const deletedSnapshots = await db + .delete(workflowExecutionSnapshots) + .where(lt(workflowExecutionSnapshots.createdAt, cutoffDate)) + .returning({ id: workflowExecutionSnapshots.id }) + + const deletedCount = 
+    const deletedCount = deletedSnapshots.length
+    logger.info(`Cleaned up ${deletedCount} orphaned snapshots older than ${olderThanDays} days`)
+    return deletedCount
+  }
+
+  private normalizeStateForHashing(state: WorkflowState): any {
+    // Use the same normalization logic as hasWorkflowChanged for consistency
+
+    // 1. Normalize edges (same as hasWorkflowChanged)
+    const normalizedEdges = (state.edges || [])
+      .map((edge) => ({
+        source: edge.source,
+        sourceHandle: edge.sourceHandle,
+        target: edge.target,
+        targetHandle: edge.targetHandle,
+      }))
+      .sort((a, b) =>
+        `${a.source}-${a.sourceHandle}-${a.target}-${a.targetHandle}`.localeCompare(
+          `${b.source}-${b.sourceHandle}-${b.target}-${b.targetHandle}`
+        )
+      )
+
+    // 2. Normalize blocks (same as hasWorkflowChanged)
+    const normalizedBlocks: Record<string, any> = {}
+
+    for (const [blockId, block] of Object.entries(state.blocks || {})) {
+      // Skip position as it doesn't affect functionality
+      const { position, ...blockWithoutPosition } = block
+
+      // Handle subBlocks with detailed comparison (same as hasWorkflowChanged)
+      const subBlocks = blockWithoutPosition.subBlocks || {}
+      const normalizedSubBlocks: Record<string, any> = {}
+
+      for (const [subBlockId, subBlock] of Object.entries(subBlocks)) {
+        // Normalize value with special handling for null/undefined
+        const value = subBlock.value ?? null
+
+        normalizedSubBlocks[subBlockId] = {
+          type: subBlock.type,
+          value: this.normalizeValue(value),
+          // Include other properties except value
+          ...Object.fromEntries(
+            Object.entries(subBlock).filter(([key]) => key !== 'value' && key !== 'type')
+          ),
+        }
+      }
+
+      normalizedBlocks[blockId] = {
+        ...blockWithoutPosition,
+        subBlocks: normalizedSubBlocks,
+      }
+    }
+
+    // 3. Normalize loops and parallels
+    const normalizedLoops: Record<string, any> = {}
+    for (const [loopId, loop] of Object.entries(state.loops || {})) {
+      normalizedLoops[loopId] = this.normalizeValue(loop)
+    }
+
+    const normalizedParallels: Record<string, any> = {}
+    for (const [parallelId, parallel] of Object.entries(state.parallels || {})) {
+      normalizedParallels[parallelId] = this.normalizeValue(parallel)
+    }
+
+    return {
+      blocks: normalizedBlocks,
+      edges: normalizedEdges,
+      loops: normalizedLoops,
+      parallels: normalizedParallels,
+    }
+  }
+
+  private normalizeValue(value: any): any {
+    // Handle null/undefined consistently
+    if (value === null || value === undefined) return null
+
+    // Handle arrays
+    if (Array.isArray(value)) {
+      return value.map((item) => this.normalizeValue(item))
+    }
+
+    // Handle objects
+    if (typeof value === 'object') {
+      const normalized: Record<string, any> = {}
+      for (const [key, val] of Object.entries(value)) {
+        normalized[key] = this.normalizeValue(val)
+      }
+      return normalized
+    }
+
+    // Handle primitives
+    return value
+  }
+
+  private normalizedStringify(obj: any): string {
+    if (obj === null || obj === undefined) return 'null'
+    if (typeof obj === 'string') return `"${obj}"`
+    if (typeof obj === 'number' || typeof obj === 'boolean') return String(obj)
+
+    if (Array.isArray(obj)) {
+      return `[${obj.map((item) => this.normalizedStringify(item)).join(',')}]`
+    }
+
+    if (typeof obj === 'object') {
+      const keys = Object.keys(obj).sort()
+      const pairs = keys.map((key) => `"${key}":${this.normalizedStringify(obj[key])}`)
+      return `{${pairs.join(',')}}`
+    }
+
+    return String(obj)
+  }
+}
+
+export const snapshotService = new SnapshotService()
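A caller can lean on the deduplication directly; a usage sketch against the service above (workflow ID and states hypothetical):

// An identical - or merely repositioned - state reuses the stored row.
const first = await snapshotService.createSnapshotWithDeduplication('wf-123', state)
const second = await snapshotService.createSnapshotWithDeduplication('wf-123', movedState)

console.log(first.isNew) // true: new row written
console.log(second.isNew) // false: same stateHash, snapshot reused
console.log(first.snapshot.id === second.snapshot.id) // true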
diff --git a/apps/sim/lib/logs/trace-spans.ts b/apps/sim/lib/logs/trace-spans.ts
index 20c08c27ba..fa7e277ec0 100644
--- a/apps/sim/lib/logs/trace-spans.ts
+++ b/apps/sim/lib/logs/trace-spans.ts
@@ -1,6 +1,9 @@
+import { createLogger } from '@/lib/logs/console-logger'
 import type { TraceSpan } from '@/app/workspace/[workspaceId]/logs/stores/types'
 import type { ExecutionResult } from '@/executor/types'
 
+const logger = createLogger('TraceSpans')
+
 // Helper function to build a tree of trace spans from execution logs
 export function buildTraceSpans(result: ExecutionResult): {
   traceSpans: TraceSpan[]
@@ -43,6 +46,16 @@ export function buildTraceSpans(result: ExecutionResult): {
     const duration = log.durationMs || 0
 
     // Create the span
+    let output = log.output || {}
+
+    // If there's an error, include it in the output
+    if (log.error) {
+      output = {
+        ...output,
+        error: log.error,
+      }
+    }
+
     const span: TraceSpan = {
       id: spanId,
       name: log.blockName || log.blockId,
@@ -54,146 +67,68 @@ export function buildTraceSpans(result: ExecutionResult): {
       children: [],
       // Store the block ID for later use in identifying direct parent-child relationships
       blockId: log.blockId,
+      // Include block input/output data
+      input: log.input || {},
+      output: output,
     }
 
     // Add provider timing data if it exists
    if (log.output?.providerTiming) {
       const providerTiming = log.output.providerTiming
-      // If we have time segments, use them to create a more detailed timeline
-      if (providerTiming.timeSegments && providerTiming.timeSegments.length > 0) {
-        const segmentStartTime = new Date(log.startedAt).getTime()
-        const children: TraceSpan[] = []
-
-        // Process segments in order
-        providerTiming.timeSegments.forEach(
-          (
-            segment: {
-              type: string
-              name: string
-              startTime: number
-              endTime: number
-              duration: number
-            },
-            index: number
-          ) => {
-            // Ensure we have valid startTime and endTime
-            let segmentStart: number
-            let segmentEnd: number
-
-            // Handle different time formats - some providers use ISO strings, some use timestamps
-            if (typeof segment.startTime === 'string') {
-              try {
-                segmentStart = new Date(segment.startTime).getTime()
-              } catch (_e) {
-                segmentStart = segmentStartTime + index * 1000 // Fallback offset
-              }
-            } else {
-              segmentStart = segment.startTime
-            }
+      // Store provider timing as metadata instead of creating child spans
+      // This keeps the UI cleaner while preserving timing information
 
-            if (typeof segment.endTime === 'string') {
-              try {
-                segmentEnd = new Date(segment.endTime).getTime()
-              } catch (_e) {
-                segmentEnd = segmentStart + (segment.duration || 1000) // Fallback duration
-              }
-            } else {
-              segmentEnd = segment.endTime
-            }
-
-            // For streaming responses, make sure our timing is valid
-            if (
-              Number.isNaN(segmentStart) ||
-              Number.isNaN(segmentEnd) ||
-              segmentEnd < segmentStart
-            ) {
-              // Use fallback values
-              segmentStart = segmentStartTime + index * 1000
-              segmentEnd = segmentStart + (segment.duration || 1000)
-            }
-
-            const childSpan: TraceSpan = {
-              id: `${spanId}-segment-${index}`,
-              name: segment.name || `${segment.type} operation`,
-              startTime: new Date(segmentStart).toISOString(),
-              endTime: new Date(segmentEnd).toISOString(),
-              duration: segment.duration || segmentEnd - segmentStart,
-              type:
-                segment.type === 'model'
-                  ? 'model'
-                  : segment.type === 'tool'
-                    ? 'tool'
-                    : 'processing',
-              status: 'success',
-              children: [],
-            }
-
-            // Add any additional metadata
-            if (segment.type === 'tool' && typeof segment.name === 'string') {
-              // Add as a custom attribute using type assertion
-              ;(childSpan as any).toolName = segment.name
-            }
-
-            children.push(childSpan)
-          }
-        )
+      ;(span as any).providerTiming = {
+        duration: providerTiming.duration,
+        startTime: providerTiming.startTime,
+        endTime: providerTiming.endTime,
+        segments: providerTiming.timeSegments || [],
+      }
 
-        // Only add children if we have valid spans
-        if (children.length > 0) {
-          span.children = children
-        }
+      // Add cost information if available
+      if (log.output?.cost) {
+        ;(span as any).cost = log.output.cost
+        logger.debug(`Added cost to span ${span.id}`, {
+          blockId: log.blockId,
+          blockType: log.blockType,
+          cost: log.output.cost,
+        })
+      }
-      // If no segments but we have provider timing, create a provider span
-      else {
-        // Create a child span for the provider execution
-        const providerSpan: TraceSpan = {
-          id: `${spanId}-provider`,
-          name: log.output.model || 'AI Provider',
-          type: 'provider',
-          duration: providerTiming.duration || 0,
-          startTime: providerTiming.startTime || log.startedAt,
-          endTime: providerTiming.endTime || log.endedAt,
-          status: 'success',
-          tokens: log.output.tokens?.total,
-        }
 
-        // If we have model time, create a child span for just the model processing
-        if (providerTiming.modelTime) {
-          const modelName = log.output.model || ''
-          const modelSpan: TraceSpan = {
-            id: `${spanId}-model`,
-            name: `Model Generation${modelName ? ` (${modelName})` : ''}`,
-            type: 'model',
-            duration: providerTiming.modelTime,
-            startTime: providerTiming.startTime, // Approximate
-            endTime: providerTiming.endTime, // Approximate
-            status: 'success',
-            tokens: log.output.tokens?.completion,
-          }
+      // Add token information if available
+      if (log.output?.tokens) {
+        ;(span as any).tokens = log.output.tokens
+        logger.debug(`Added tokens to span ${span.id}`, {
+          blockId: log.blockId,
+          blockType: log.blockType,
+          tokens: log.output.tokens,
+        })
+      }
 
-          if (!providerSpan.children) providerSpan.children = []
-          providerSpan.children.push(modelSpan)
-        }
+      // Add model information
+      if (log.output?.model) {
+        ;(span as any).model = log.output.model
+        logger.debug(`Added model to span ${span.id}`, {
+          blockId: log.blockId,
+          blockType: log.blockType,
+          model: log.output.model,
+        })
+      }
+    } else {
+      // When not using provider timing, still add cost and token information
+      if (log.output?.cost) {
+        ;(span as any).cost = log.output.cost
+      }
 
-        if (!span.children) span.children = []
-        span.children.push(providerSpan)
+      if (log.output?.tokens) {
+        ;(span as any).tokens = log.output.tokens
+      }
 
-        // When using provider timing without segments, still add tool calls if they exist
-        if (log.output?.toolCalls?.list) {
-          span.toolCalls = log.output.toolCalls.list.map((tc: any) => ({
-            name: stripCustomToolPrefix(tc.name),
-            duration: tc.duration || 0,
-            startTime: tc.startTime || log.startedAt,
-            endTime: tc.endTime || log.endedAt,
-            status: tc.error ? 'error' : 'success',
-            input: tc.arguments || tc.input,
-            output: tc.result || tc.output,
-            error: tc.error,
-          }))
-        }
+      if (log.output?.model) {
+        ;(span as any).model = log.output.model
       }
-    } else {
+      // When not using provider timing at all, add tool calls if they exist
       // Tool calls handling for different formats:
       // 1. Standard format in response.toolCalls.list
@@ -257,96 +192,30 @@ export function buildTraceSpans(result: ExecutionResult): {
     spanMap.set(spanId, span)
   })
 
-  // Second pass: Build the hierarchy based on direct relationships
-  // We'll first need to sort logs chronologically for proper order
+  // Second pass: Build a flat hierarchy for sequential workflow execution
+  // For most workflows, blocks execute sequentially and should be shown at the same level
+  // Only nest blocks that are truly hierarchical (like subflows, loops, etc.)
+
   const sortedLogs = [...result.logs].sort((a, b) => {
     const aTime = new Date(a.startedAt).getTime()
     const bTime = new Date(b.startedAt).getTime()
     return aTime - bTime
   })
 
-  // Map to track spans by block ID (for parent-child relationship identification)
-  const blockToSpanMap = new Map<string, string>()
-
-  // First, map block IDs to their span IDs
-  sortedLogs.forEach((log) => {
-    if (!log.blockId) return
-
-    const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
-    blockToSpanMap.set(log.blockId, spanId)
-  })
-
-  // Identify root spans and build relationships
   const rootSpans: TraceSpan[] = []
 
-  // For sequential blocks, we need to determine if they are true parent-child
-  // or just execution dependencies. True parent-child should be nested,
-  // while sequential execution blocks should be at the same level.
-
-  // Identify blocks at the top level (aka "layer 0")
-  const topLevelBlocks = new Set<string>()
-
-  // Create the array of parent values once before the loop
-  const parentValues = Array.from(parentChildMap.values())
-
-  workflowConnections.forEach((conn) => {
-    // If the source is starter or doesn't exist in our connections as a target, it's top level
-    if (conn.source === 'starter' || !parentValues.includes(conn.source)) {
-      topLevelBlocks.add(conn.target)
-    }
-  })
-
+  // For now, treat all blocks as top-level spans in execution order
+  // This gives a cleaner, more intuitive view of workflow execution
   sortedLogs.forEach((log) => {
     if (!log.blockId) return
 
     const spanId = `${log.blockId}-${new Date(log.startedAt).getTime()}`
     const span = spanMap.get(spanId)
-    if (!span) return
-
-    // Check if this block has a direct parent in the workflow
-    const parentBlockId = parentChildMap.get(log.blockId)
-
-    // Top level blocks are those that:
-    // 1. Have no parent (or parent is starter)
-    // 2. Are identified as top level in our analysis
-    const isTopLevel =
-      !parentBlockId || parentBlockId === 'starter' || topLevelBlocks.has(log.blockId)
-
-    if (isTopLevel) {
-      // This is a top level span
+    if (span) {
       rootSpans.push(span)
-    } else {
-      // This has a parent
-      // Only nest as a child if the parent block is NOT a top-level block
-      // This ensures sequential blocks at the same "layer" stay at the same level
-      // while true parent-child relationships are preserved
-      if (parentBlockId && !topLevelBlocks.has(parentBlockId)) {
-        const parentSpanId = blockToSpanMap.get(parentBlockId)
-
-        if (parentSpanId) {
-          const parentSpan = spanMap.get(parentSpanId)
-          if (parentSpan) {
-            // Add as child to direct parent
-            if (!parentSpan.children) parentSpan.children = []
-            parentSpan.children.push(span)
-          } else {
-            // Parent span not found, add as root
-            rootSpans.push(span)
-          }
-        } else {
-          // Parent block executed but no span, add as root
-          rootSpans.push(span)
-        }
-      } else {
-        // Parent is a top level block, so this should also be a top level span
-        // This prevents sequential top-level blocks from being nested
-        rootSpans.push(span)
-      }
    }
   })
 
-  // Fall back to time-based hierarchy only if we couldn't establish relationships
-  // This happens when we don't have workflow connection information
   if (rootSpans.length === 0 && workflowConnections.length === 0) {
     // Track parent spans using a stack
     const spanStack: TraceSpan[] = []
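Stripped of the removed branches, the new second pass reduces to a few lines; a condensed sketch (using the spanMap built in the first pass):

// Chronological order in, sibling spans out - nesting is no longer
// inferred from workflow connections.
const rootSpans: TraceSpan[] = [...result.logs]
  .filter((log) => log.blockId)
  .sort((a, b) => new Date(a.startedAt).getTime() - new Date(b.startedAt).getTime())
  .map((log) => spanMap.get(`${log.blockId}-${new Date(log.startedAt).getTime()}`))
  .filter((span): span is TraceSpan => span !== undefined)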
diff --git a/apps/sim/lib/logs/types.ts b/apps/sim/lib/logs/types.ts
index 7a95088bef..bf1c76f22a 100644
--- a/apps/sim/lib/logs/types.ts
+++ b/apps/sim/lib/logs/types.ts
@@ -95,7 +95,6 @@ export interface WorkflowExecutionLog {
   totalInputCost: number
   totalOutputCost: number
   totalTokens: number
-  primaryModel: string
   metadata: {
     environment: ExecutionEnvironment
     trigger: ExecutionTrigger
@@ -157,6 +156,7 @@ export interface TraceSpan {
   relativeStartMs?: number
   blockId?: string
   input?: Record<string, any>
+  output?: Record<string, any>
 }
 
 export interface WorkflowExecutionSummary {
@@ -180,7 +180,6 @@ export interface WorkflowExecutionSummary {
     inputCost: number
     outputCost: number
     tokens: number
-    primaryModel: string
   }
   stateSnapshotId: string
   errorSummary?: {
@@ -372,7 +371,6 @@ export interface ExecutionLoggerService {
     totalInputCost: number
     totalOutputCost: number
     totalTokens: number
-    primaryModel: string
   }
   finalOutput: BlockOutputData
   traceSpans?: TraceSpan[]
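With `output` now carried alongside `input`, a span records both sides of a block boundary; roughly (values illustrative, unrelated fields omitted):

const span = {
  blockId: 'block-42',
  input: { prompt: 'Summarize the report' },
  // buildTraceSpans folds log.error into output, so failures surface here too
  output: { error: 'Provider timeout' },
  relativeStartMs: 0,
}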
diff --git a/apps/sim/lib/tokenization/calculators.ts b/apps/sim/lib/tokenization/calculators.ts
new file mode 100644
index 0000000000..a81c0813d9
--- /dev/null
+++ b/apps/sim/lib/tokenization/calculators.ts
@@ -0,0 +1,145 @@
+/**
+ * Cost calculation functions for tokenization
+ */
+
+import { createLogger } from '@/lib/logs/console-logger'
+import { calculateCost } from '@/providers/utils'
+import { createTokenizationError } from './errors'
+import { estimateInputTokens, estimateOutputTokens, estimateTokenCount } from './estimators'
+import type { CostBreakdown, StreamingCostResult, TokenizationInput, TokenUsage } from './types'
+import {
+  getProviderForTokenization,
+  logTokenizationDetails,
+  validateTokenizationInput,
+} from './utils'
+
+const logger = createLogger('TokenizationCalculators')
+
+/**
+ * Calculates cost estimate for streaming execution using token estimation
+ */
+export function calculateStreamingCost(
+  model: string,
+  inputText: string,
+  outputText: string,
+  systemPrompt?: string,
+  context?: string,
+  messages?: Array<{ role: string; content: string }>
+): StreamingCostResult {
+  try {
+    // Validate inputs
+    validateTokenizationInput(model, inputText, outputText)
+
+    const providerId = getProviderForTokenization(model)
+
+    logger.debug('Starting streaming cost calculation', {
+      model,
+      providerId,
+      inputLength: inputText.length,
+      outputLength: outputText.length,
+      hasSystemPrompt: !!systemPrompt,
+      hasContext: !!context,
+      hasMessages: !!messages?.length,
+    })
+
+    // Estimate input tokens (combine all input sources)
+    const inputEstimate = estimateInputTokens(systemPrompt, context, messages, providerId)
+
+    // Add the main input text to the estimation
+    const mainInputEstimate = estimateTokenCount(inputText, providerId)
+    const totalPromptTokens = inputEstimate.count + mainInputEstimate.count
+
+    // Estimate output tokens
+    const outputEstimate = estimateOutputTokens(outputText, providerId)
+    const completionTokens = outputEstimate.count
+
+    // Calculate total tokens
+    const totalTokens = totalPromptTokens + completionTokens
+
+    // Create token usage object
+    const tokens: TokenUsage = {
+      prompt: totalPromptTokens,
+      completion: completionTokens,
+      total: totalTokens,
+    }
+
+    // Calculate cost using provider pricing
+    const costResult = calculateCost(model, totalPromptTokens, completionTokens, false)
+
+    const cost: CostBreakdown = {
+      input: costResult.input,
+      output: costResult.output,
+      total: costResult.total,
+    }
+
+    const result: StreamingCostResult = {
+      tokens,
+      cost,
+      model,
+      provider: providerId,
+      method: 'tokenization',
+    }
+
+    logTokenizationDetails('Streaming cost calculation completed', {
+      model,
+      provider: providerId,
+      inputLength: inputText.length,
+      outputLength: outputText.length,
+      tokens,
+      cost,
+      method: 'tokenization',
+    })
+
+    return result
+  } catch (error) {
+    logger.error('Streaming cost calculation failed', {
+      model,
+      inputLength: inputText?.length || 0,
+      outputLength: outputText?.length || 0,
+      error: error instanceof Error ? error.message : String(error),
+    })
+
+    if (error instanceof Error && error.name === 'TokenizationError') {
+      throw error
+    }
+
+    throw createTokenizationError(
+      'CALCULATION_FAILED',
+      `Failed to calculate streaming cost: ${error instanceof Error ? error.message : String(error)}`,
+      { model, inputLength: inputText?.length || 0, outputLength: outputText?.length || 0 }
+    )
+  }
+}
+
+/**
+ * Calculates cost for tokenization input object
+ */
+export function calculateTokenizationCost(input: TokenizationInput): StreamingCostResult {
+  return calculateStreamingCost(
+    input.model,
+    input.inputText,
+    input.outputText,
+    input.systemPrompt,
+    input.context,
+    input.messages
+  )
+}
+
+/**
+ * Creates a streaming cost result from existing provider response data
+ */
+export function createCostResultFromProviderData(
+  model: string,
+  providerTokens: TokenUsage,
+  providerCost: CostBreakdown
+): StreamingCostResult {
+  const providerId = getProviderForTokenization(model)
+
+  return {
+    tokens: providerTokens,
+    cost: providerCost,
+    model,
+    provider: providerId,
+    method: 'provider_response',
+  }
+}
diff --git a/apps/sim/lib/tokenization/constants.ts b/apps/sim/lib/tokenization/constants.ts
new file mode 100644
index 0000000000..2548c0b1b0
--- /dev/null
+++ b/apps/sim/lib/tokenization/constants.ts
@@ -0,0 +1,71 @@
+/**
+ * Configuration constants for tokenization functionality
+ */
+
+import type { ProviderTokenizationConfig } from './types'
+
+export const TOKENIZATION_CONFIG = {
+  providers: {
+    openai: {
+      avgCharsPerToken: 4,
+      confidence: 'high',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    'azure-openai': {
+      avgCharsPerToken: 4,
+      confidence: 'high',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    anthropic: {
+      avgCharsPerToken: 4.5,
+      confidence: 'high',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    google: {
+      avgCharsPerToken: 5,
+      confidence: 'medium',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    deepseek: {
+      avgCharsPerToken: 4,
+      confidence: 'medium',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    xai: {
+      avgCharsPerToken: 4,
+      confidence: 'medium',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    cerebras: {
+      avgCharsPerToken: 4,
+      confidence: 'medium',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    groq: {
+      avgCharsPerToken: 4,
+      confidence: 'medium',
+      supportedMethods: ['heuristic', 'fallback'],
+    },
+    ollama: {
+      avgCharsPerToken: 4,
+      confidence: 'low',
+      supportedMethods: ['fallback'],
+    },
+  } satisfies Record<string, ProviderTokenizationConfig>,
+
+  fallback: {
+    avgCharsPerToken: 4,
+    confidence: 'low',
+    supportedMethods: ['fallback'],
+  } satisfies ProviderTokenizationConfig,
+
+  defaults: {
+    model: 'gpt-4o',
+    provider: 'openai',
+  },
+} as const
+
+export const LLM_BLOCK_TYPES = ['agent', 'router', 'evaluator'] as const
+
+export const MIN_TEXT_LENGTH_FOR_ESTIMATION = 1
+export const MAX_PREVIEW_LENGTH = 100
diff --git a/apps/sim/lib/tokenization/errors.ts b/apps/sim/lib/tokenization/errors.ts
new file mode 100644
index 0000000000..d5b35fd410
--- /dev/null
+++ b/apps/sim/lib/tokenization/errors.ts
@@ -0,0 +1,23 @@
+/**
+ * Custom error classes for tokenization functionality
+ */
+
+export class TokenizationError extends Error {
+  public readonly code: 'INVALID_PROVIDER' | 'MISSING_TEXT' | 'CALCULATION_FAILED' | 'INVALID_MODEL'
+  public readonly details?: Record<string, any>
+
+  constructor(message: string, code: TokenizationError['code'], details?: Record<string, any>) {
+    super(message)
+    this.name = 'TokenizationError'
+    this.code = code
+    this.details = details
+  }
+}
+
+export function createTokenizationError(
+  code: TokenizationError['code'],
+  message: string,
+  details?: Record<string, any>
+): TokenizationError {
+  return new TokenizationError(message, code, details)
+}
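End to end, the calculator is a single call; a hedged usage sketch (model name and texts illustrative):

const result = calculateStreamingCost(
  'gpt-4o',
  'Summarize this document.', // main input text
  streamedText // accumulated streamed output
)

// method is 'tokenization' because the counts were estimated, not provider-reported
console.log(result.tokens.total, result.cost.total, result.method)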
diff --git a/apps/sim/lib/tokenization/estimators.ts b/apps/sim/lib/tokenization/estimators.ts
new file mode 100644
index 0000000000..afc5cdd5c0
--- /dev/null
+++ b/apps/sim/lib/tokenization/estimators.ts
@@ -0,0 +1,191 @@
+/**
+ * Token estimation functions for different providers
+ */
+
+import { createLogger } from '@/lib/logs/console-logger'
+import { MIN_TEXT_LENGTH_FOR_ESTIMATION, TOKENIZATION_CONFIG } from './constants'
+import type { TokenEstimate } from './types'
+import { createTextPreview, getProviderConfig } from './utils'
+
+const logger = createLogger('TokenizationEstimators')
+
+/**
+ * Estimates token count for text using provider-specific heuristics
+ */
+export function estimateTokenCount(text: string, providerId?: string): TokenEstimate {
+  if (!text || text.length < MIN_TEXT_LENGTH_FOR_ESTIMATION) {
+    return {
+      count: 0,
+      confidence: 'high',
+      provider: providerId || 'unknown',
+      method: 'fallback',
+    }
+  }
+
+  const effectiveProviderId = providerId || TOKENIZATION_CONFIG.defaults.provider
+  const config = getProviderConfig(effectiveProviderId)
+
+  logger.debug('Starting token estimation', {
+    provider: effectiveProviderId,
+    textLength: text.length,
+    preview: createTextPreview(text),
+    avgCharsPerToken: config.avgCharsPerToken,
+  })
+
+  let estimatedTokens: number
+
+  switch (effectiveProviderId) {
+    case 'openai':
+    case 'azure-openai':
+      estimatedTokens = estimateOpenAITokens(text)
+      break
+    case 'anthropic':
+      estimatedTokens = estimateAnthropicTokens(text)
+      break
+    case 'google':
+      estimatedTokens = estimateGoogleTokens(text)
+      break
+    default:
+      estimatedTokens = estimateGenericTokens(text, config.avgCharsPerToken)
+  }
+
+  const result: TokenEstimate = {
+    count: Math.max(1, Math.round(estimatedTokens)),
+    confidence: config.confidence,
+    provider: effectiveProviderId,
+    method: 'heuristic',
+  }
+
+  logger.debug('Token estimation completed', {
+    provider: effectiveProviderId,
+    textLength: text.length,
+    estimatedTokens: result.count,
+    confidence: result.confidence,
+  })
+
+  return result
+}
+
+/**
+ * OpenAI-specific token estimation using BPE characteristics
+ */
+function estimateOpenAITokens(text: string): number {
+  const words = text.trim().split(/\s+/)
+  let tokenCount = 0
+
+  for (const word of words) {
+    if (word.length === 0) continue
+
+    // GPT tokenizer characteristics based on BPE
+    if (word.length <= 4) {
+      tokenCount += 1
+    } else if (word.length <= 8) {
+      tokenCount += Math.ceil(word.length / 4.5)
+    } else {
+      tokenCount += Math.ceil(word.length / 4)
+    }
+
+    // Add extra tokens for punctuation
+    const punctuationCount = (word.match(/[.,!?;:"'()[\]{}<>]/g) || []).length
+    tokenCount += punctuationCount * 0.5
+  }
+
+  // Add tokens for newlines and formatting
+  const newlineCount = (text.match(/\n/g) || []).length
+  tokenCount += newlineCount * 0.5
+
+  return tokenCount
+}
+
+/**
+ * Anthropic Claude-specific token estimation
+ */
+function estimateAnthropicTokens(text: string): number {
+  const words = text.trim().split(/\s+/)
+  let tokenCount = 0
+
+  for (const word of words) {
+    if (word.length === 0) continue
+
+    // Claude tokenizer tends to be slightly more efficient
+    if (word.length <= 4) {
+      tokenCount += 1
+    } else if (word.length <= 8) {
+      tokenCount += Math.ceil(word.length / 5)
+    } else {
+      tokenCount += Math.ceil(word.length / 4.5)
+    }
+  }
+
+  // Claude handles formatting slightly better
+  const newlineCount = (text.match(/\n/g) || []).length
+  tokenCount += newlineCount * 0.3
+
+  return tokenCount
+}
+
+/**
+ * Google Gemini-specific token estimation
+ */
+function estimateGoogleTokens(text: string): number {
+  const words = text.trim().split(/\s+/)
+  let tokenCount = 0
+
+  for (const word of words) {
+    if (word.length === 0) continue
+
+    // Gemini tokenizer characteristics
+    if (word.length <= 5) {
+      tokenCount += 1
+    } else if (word.length <= 10) {
+      tokenCount += Math.ceil(word.length / 6)
+    } else {
+      tokenCount += Math.ceil(word.length / 5)
+    }
+  }
+
+  return tokenCount
+}
+
+/**
+ * Generic token estimation fallback
+ */
+function estimateGenericTokens(text: string, avgCharsPerToken: number): number {
+  const charCount = text.trim().length
+  return Math.ceil(charCount / avgCharsPerToken)
+}
+
+/**
+ * Estimates tokens for input content including context
+ */
+export function estimateInputTokens(
+  systemPrompt?: string,
+  context?: string,
+  messages?: Array<{ role: string; content: string }>,
+  providerId?: string
+): TokenEstimate {
+  let totalText = ''
+
+  if (systemPrompt) {
+    totalText += `${systemPrompt}\n`
+  }
+
+  if (context) {
+    totalText += `${context}\n`
+  }
+
+  if (messages) {
+    for (const message of messages) {
+      totalText += `${message.role}: ${message.content}\n`
+    }
+  }
+
+  return estimateTokenCount(totalText, providerId)
+}
+
+/**
+ * Estimates tokens for output content
+ */
+export function estimateOutputTokens(content: string, providerId?: string): TokenEstimate {
+  return estimateTokenCount(content, providerId)
+}
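The generic fallback is plain character arithmetic: at the default 4 chars/token, 1,000 characters estimate to ceil(1000 / 4) = 250 tokens. The same figure falls out of the OpenAI heuristic for one long unbroken word:

const estimate = estimateTokenCount('a'.repeat(1000), 'openai')
// One 1000-char word → ceil(1000 / 4) = 250 under the OpenAI heuristic
console.log(estimate) // { count: 250, confidence: 'high', provider: 'openai', method: 'heuristic' }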
diff --git a/apps/sim/lib/tokenization/index.ts b/apps/sim/lib/tokenization/index.ts
new file mode 100644
index 0000000000..cfa5d00266
--- /dev/null
+++ b/apps/sim/lib/tokenization/index.ts
@@ -0,0 +1,43 @@
+/**
+ * Main tokenization module exports
+ *
+ * This module provides token estimation and cost calculation functionality
+ * for streaming LLM executions where actual token counts are not available.
+ */
+
+// Core calculation functions
+export {
+  calculateStreamingCost,
+  calculateTokenizationCost,
+  createCostResultFromProviderData,
+} from './calculators'
+// Constants
+export { LLM_BLOCK_TYPES, TOKENIZATION_CONFIG } from './constants'
+// Error handling
+export { createTokenizationError, TokenizationError } from './errors'
+// Token estimation functions
+export { estimateInputTokens, estimateOutputTokens, estimateTokenCount } from './estimators'
+// Streaming-specific helpers
+export { processStreamingBlockLog, processStreamingBlockLogs } from './streaming'
+// Types
+export type {
+  CostBreakdown,
+  ProviderTokenizationConfig,
+  StreamingCostResult,
+  TokenEstimate,
+  TokenizationInput,
+  TokenUsage,
+} from './types'
+// Utility functions
+export {
+  createTextPreview,
+  extractTextContent,
+  formatTokenCount,
+  getProviderConfig,
+  getProviderForTokenization,
+  hasRealCostData,
+  hasRealTokenData,
+  isTokenizableBlockType,
+  logTokenizationDetails,
+  validateTokenizationInput,
+} from './utils'
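Callers are expected to import from this barrel rather than the submodules, e.g.:

import { calculateStreamingCost, estimateTokenCount, formatTokenCount } from '@/lib/tokenization'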
diff --git a/apps/sim/lib/tokenization/streaming.ts b/apps/sim/lib/tokenization/streaming.ts
new file mode 100644
index 0000000000..6b2ac589ef
--- /dev/null
+++ b/apps/sim/lib/tokenization/streaming.ts
@@ -0,0 +1,158 @@
+/**
+ * Streaming-specific tokenization helpers
+ */
+
+import { createLogger } from '@/lib/logs/console-logger'
+import type { BlockLog } from '@/executor/types'
+import { calculateStreamingCost } from './calculators'
+import { TOKENIZATION_CONFIG } from './constants'
+import {
+  extractTextContent,
+  hasRealCostData,
+  hasRealTokenData,
+  isTokenizableBlockType,
+  logTokenizationDetails,
+} from './utils'
+
+const logger = createLogger('StreamingTokenization')
+
+/**
+ * Processes a block log and adds tokenization data if needed
+ */
+export function processStreamingBlockLog(log: BlockLog, streamedContent: string): boolean {
+  // Check if this block should be tokenized
+  if (!isTokenizableBlockType(log.blockType)) {
+    return false
+  }
+
+  // Check if we already have meaningful token/cost data
+  if (hasRealTokenData(log.output?.tokens) && hasRealCostData(log.output?.cost)) {
+    logger.debug(`Block ${log.blockId} already has real token/cost data`, {
+      blockType: log.blockType,
+      tokens: log.output?.tokens,
+      cost: log.output?.cost,
+    })
+    return false
+  }
+
+  // Check if we have content to tokenize
+  if (!streamedContent?.trim()) {
+    logger.debug(`Block ${log.blockId} has no content to tokenize`, {
+      blockType: log.blockType,
+      contentLength: streamedContent?.length || 0,
+    })
+    return false
+  }
+
+  try {
+    // Determine model to use
+    const model = getModelForBlock(log)
+
+    // Prepare input text from log
+    const inputText = extractTextContent(log.input)
+
+    logger.debug(`Starting tokenization for streaming block ${log.blockId}`, {
+      blockType: log.blockType,
+      model,
+      inputLength: inputText.length,
+      outputLength: streamedContent.length,
+      hasInput: !!log.input,
+    })
+
+    // Calculate streaming cost
+    const result = calculateStreamingCost(
+      model,
+      inputText,
+      streamedContent,
+      log.input?.systemPrompt,
+      log.input?.context,
+      log.input?.messages
+    )
+
+    // Update the log output with tokenization data
+    if (!log.output) {
+      log.output = {}
+    }
+
+    log.output.tokens = result.tokens
+    log.output.cost = result.cost
+    log.output.model = result.model
+
+    logTokenizationDetails(`✅ Streaming tokenization completed for ${log.blockType}`, {
+      blockId: log.blockId,
+      blockType: log.blockType,
+      model: result.model,
+      provider: result.provider,
+      inputLength: inputText.length,
+      outputLength: streamedContent.length,
+      tokens: result.tokens,
+      cost: result.cost,
+      method: result.method,
+    })
+
+    return true
+  } catch (error) {
+    logger.error(`❌ Streaming tokenization failed for block ${log.blockId}`, {
+      blockType: log.blockType,
+      error: error instanceof Error ? error.message : String(error),
+      contentLength: streamedContent?.length || 0,
+    })
+
+    // Don't throw - graceful degradation
+    return false
+  }
+}
+
+/**
+ * Determines the appropriate model for a block
+ */
+function getModelForBlock(log: BlockLog): string {
+  // Try to get model from output first
+  if (log.output?.model?.trim()) {
+    return log.output.model
+  }
+
+  // Try to get model from input
+  if (log.input?.model?.trim()) {
+    return log.input.model
+  }
+
+  // Use block type specific defaults
+  const blockType = log.blockType
+  if (blockType === 'agent' || blockType === 'router' || blockType === 'evaluator') {
+    return TOKENIZATION_CONFIG.defaults.model
+  }
+
+  // Final fallback
+  return TOKENIZATION_CONFIG.defaults.model
+}
+
+/**
+ * Processes multiple block logs for streaming tokenization
+ */
+export function processStreamingBlockLogs(
+  logs: BlockLog[],
+  streamedContentMap: Map<string, string>
+): number {
+  let processedCount = 0
+
+  logger.debug('Processing streaming block logs for tokenization', {
+    totalLogs: logs.length,
+    streamedBlocks: streamedContentMap.size,
+  })
+
+  for (const log of logs) {
+    const content = streamedContentMap.get(log.blockId)
+    if (content && processStreamingBlockLog(log, content)) {
+      processedCount++
+    }
+  }
+
+  logger.info(`Streaming tokenization summary`, {
+    totalLogs: logs.length,
+    processedBlocks: processedCount,
+    streamedBlocks: streamedContentMap.size,
+  })
+
+  return processedCount
+}
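In the chat route this is driven off the per-block streamed text; a minimal sketch of the call site (result as returned by executor.execute):

// blockId → accumulated streamed text, collected while the response streamed
const streamedContent = new Map<string, string>([['agent-1', fullStreamedText]])

// Mutates matching logs in place - attaching estimated tokens/cost/model -
// and returns how many blocks were backfilled.
const processed = processStreamingBlockLogs(result.logs, streamedContent)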
diff --git a/apps/sim/lib/tokenization/types.ts b/apps/sim/lib/tokenization/types.ts
new file mode 100644
index 0000000000..10566f9e0a
--- /dev/null
+++ b/apps/sim/lib/tokenization/types.ts
@@ -0,0 +1,69 @@
+/**
+ * Type definitions for tokenization functionality
+ */
+
+export interface TokenEstimate {
+  /** Estimated number of tokens */
+  count: number
+  /** Confidence level of the estimation */
+  confidence: 'high' | 'medium' | 'low'
+  /** Provider used for estimation */
+  provider: string
+  /** Method used for estimation */
+  method: 'precise' | 'heuristic' | 'fallback'
+}
+
+export interface TokenUsage {
+  /** Number of prompt/input tokens */
+  prompt: number
+  /** Number of completion/output tokens */
+  completion: number
+  /** Total number of tokens */
+  total: number
+}
+
+export interface CostBreakdown {
+  /** Input cost in USD */
+  input: number
+  /** Output cost in USD */
+  output: number
+  /** Total cost in USD */
+  total: number
+}
+
+export interface StreamingCostResult {
+  /** Token usage breakdown */
+  tokens: TokenUsage
+  /** Cost breakdown */
+  cost: CostBreakdown
+  /** Model used for calculation */
+  model: string
+  /** Provider ID */
+  provider: string
+  /** Estimation method used */
+  method: 'tokenization' | 'provider_response'
+}
+
+export interface TokenizationInput {
+  /** Primary input text */
+  inputText: string
+  /** Generated output text */
+  outputText: string
+  /** Model identifier */
+  model: string
+  /** Optional system prompt */
+  systemPrompt?: string
+  /** Optional context */
+  context?: string
+  /** Optional message history */
+  messages?: Array<{ role: string; content: string }>
+}
+
+export interface ProviderTokenizationConfig {
+  /** Average characters per token for this provider */
+  avgCharsPerToken: number
+  /** Confidence level for this provider's estimation */
+  confidence: TokenEstimate['confidence']
+  /** Supported token estimation methods */
+  supportedMethods: TokenEstimate['method'][]
+}
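Put together, a StreamingCostResult bundles the two breakdowns; shaped like (numbers illustrative):

const example: StreamingCostResult = {
  tokens: { prompt: 320, completion: 180, total: 500 },
  cost: { input: 0.0016, output: 0.0027, total: 0.0043 },
  model: 'gpt-4o',
  provider: 'openai',
  method: 'tokenization',
}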
diff --git a/apps/sim/lib/tokenization/utils.ts b/apps/sim/lib/tokenization/utils.ts
new file mode 100644
index 0000000000..ce2dce50c5
--- /dev/null
+++ b/apps/sim/lib/tokenization/utils.ts
@@ -0,0 +1,163 @@
+/**
+ * Utility functions for tokenization
+ */
+
+import { createLogger } from '@/lib/logs/console-logger'
+import { getProviderFromModel } from '@/providers/utils'
+import { LLM_BLOCK_TYPES, MAX_PREVIEW_LENGTH, TOKENIZATION_CONFIG } from './constants'
+import { createTokenizationError } from './errors'
+import type { ProviderTokenizationConfig, TokenUsage } from './types'
+
+const logger = createLogger('TokenizationUtils')
+
+/**
+ * Gets tokenization configuration for a specific provider
+ */
+export function getProviderConfig(providerId: string): ProviderTokenizationConfig {
+  const config =
+    TOKENIZATION_CONFIG.providers[providerId as keyof typeof TOKENIZATION_CONFIG.providers]
+
+  if (!config) {
+    logger.debug(`No specific config for provider ${providerId}, using fallback`, { providerId })
+    return TOKENIZATION_CONFIG.fallback
+  }
+
+  return config
+}
+
+/**
+ * Extracts provider ID from model name
+ */
+export function getProviderForTokenization(model: string): string {
+  try {
+    return getProviderFromModel(model)
+  } catch (error) {
+    logger.warn(`Failed to get provider for model ${model}, using default`, {
+      model,
+      error: error instanceof Error ? error.message : String(error),
+    })
+    return TOKENIZATION_CONFIG.defaults.provider
+  }
+}
+
+/**
+ * Checks if a block type should be tokenized
+ */
+export function isTokenizableBlockType(blockType?: string): boolean {
+  if (!blockType) return false
+  return LLM_BLOCK_TYPES.includes(blockType as any)
+}
+
+/**
+ * Checks if tokens/cost data is meaningful (non-zero)
+ */
+export function hasRealTokenData(tokens?: TokenUsage): boolean {
+  if (!tokens) return false
+  return tokens.total > 0 || tokens.prompt > 0 || tokens.completion > 0
+}
+
+/**
+ * Checks if cost data is meaningful (non-zero)
+ */
+export function hasRealCostData(cost?: {
+  total?: number
+  input?: number
+  output?: number
+}): boolean {
+  if (!cost) return false
+  return (cost.total || 0) > 0 || (cost.input || 0) > 0 || (cost.output || 0) > 0
+}
+
+/**
+ * Safely extracts text content from various input formats
+ */
+export function extractTextContent(input: unknown): string {
+  if (typeof input === 'string') {
+    return input.trim()
+  }
+
+  if (input && typeof input === 'object') {
+    try {
+      return JSON.stringify(input)
+    } catch (error) {
+      logger.warn('Failed to stringify input object', {
+        inputType: typeof input,
+        error: error instanceof Error ? error.message : String(error),
+      })
+      return ''
+    }
+  }
+
+  return String(input || '')
+}
+
+/**
+ * Creates a preview of text for logging (truncated)
+ */
+export function createTextPreview(text: string): string {
+  if (text.length <= MAX_PREVIEW_LENGTH) {
+    return text
+  }
+  return `${text.substring(0, MAX_PREVIEW_LENGTH)}...`
+}
+
+/**
+ * Validates tokenization input
+ */
+export function validateTokenizationInput(
+  model: string,
+  inputText: string,
+  outputText: string
+): void {
+  if (!model?.trim()) {
+    throw createTokenizationError('INVALID_MODEL', 'Model is required for tokenization', { model })
+  }
+
+  if (!inputText?.trim() && !outputText?.trim()) {
+    throw createTokenizationError(
+      'MISSING_TEXT',
+      'Either input text or output text must be provided',
+      { inputLength: inputText?.length || 0, outputLength: outputText?.length || 0 }
+    )
+  }
+}
+
+/**
+ * Formats token count for display
+ */
+export function formatTokenCount(count: number): string {
+  if (count === 0) return '0'
+  if (count < 1000) return count.toString()
+  if (count < 1000000) return `${(count / 1000).toFixed(1)}K`
+  return `${(count / 1000000).toFixed(1)}M`
+}
+
+/**
+ * Logs tokenization operation details
+ */
+export function logTokenizationDetails(
+  operation: string,
+  details: {
+    blockId?: string
+    blockType?: string
+    model?: string
+    provider?: string
+    inputLength?: number
+    outputLength?: number
+    tokens?: TokenUsage
+    cost?: { input?: number; output?: number; total?: number }
+    method?: string
+  }
+): void {
+  logger.info(`${operation}`, {
+    blockId: details.blockId,
+    blockType: details.blockType,
+    model: details.model,
+    provider: details.provider,
+    inputLength: details.inputLength,
+    outputLength: details.outputLength,
+    tokens: details.tokens,
+    cost: details.cost,
+    method: details.method,
+  })
+}
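formatTokenCount rounds to one decimal per magnitude band:

formatTokenCount(0) // '0'
formatTokenCount(842) // '842'
formatTokenCount(15_300) // '15.3K'
formatTokenCount(2_450_000) // '2.5M'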
diff --git a/apps/sim/lib/webhooks/utils.ts b/apps/sim/lib/webhooks/utils.ts
index 8606d662b0..6343fcd203 100644
--- a/apps/sim/lib/webhooks/utils.ts
+++ b/apps/sim/lib/webhooks/utils.ts
@@ -2,15 +2,14 @@ import { and, eq, sql } from 'drizzle-orm'
 import { type NextRequest, NextResponse } from 'next/server'
 import { v4 as uuidv4 } from 'uuid'
 import { createLogger } from '@/lib/logs/console-logger'
-import { persistExecutionError, persistExecutionLogs } from '@/lib/logs/execution-logger'
-import { buildTraceSpans } from '@/lib/logs/trace-spans'
+import { EnhancedLoggingSession } from '@/lib/logs/enhanced-logging-session'
 import { hasProcessedMessage, markMessageAsProcessed } from '@/lib/redis'
 import { decryptSecret } from '@/lib/utils'
 import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
 import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
 import { getOAuthToken } from '@/app/api/auth/oauth/utils'
 import { db } from '@/db'
-import { environment, userStats, webhook } from '@/db/schema'
+import { environment as environmentTable, userStats, webhook } from '@/db/schema'
 import { Executor } from '@/executor'
 import { Serializer } from '@/serializer'
 import { mergeSubblockStateAsync } from '@/stores/workflows/server-utils'
@@ -433,47 +432,13 @@ export async function executeWorkflowFromPayload(
     triggerSource: 'webhook-payload',
   })
 
-  // DEBUG: Log specific payload details
-  if (input?.airtableChanges) {
-    logger.debug(`[${requestId}] TRACE: Execution received Airtable input`, {
-      changeCount: input.airtableChanges.length,
-      firstTableId: input.airtableChanges[0]?.tableId,
-      timestamp: new Date().toISOString(),
-    })
-  }
-
-  // Validate and ensure proper input structure
-  if (!input) {
-    logger.warn(`[${requestId}] Empty input for workflow execution, creating empty object`)
-    input = {}
-  }
-
-  // Special handling for Airtable webhook inputs
-  if (input.airtableChanges) {
-    if (!Array.isArray(input.airtableChanges)) {
-      logger.warn(
-        `[${requestId}] Invalid airtableChanges input type (${typeof input.airtableChanges}), converting to array`
-      )
-      // Force to array if somehow not an array
-      input.airtableChanges = [input.airtableChanges]
-    }
-
-    // Log the structure of the payload for debugging
-    logger.info(`[${requestId}] Airtable webhook payload:`, {
-      changeCount: input.airtableChanges.length,
-      hasAirtableChanges: true,
-      sampleTableIds: input.airtableChanges.slice(0, 2).map((c: any) => c.tableId),
-    })
-  }
-
-  // Log the full input format to help diagnose data issues
-  logger.debug(`[${requestId}] Workflow input format:`, {
-    inputKeys: Object.keys(input || {}),
-    hasAirtableChanges: input?.airtableChanges && Array.isArray(input.airtableChanges),
-    airtableChangesCount: input?.airtableChanges?.length || 0,
-  })
+  const loggingSession = new EnhancedLoggingSession(
+    foundWorkflow.id,
+    executionId,
+    'webhook',
+    requestId
+  )
 
-  // Returns void as errors are handled internally
   try {
     // Load workflow data from normalized tables
     logger.debug(`[${requestId}] Loading workflow ${foundWorkflow.id} from normalized tables`)
@@ -511,19 +476,18 @@ export async function executeWorkflowFromPayload(
     })
 
     // Retrieve and decrypt environment variables
-    const envStartTime = Date.now()
     const [userEnv] = await db
       .select()
-      .from(environment)
-      .where(eq(environment.userId, foundWorkflow.userId))
+      .from(environmentTable)
+      .where(eq(environmentTable.userId, foundWorkflow.userId))
       .limit(1)
 
     let decryptedEnvVars: Record<string, string> = {}
     if (userEnv) {
       // Decryption logic
-      const decryptionPromises = Object.entries(userEnv.variables as Record<string, string>).map(
+      const decryptionPromises = Object.entries((userEnv.variables as any) || {}).map(
         async ([key, encryptedValue]) => {
           try {
-            const { decrypted } = await decryptSecret(encryptedValue)
+            const { decrypted } = await decryptSecret(encryptedValue as string)
             return [key, decrypted] as const
           } catch (error: any) {
             logger.error(
@@ -536,18 +500,18 @@ export async function executeWorkflowFromPayload(
       )
       const decryptedEntries = await Promise.all(decryptionPromises)
      decryptedEnvVars = Object.fromEntries(decryptedEntries)
-
-      // DEBUG: Log env vars retrieval
-      logger.debug(`[${requestId}] TRACE: Environment variables decrypted`, {
-        duration: `${Date.now() - envStartTime}ms`,
-        envVarCount: Object.keys(decryptedEnvVars).length,
-      })
     } else {
       logger.debug(`[${requestId}] TRACE: No environment variables found for user`, {
         userId: foundWorkflow.userId,
       })
     }
 
+    await loggingSession.safeStart({
+      userId: foundWorkflow.userId,
+      workspaceId: foundWorkflow.workspaceId,
+      variables: decryptedEnvVars,
+    })
+
     // Process block states (extract subBlock values, parse responseFormat)
     const blockStatesStartTime = Date.now()
     const currentBlockStates = Object.entries(mergedStates).reduce(
@@ -683,6 +647,9 @@ export async function executeWorkflowFromPayload(
       workflowVariables
     )
 
+    // Set up enhanced logging on the executor
+    loggingSession.setupExecutor(executor)
+
     // Log workflow execution start time for tracking
     const executionStartTime = Date.now()
     logger.info(`[${requestId}] TRACE: Executor instantiated, starting workflow execution now`, {
@@ -743,20 +710,45 @@ export async function executeWorkflowFromPayload(
           lastActive: new Date(),
         })
         .where(eq(userStats.userId, foundWorkflow.userId))
-
-      // DEBUG: Log stats update
-      logger.debug(`[${requestId}] TRACE: Workflow stats updated`, {
-        workflowId: foundWorkflow.id,
-        userId: foundWorkflow.userId,
-      })
     }
 
-    // Build and enrich result with trace spans
-    const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
-    const enrichedResult = { ...executionResult, traceSpans, totalDuration }
+    // Calculate total duration for enhanced logging
+    const totalDuration = executionResult.metadata?.duration || 0
+
+    const traceSpans = (executionResult.logs || []).map((blockLog: any, index: number) => {
+      let output = blockLog.output
+      if (!blockLog.success && blockLog.error) {
+        output = {
+          error: blockLog.error,
+          success: false,
+          ...(blockLog.output || {}),
+        }
+      }
 
-    // Persist logs for this execution using the standard 'webhook' trigger type
-    await persistExecutionLogs(foundWorkflow.id, executionId, enrichedResult, 'webhook')
+      return {
+        id: blockLog.blockId,
+        name: `Block ${blockLog.blockName || blockLog.blockType} (${blockLog.blockType || 'unknown'})`,
+        type: blockLog.blockType || 'unknown',
+        duration: blockLog.durationMs || 0,
+        startTime: blockLog.startedAt,
+        endTime: blockLog.endedAt || blockLog.startedAt,
+        status: blockLog.success ? 'success' : 'error',
+        blockId: blockLog.blockId,
+        input: blockLog.input,
+        output: output,
+        tokens: blockLog.output?.tokens?.total || 0,
+        relativeStartMs: index * 100,
+        children: [],
+        toolCalls: (blockLog as any).toolCalls || [],
+      }
+    })
+
+    await loggingSession.safeComplete({
+      endedAt: new Date().toISOString(),
+      totalDurationMs: totalDuration || 0,
+      finalOutput: executionResult.output || {},
+      traceSpans: (traceSpans || []) as any,
+    })
 
     // DEBUG: Final success log
     logger.info(`[${requestId}] TRACE: Execution logs persisted successfully`, {
@@ -781,8 +773,17 @@ export async function executeWorkflowFromPayload(
       error: error.message,
       stack: error.stack,
     })
-    // Persist the error for this execution using the standard 'webhook' trigger type
-    await persistExecutionError(foundWorkflow.id, executionId, error, 'webhook')
+    // Error logging handled by enhanced logging session
+
+    await loggingSession.safeCompleteWithError({
+      endedAt: new Date().toISOString(),
+      totalDurationMs: 0,
+      error: {
+        message: error.message || 'Webhook workflow execution failed',
+        stackTrace: error.stack,
+      },
+    })
+
     // Re-throw the error so the caller knows it failed
     throw error
   }
@@ -914,8 +915,7 @@ export async function fetchAndProcessAirtablePayloads(
   workflowData: any,
   requestId: string // Original request ID from the ping, used for the final execution log
 ) {
-  // Use a prefix derived from requestId for *internal* polling logs/errors
-  const internalPollIdPrefix = `poll-${requestId}`
+  // Enhanced logging handles all error logging
   let currentCursor: number | null = null
   let mightHaveMore = true
   let payloadsFetched = 0 // Track total payloads fetched
@@ -943,12 +943,7 @@ export async function fetchAndProcessAirtablePayloads(
     logger.error(
       `[${requestId}] Missing baseId or externalId in providerConfig for webhook ${webhookData.id}. Cannot fetch payloads.`
     )
-    await persistExecutionError(
-      workflowData.id,
-      `${internalPollIdPrefix}-config-error`,
-      new Error('Missing Airtable baseId or externalId in providerConfig'),
-      'webhook'
-    )
+    // Error logging handled by enhanced logging session
     return // Exit early
   }
 
@@ -984,13 +979,7 @@ export async function fetchAndProcessAirtablePayloads(
         error: initError.message,
         stack: initError.stack,
       })
-      // Persist the error specifically for cursor initialization failure
-      await persistExecutionError(
-        workflowData.id,
-        `${internalPollIdPrefix}-cursor-init-error`,
-        initError,
-        'webhook'
-      )
+      // Error logging handled by enhanced logging session
     }
   }
 
@@ -1028,12 +1017,7 @@ export async function fetchAndProcessAirtablePayloads(
         userId: workflowData.userId,
       }
     )
-    await persistExecutionError(
-      workflowData.id,
-      `${internalPollIdPrefix}-token-error`,
-      tokenError,
-      'webhook'
-    )
+    // Error logging handled by enhanced logging session
     return // Exit early
   }
 
@@ -1097,12 +1081,7 @@ export async function fetchAndProcessAirtablePayloads(
           error: errorMessage,
         }
      )
-      await persistExecutionError(
-        workflowData.id,
-        `${internalPollIdPrefix}-api-error-${apiCallCount}`,
-        new Error(`Airtable API Error: ${errorMessage}`),
-        'webhook'
-      )
+      // Error logging handled by enhanced logging session
       mightHaveMore = false
       break
     }
@@ -1246,12 +1225,7 @@ export async function fetchAndProcessAirtablePayloads(
           cursor: currentCursor,
           error: dbError.message,
         })
-        await persistExecutionError(
-          workflowData.id,
-          `${internalPollIdPrefix}-cursor-persist-error`,
-          dbError,
-          'webhook'
-        )
+        // Error logging handled by enhanced logging session
         mightHaveMore = false
         throw new Error('Failed to save Airtable cursor, stopping processing.') // Re-throw to break loop clearly
       }
@@ -1271,12 +1245,7 @@ export async function fetchAndProcessAirtablePayloads(
       logger.error(
         `[${requestId}] Network error calling Airtable GET /payloads (Call ${apiCallCount}) for webhook ${webhookData.id}`,
         fetchError
       )
-      await persistExecutionError(
-        workflowData.id,
-        `${internalPollIdPrefix}-fetch-error-${apiCallCount}`,
-        fetchError,
-        'webhook'
-      )
+      // Error logging handled by enhanced logging session
      mightHaveMore = false
       break
     }
@@ -1347,13 +1316,7 @@ export async function fetchAndProcessAirtablePayloads(
         error: (error as Error).message,
       }
     )
-    // Persist this higher-level error
-    await persistExecutionError(
-      workflowData.id,
-      `${internalPollIdPrefix}-processing-error`,
-      error as Error,
-      'webhook'
-    )
+    // Error logging handled by enhanced logging session
   }
 
   // DEBUG: Log function completion
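Across the chat, webhook, and Airtable paths the session follows one lifecycle; schematically (names as used in this diff, error shape abridged):

const session = new EnhancedLoggingSession(workflowId, executionId, 'webhook', requestId)

await session.safeStart({ userId, workspaceId, variables }) // open the execution record
session.setupExecutor(executor) // capture block-level events during execution

try {
  const result = await executor.execute(workflowId)
  await session.safeComplete({
    endedAt: new Date().toISOString(),
    totalDurationMs: result.metadata?.duration || 0,
    finalOutput: result.output,
    traceSpans,
  })
} catch (error: any) {
  await session.safeCompleteWithError({
    endedAt: new Date().toISOString(),
    totalDurationMs: 0,
    error: { message: error.message, stackTrace: error.stack },
  })
  throw error
}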
diff --git a/apps/sim/socket-server/index.test.ts b/apps/sim/socket-server/index.test.ts
index d14e375b06..323e16a14d 100644
--- a/apps/sim/socket-server/index.test.ts
+++ b/apps/sim/socket-server/index.test.ts
@@ -84,20 +84,43 @@ describe('Socket Server Index Integration', () => {
     const httpHandler = createHttpHandler(roomManager, logger)
     httpServer.on('request', httpHandler)
 
-    // Start server
-    await new Promise<void>((resolve) => {
+    // Start server with timeout handling
+    await new Promise<void>((resolve, reject) => {
+      const timeout = setTimeout(() => {
+        reject(new Error(`Server failed to start on port ${PORT} within 15 seconds`))
+      }, 15000)
+
       httpServer.listen(PORT, '0.0.0.0', () => {
+        clearTimeout(timeout)
         resolve()
       })
+
+      httpServer.on('error', (err: any) => {
+        clearTimeout(timeout)
+        if (err.code === 'EADDRINUSE') {
+          // Try a different port
+          PORT = 3333 + Math.floor(Math.random() * 1000)
+          httpServer.listen(PORT, '0.0.0.0', () => {
+            resolve()
+          })
+        } else {
+          reject(err)
+        }
+      })
     })
-  })
+  }, 20000)
 
   afterEach(async () => {
+    // Properly close servers and wait for them to fully close
     if (io) {
-      io.close()
+      await new Promise<void>((resolve) => {
+        io.close(() => resolve())
+      })
     }
     if (httpServer) {
-      httpServer.close()
+      await new Promise<void>((resolve) => {
+        httpServer.close(() => resolve())
+      })
     }
     vi.clearAllMocks()
   })
@@ -322,7 +345,7 @@ describe('Socket Server Index Integration', () => {
     expect(() => WorkflowOperationSchema.parse(validEdgeOperation)).not.toThrow()
   })
 
-  it.concurrent('should validate subflow operations', async () => {
+  it('should validate subflow operations', async () => {
     const { WorkflowOperationSchema } = await import('./validation/schemas')
 
     const validSubflowOperation = {