onContextMenu(e, row)}>
-
+ {
+ if (e.button !== 0) return
+ onRowToggle(rowIndex, e.shiftKey)
+ }}
+ >
-
{
- if (e.button !== 0) return
- onRowToggle(rowIndex, e.shiftKey)
- }}
- >
+
0 ? `Stop ${runningCount} running` : 'Run row'}
title={runningCount > 0 ? `Stop ${runningCount} running` : 'Run row'}
className='ml-auto flex h-[20px] w-[20px] shrink-0 items-center justify-center rounded text-[var(--text-primary)] transition-colors hover-hover:bg-[var(--surface-2)]'
+ onMouseDown={(e) => {
+ e.stopPropagation()
+ }}
onClick={() => {
if (runningCount > 0) {
onStopRow(row.id)
@@ -3192,7 +3220,13 @@ const DataRow = React.memo(function DataRow({
onCellMouseDown(rowIndex, colIndex, e.shiftKey)
}}
onMouseEnter={() => onCellMouseEnter(rowIndex, colIndex)}
- onClick={() => onClick(row.id, column.name)}
+ onClick={(e) =>
+ onClick(row.id, column.name, {
+ toggleBoolean: Boolean(
+ (e.target as HTMLElement).closest('[data-boolean-cell-toggle]')
+ ),
+ })
+ }
onDoubleClick={() => onDoubleClick(row.id, column.name, column.key)}
>
{isHighlighted && (isMultiCell || isRowChecked) && (
@@ -3307,9 +3341,23 @@ const SelectAllCheckbox = React.memo(function SelectAllCheckbox({
onCheckedChange: () => void
}) {
return (
-
+ {
+ if (e.button !== 0) return
+ onCheckedChange()
+ }}
+ onKeyDown={(e) => {
+ if (e.key !== ' ' && e.key !== 'Enter') return
+ e.preventDefault()
+ onCheckedChange()
+ }}
+ >
-
+
)
diff --git a/apps/sim/app/workspace/[workspaceId]/tables/components/import-csv-dialog/import-csv-dialog.tsx b/apps/sim/app/workspace/[workspaceId]/tables/components/import-csv-dialog/import-csv-dialog.tsx
index 6ae39b07e75..9da13375a22 100644
--- a/apps/sim/app/workspace/[workspaceId]/tables/components/import-csv-dialog/import-csv-dialog.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/tables/components/import-csv-dialog/import-csv-dialog.tsx
@@ -345,7 +345,7 @@ export function ImportCsvDialog({
{!parsed ? (
-
Upload CSV
+
Import CSV
- Upload CSV
+ Import CSV
)}
diff --git a/apps/sim/app/workspace/[workspaceId]/tables/tables.tsx b/apps/sim/app/workspace/[workspaceId]/tables/tables.tsx
index 30ca41b69fb..3389ab35fcf 100644
--- a/apps/sim/app/workspace/[workspaceId]/tables/tables.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/tables/tables.tsx
@@ -450,7 +450,7 @@ export function Tables() {
? `${uploadProgress.completed}/${uploadProgress.total}`
: uploading
? 'Uploading...'
- : 'Upload CSV'
+ : 'Import CSV'
const handleCreateTable = useCallback(async () => {
const existingNames = tables.map((t) => t.name)
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/selector-combobox/selector-combobox.tsx b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/selector-combobox/selector-combobox.tsx
index f0445fd7d54..0d2964fa6b6 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/selector-combobox/selector-combobox.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/selector-combobox/selector-combobox.tsx
@@ -53,6 +53,7 @@ export function SelectorCombobox({
const {
data: options = [],
isLoading,
+ hasMore,
error,
} = useSelectorOptions(selectorKey, {
context: selectorContext,
@@ -67,6 +68,7 @@ export function SelectorCombobox({
Boolean(activeValue) &&
Boolean(missingOptionLabel) &&
!isLoading &&
+ !hasMore &&
!optionMap.get(activeValue!)
const selectedLabel = activeValue
? hasMissingOption
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts
index f51ed9d8b13..e71041afd05 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-workflow-execution.ts
@@ -10,6 +10,7 @@ import { requestJson } from '@/lib/api/client/request'
import { cancelWorkflowExecutionContract, workflowLogContract } from '@/lib/api/contracts/workflows'
import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import { processStreamingBlockLogs } from '@/lib/tokenization'
+import type { ExecutionPausedData } from '@/lib/workflows/executor/execution-events'
import {
extractTriggerMockPayload,
selectBestTrigger,
@@ -37,7 +38,12 @@ import { hasExecutionResult } from '@/executor/utils/errors'
import { coerceValue } from '@/executor/utils/start-block'
import { subscriptionKeys } from '@/hooks/queries/subscription'
import { getWorkflows } from '@/hooks/queries/utils/workflow-cache'
-import { isExecutionStreamHttpError, useExecutionStream } from '@/hooks/use-execution-stream'
+import {
+ isExecutionStreamHttpError,
+ SSEEventHandlerError,
+ SSEStreamInterruptedError,
+ useExecutionStream,
+} from '@/hooks/use-execution-stream'
import { WorkflowValidationError } from '@/serializer'
import { defaultWorkflowExecutionState, useExecutionStore } from '@/stores/execution'
import { useNotificationStore } from '@/stores/notifications'
@@ -63,10 +69,12 @@ const logger = createLogger('useWorkflowExecution')
*/
const activeReconnections = new Set()
-function isReconnectTerminal(error: unknown): boolean {
+function isReconnectNonRetryable(error: unknown): boolean {
+ const message = error instanceof Error ? error.message : ''
return (
- isExecutionStreamHttpError(error) &&
- (error.httpStatus === 404 || error.httpStatus === 403 || error.httpStatus === 401)
+ message.includes('Execution events pruned before requested event id') ||
+ (isExecutionStreamHttpError(error) &&
+ (error.httpStatus === 404 || error.httpStatus === 403 || error.httpStatus === 401))
)
}
@@ -77,10 +85,25 @@ interface DebugValidationResult {
const WORKFLOW_EXECUTION_FAILURE_MESSAGE = 'Workflow execution failed'
+async function persistExecutionPointerProgress(
+ workflowId: string,
+ executionId: string,
+ lastEventId: number
+): Promise {
+ await consolePersistence.persist()
+ await saveExecutionPointer({ workflowId, executionId, lastEventId })
+}
+
function isRecord(value: unknown): value is Record {
return typeof value === 'object' && value !== null
}
+function isRecoverableStreamRecoveryError(
+ error: unknown
+): error is SSEEventHandlerError | SSEStreamInterruptedError {
+ return error instanceof SSEEventHandlerError || error instanceof SSEStreamInterruptedError
+}
+
function sanitizeMessage(value: unknown): string | undefined {
if (typeof value !== 'string') return undefined
const trimmed = value.trim()
@@ -120,16 +143,23 @@ export function useWorkflowExecution() {
const queryClient = useQueryClient()
const currentWorkflow = useCurrentWorkflow()
const activeWorkflowId = useWorkflowRegistry((s) => s.activeWorkflowId)
- const { toggleConsole, addConsole, updateConsole, cancelRunningEntries, clearExecutionEntries } =
- useTerminalConsoleStore(
- useShallow((s) => ({
- toggleConsole: s.toggleConsole,
- addConsole: s.addConsole,
- updateConsole: s.updateConsole,
- cancelRunningEntries: s.cancelRunningEntries,
- clearExecutionEntries: s.clearExecutionEntries,
- }))
- )
+ const {
+ toggleConsole,
+ addConsole,
+ updateConsole,
+ cancelRunningEntries,
+ finishRunningEntries,
+ clearExecutionEntries,
+ } = useTerminalConsoleStore(
+ useShallow((s) => ({
+ toggleConsole: s.toggleConsole,
+ addConsole: s.addConsole,
+ updateConsole: s.updateConsole,
+ cancelRunningEntries: s.cancelRunningEntries,
+ finishRunningEntries: s.finishRunningEntries,
+ clearExecutionEntries: s.clearExecutionEntries,
+ }))
+ )
const hasHydrated = useTerminalConsoleStore((s) => s._hasHydrated)
const { getVariablesByWorkflowId, variables } = useVariablesStore(
useShallow((s) => ({
@@ -157,10 +187,15 @@ export function useWorkflowExecution() {
const setIsExecuting = useCallback(
(workflowId: string, executing: boolean) => {
+ const wasExecuting = useExecutionStore.getState().getWorkflowExecution(workflowId).isExecuting
if (executing) {
- consolePersistence.executionStarted()
+ if (!wasExecuting) {
+ consolePersistence.executionStarted()
+ }
} else {
- consolePersistence.executionEnded()
+ if (wasExecuting) {
+ consolePersistence.executionEnded()
+ }
clearExecutionPointer(workflowId)
}
rawSetIsExecuting(workflowId, executing)
@@ -178,8 +213,10 @@ export function useWorkflowExecution() {
const getLastExecutionSnapshot = useExecutionStore((s) => s.getLastExecutionSnapshot)
const clearLastExecutionSnapshot = useExecutionStore((s) => s.clearLastExecutionSnapshot)
const [executionResult, setExecutionResult] = useState(null)
+ const [reconnectAttemptNonce, setReconnectAttemptNonce] = useState(0)
const executionStream = useExecutionStream()
const currentChatExecutionIdRef = useRef(null)
+ const runFromBlockOwnerRef = useRef(null)
const lastSeenEventIdRef = useRef(0)
const isViewingDiff = useWorkflowDiffStore((state) => state.isShowingDiff)
const addNotification = useNotificationStore((state) => state.addNotification)
@@ -438,6 +475,7 @@ export function useWorkflowExecution() {
if (isChatExecution) {
let isCancelled = false
const executionId = generateId()
+ let preserveChatExecutionForRecovery = false
currentChatExecutionIdRef.current = executionId
const stream = new ReadableStream({
async start(controller) {
@@ -712,6 +750,15 @@ export function useWorkflowExecution() {
// Note: Logs are already persisted server-side via execution-core.ts
}
} catch (error: any) {
+ if (isRecoverableStreamRecoveryError(error)) {
+ preserveChatExecutionForRecovery = true
+ logger.warn('Chat workflow stream interrupted; waiting for reconnect replay', {
+ workflowId: activeWorkflowId,
+ executionId: error.executionId,
+ error: error.message,
+ })
+ return
+ }
// Create a proper error result for logging
const errorResult = {
success: false,
@@ -733,7 +780,10 @@ export function useWorkflowExecution() {
if (!isCancelled) {
controller.close()
}
- if (currentChatExecutionIdRef.current === executionId) {
+ if (
+ !preserveChatExecutionForRecovery &&
+ currentChatExecutionIdRef.current === executionId
+ ) {
setIsExecuting(activeWorkflowId, false)
setIsDebugging(activeWorkflowId, false)
setActiveBlocks(activeWorkflowId, new Set())
@@ -764,6 +814,10 @@ export function useWorkflowExecution() {
}
return result
} catch (error: any) {
+ if (isRecoverableStreamRecoveryError(error)) {
+ handleExecutionError(error, { executionId: manualExecutionId })
+ throw error
+ }
const errorResult = handleExecutionError(error, { executionId: manualExecutionId })
return errorResult
}
@@ -1047,6 +1101,7 @@ export function useWorkflowExecution() {
loops: latestWorkflowState.loops,
parallels: latestWorkflowState.parallels,
}
+ let executionFinished = false
await executionStream.execute({
workflowId: activeWorkflowId,
@@ -1073,8 +1128,16 @@ export function useWorkflowExecution() {
})
},
callbacks: {
- onEventId: (eventId) => {
+ onEventId: async (eventId) => {
+ if (executionFinished) return
lastSeenEventIdRef.current = eventId
+ if (eventId % 5 === 0 && activeWorkflowId && executionIdRef.current) {
+ await persistExecutionPointerProgress(
+ activeWorkflowId,
+ executionIdRef.current,
+ eventId
+ )
+ }
},
onExecutionStarted: (data) => {
@@ -1121,6 +1184,7 @@ export function useWorkflowExecution() {
},
onExecutionCompleted: (data) => {
+ executionFinished = true
if (
activeWorkflowId &&
executionIdRef.current &&
@@ -1137,7 +1201,7 @@ export function useWorkflowExecution() {
executionIdRef.current,
data.finalBlockLogs
)
- cancelRunningEntries(activeWorkflowId)
+ finishRunningEntries(activeWorkflowId, executionIdRef.current)
}
executionResult = {
@@ -1216,7 +1280,52 @@ export function useWorkflowExecution() {
}
},
+ onExecutionPaused: (data: ExecutionPausedData) => {
+ executionFinished = true
+ if (
+ activeWorkflowId &&
+ executionIdRef.current &&
+ useExecutionStore.getState().getCurrentExecutionId(activeWorkflowId) !==
+ executionIdRef.current
+ )
+ return
+
+ if (activeWorkflowId) {
+ setCurrentExecutionId(activeWorkflowId, null)
+ reconcileFinalBlockLogs(
+ updateConsole,
+ activeWorkflowId,
+ executionIdRef.current,
+ data.finalBlockLogs
+ )
+ finishRunningEntries(activeWorkflowId, executionIdRef.current)
+ }
+
+ executionResult = {
+ success: true,
+ output: data.output,
+ metadata: {
+ duration: data.duration,
+ startTime: data.startTime,
+ endTime: data.endTime,
+ },
+ logs: accumulatedBlockLogs,
+ }
+
+ const workflowExecState = activeWorkflowId
+ ? useExecutionStore.getState().getWorkflowExecution(activeWorkflowId)
+ : null
+ if (activeWorkflowId && !workflowExecState?.isDebugging) {
+ setExecutionResult(executionResult)
+ if (!isExecutingFromChat) {
+ setIsExecuting(activeWorkflowId, false)
+ setActiveBlocks(activeWorkflowId, new Set())
+ }
+ }
+ },
+
onExecutionError: (data) => {
+ executionFinished = true
if (
activeWorkflowId &&
executionIdRef.current &&
@@ -1258,6 +1367,7 @@ export function useWorkflowExecution() {
},
onExecutionCancelled: (data) => {
+ executionFinished = true
if (
activeWorkflowId &&
executionIdRef.current &&
@@ -1288,6 +1398,10 @@ export function useWorkflowExecution() {
return executionResult
} catch (error: any) {
+ if (isRecoverableStreamRecoveryError(error)) {
+ handleExecutionError(error, { executionId: executionIdRef.current })
+ throw error
+ }
if (error.name === 'AbortError' || error.message?.includes('aborted')) {
logger.info('Execution aborted by user')
return executionResult
@@ -1345,6 +1459,12 @@ export function useWorkflowExecution() {
blockName: error.blockName || 'Workflow',
blockType: error.blockType || 'serializer',
})
+ } else if (isRecoverableStreamRecoveryError(error)) {
+ logger.warn('Execution stream needs reconnect without authoritative terminal state', {
+ workflowId: activeWorkflowId,
+ executionId: error.executionId ?? options?.executionId,
+ error: error.message,
+ })
} else {
sharedAddExecutionErrorConsoleEntry(storeAddConsole, {
workflowId: activeWorkflowId || '',
@@ -1365,6 +1485,13 @@ export function useWorkflowExecution() {
}
}
+ if (isRecoverableStreamRecoveryError(error)) {
+ if (activeWorkflowId) {
+ setReconnectAttemptNonce((nonce) => nonce + 1)
+ }
+ return errorResult
+ }
+
setExecutionResult(errorResult)
if (activeWorkflowId) {
setIsExecuting(activeWorkflowId, false)
@@ -1545,21 +1672,41 @@ export function useWorkflowExecution() {
const storedExecutionId = getCurrentExecutionId(activeWorkflowId)
if (storedExecutionId) {
- setCurrentExecutionId(activeWorkflowId, null)
- requestJson(cancelWorkflowExecutionContract, {
+ void requestJson(cancelWorkflowExecutionContract, {
params: { id: activeWorkflowId, executionId: storedExecutionId },
- }).catch(() => {})
- handleExecutionCancelledConsole({
- workflowId: activeWorkflowId,
- executionId: storedExecutionId,
})
- }
+ .then((result) => {
+ if (!result.success) {
+ logger.warn('Workflow execution cancellation was not confirmed', {
+ workflowId: activeWorkflowId,
+ executionId: storedExecutionId,
+ reason: result.reason,
+ })
+ return
+ }
- executionStream.cancel(activeWorkflowId)
- currentChatExecutionIdRef.current = null
- setIsExecuting(activeWorkflowId, false)
- setIsDebugging(activeWorkflowId, false)
- setActiveBlocks(activeWorkflowId, new Set())
+ const currentId = getCurrentExecutionId(activeWorkflowId)
+ if (currentId !== storedExecutionId) return
+
+ logger.info('Workflow execution cancellation confirmed; awaiting terminal event', {
+ workflowId: activeWorkflowId,
+ executionId: storedExecutionId,
+ })
+ })
+ .catch((error) => {
+ logger.warn('Failed to request workflow execution cancellation', {
+ workflowId: activeWorkflowId,
+ executionId: storedExecutionId,
+ error,
+ })
+ })
+ } else {
+ executionStream.cancel(activeWorkflowId)
+ currentChatExecutionIdRef.current = null
+ setIsExecuting(activeWorkflowId, false)
+ setIsDebugging(activeWorkflowId, false)
+ setActiveBlocks(activeWorkflowId, new Set())
+ }
if (isDebugging) {
resetDebugState()
@@ -1573,8 +1720,6 @@ export function useWorkflowExecution() {
setActiveBlocks,
activeWorkflowId,
getCurrentExecutionId,
- setCurrentExecutionId,
- handleExecutionCancelledConsole,
])
/**
@@ -1674,13 +1819,29 @@ export function useWorkflowExecution() {
}
setIsExecuting(workflowId, true)
+ const runOwnerId = generateId()
+ runFromBlockOwnerRef.current = runOwnerId
const executionIdRef = { current: '' }
const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map()
const executedBlockIds = new Set()
const activeBlocksSet = new Set()
const activeBlockRefCounts = new Map()
+ const isCurrentRunFromBlockExecution = () => {
+ return (
+ Boolean(executionIdRef.current) &&
+ getCurrentExecutionId(workflowId) === executionIdRef.current
+ )
+ }
+ const clearRunFromBlockExecutionState = () => {
+ if (!isCurrentRunFromBlockExecution()) return false
+ setCurrentExecutionId(workflowId, null)
+ setIsExecuting(workflowId, false)
+ setActiveBlocks(workflowId, new Set())
+ return true
+ }
+ let preserveExecutionForRecovery = false
try {
const blockHandlers = buildBlockEventHandlers({
workflowId,
@@ -1700,23 +1861,33 @@ export function useWorkflowExecution() {
sourceSnapshot: effectiveSnapshot,
input: workflowInput,
onExecutionId: (id) => {
+ if (runFromBlockOwnerRef.current !== runOwnerId) return
executionIdRef.current = id
setCurrentExecutionId(workflowId, id)
+ saveExecutionPointer({
+ workflowId,
+ executionId: id,
+ lastEventId: 0,
+ })
},
callbacks: {
+ onEventId: async (eventId) => {
+ if (executionIdRef.current && !isCurrentRunFromBlockExecution()) return
+ if (eventId % 5 === 0 && executionIdRef.current) {
+ await persistExecutionPointerProgress(workflowId, executionIdRef.current, eventId)
+ }
+ },
+
onBlockStarted: blockHandlers.onBlockStarted,
onBlockCompleted: blockHandlers.onBlockCompleted,
onBlockError: blockHandlers.onBlockError,
onBlockChildWorkflowStarted: blockHandlers.onBlockChildWorkflowStarted,
onExecutionCompleted: (data) => {
- reconcileFinalBlockLogs(
- updateConsole,
- workflowId,
- executionIdRef.current,
- data.finalBlockLogs
- )
- cancelRunningEntries(workflowId)
+ if (!isCurrentRunFromBlockExecution()) return
+ const executionId = executionIdRef.current
+ reconcileFinalBlockLogs(updateConsole, workflowId, executionId, data.finalBlockLogs)
+ finishRunningEntries(workflowId, executionId)
if (data.success) {
executedBlockIds.add(blockId)
@@ -1743,12 +1914,31 @@ export function useWorkflowExecution() {
setLastExecutionSnapshot(workflowId, updatedSnapshot)
}
- setCurrentExecutionId(workflowId, null)
- setIsExecuting(workflowId, false)
- setActiveBlocks(workflowId, new Set())
+ clearRunFromBlockExecutionState()
+ },
+
+ onExecutionPaused: (data) => {
+ if (!isCurrentRunFromBlockExecution()) return
+ const executionId = executionIdRef.current
+ reconcileFinalBlockLogs(updateConsole, workflowId, executionId, data.finalBlockLogs)
+ finishRunningEntries(workflowId, executionId)
+
+ clearRunFromBlockExecutionState()
+ setExecutionResult({
+ success: true,
+ output: data.output,
+ metadata: {
+ duration: data.duration,
+ startTime: data.startTime,
+ endTime: data.endTime,
+ },
+ logs: accumulatedBlockLogs,
+ })
},
onExecutionError: (data) => {
+ if (!isCurrentRunFromBlockExecution()) return
+ const executionId = executionIdRef.current
const isWorkflowModified =
data.error?.includes('Block not found in workflow') ||
data.error?.includes('Upstream dependency not executed')
@@ -1765,42 +1955,70 @@ export function useWorkflowExecution() {
handleExecutionErrorConsole({
workflowId,
- executionId: executionIdRef.current,
+ executionId,
error: data.error,
durationMs: data.duration,
blockLogs: accumulatedBlockLogs,
finalBlockLogs: data.finalBlockLogs,
})
- setCurrentExecutionId(workflowId, null)
- setIsExecuting(workflowId, false)
- setActiveBlocks(workflowId, new Set())
+ clearRunFromBlockExecutionState()
},
onExecutionCancelled: (data) => {
+ if (!isCurrentRunFromBlockExecution()) return
+ const executionId = executionIdRef.current
handleExecutionCancelledConsole({
workflowId,
- executionId: executionIdRef.current,
+ executionId,
durationMs: data?.duration,
finalBlockLogs: data?.finalBlockLogs,
})
- setCurrentExecutionId(workflowId, null)
- setIsExecuting(workflowId, false)
- setActiveBlocks(workflowId, new Set())
+ clearRunFromBlockExecutionState()
},
},
})
} catch (error) {
- if ((error as Error).name !== 'AbortError') {
+ if (isRecoverableStreamRecoveryError(error)) {
+ preserveExecutionForRecovery = true
+ logger.warn('Run-from-block stream interrupted; preserving execution for replay', {
+ workflowId,
+ executionId: error.executionId ?? executionIdRef.current,
+ eventType: error instanceof SSEEventHandlerError ? error.eventType : undefined,
+ eventId: error instanceof SSEEventHandlerError ? error.eventId : undefined,
+ error: error.message,
+ })
+ setReconnectAttemptNonce((nonce) => nonce + 1)
+ } else if ((error as Error).name !== 'AbortError') {
logger.error('Run-from-block failed:', error)
}
} finally {
- const currentId = getCurrentExecutionId(workflowId)
- if (currentId === null || currentId === executionIdRef.current) {
- setCurrentExecutionId(workflowId, null)
- setIsExecuting(workflowId, false)
- setActiveBlocks(workflowId, new Set())
+ if (preserveExecutionForRecovery) {
+ if (runFromBlockOwnerRef.current === runOwnerId) {
+ runFromBlockOwnerRef.current = null
+ }
+ } else {
+ const currentId = getCurrentExecutionId(workflowId)
+ if (executionIdRef.current && currentId === executionIdRef.current) {
+ setCurrentExecutionId(workflowId, null)
+ setIsExecuting(workflowId, false)
+ setActiveBlocks(workflowId, new Set())
+ if (runFromBlockOwnerRef.current === runOwnerId) {
+ runFromBlockOwnerRef.current = null
+ }
+ } else if (
+ !executionIdRef.current &&
+ currentId === null &&
+ runFromBlockOwnerRef.current === runOwnerId
+ ) {
+ const workflowExecState = useExecutionStore.getState().getWorkflowExecution(workflowId)
+ if (workflowExecState.isExecuting) {
+ setIsExecuting(workflowId, false)
+ setActiveBlocks(workflowId, new Set())
+ }
+ runFromBlockOwnerRef.current = null
+ }
}
}
},
@@ -1814,6 +2032,9 @@ export function useWorkflowExecution() {
setActiveBlocks,
setBlockRunStatus,
setEdgeRunStatus,
+ updateConsole,
+ finishRunningEntries,
+ setExecutionResult,
addNotification,
buildBlockEventHandlers,
handleExecutionErrorConsole,
@@ -1843,21 +2064,9 @@ export function useWorkflowExecution() {
} catch (error) {
const errorResult = handleExecutionError(error, { executionId })
return errorResult
- } finally {
- setCurrentExecutionId(workflowId, null)
- setIsExecuting(workflowId, false)
- setIsDebugging(workflowId, false)
- setActiveBlocks(workflowId, new Set())
}
},
- [
- activeWorkflowId,
- setCurrentExecutionId,
- setExecutionResult,
- setIsExecuting,
- setIsDebugging,
- setActiveBlocks,
- ]
+ [activeWorkflowId, setExecutionResult, setIsExecuting]
)
useEffect(() => {
@@ -1866,8 +2075,16 @@ export function useWorkflowExecution() {
let cleanupRan = false
let reconnectionComplete = false
+ let ownsReconnect = false
+ let ownedReconnectExecutionId: string | null = null
const reconnectWorkflowId = activeWorkflowId
+ const releaseReconnectOwnership = () => {
+ activeReconnections.delete(reconnectWorkflowId)
+ ownsReconnect = false
+ ownedReconnectExecutionId = null
+ }
+
const runReconnect = async () => {
let executionId: string | undefined
let fromEventId = 0
@@ -1883,34 +2100,30 @@ export function useWorkflowExecution() {
// fall through to console entries
}
- if (!executionId) {
- const entries = useTerminalConsoleStore.getState().getWorkflowEntries(reconnectWorkflowId)
- const runningEntries = entries.filter(
- (e) => e.isRunning && e.workflowId === reconnectWorkflowId && e.executionId
- )
- if (runningEntries.length === 0) return
-
- const sorted = [...runningEntries].sort((a, b) => {
- const aTime = a.startedAt ? new Date(a.startedAt).getTime() : 0
- const bTime = b.startedAt ? new Date(b.startedAt).getTime() : 0
- return bTime - aTime
- })
- executionId = sorted[0].executionId!
-
- const otherExecutionIds = new Set(
- sorted.filter((e) => e.executionId !== executionId).map((e) => e.executionId!)
- )
- if (otherExecutionIds.size > 0) {
- cancelRunningEntries(reconnectWorkflowId)
- consolePersistence.persist()
+ if (!executionId || cleanupRan) return
+ const capturedExecutionId = executionId
+ const canReconnectClaimWorkflow = () => {
+ const executionState = useExecutionStore
+ .getState()
+ .getWorkflowExecution(reconnectWorkflowId)
+ const currentId = executionState?.currentExecutionId ?? null
+ if (currentId) return currentId === capturedExecutionId
+ return !executionState?.isExecuting
+ }
+ const clearCapturedExecutionPointer = async () => {
+ const pointer = await loadExecutionPointer(reconnectWorkflowId).catch(() => null)
+ if (pointer?.executionId === capturedExecutionId) {
+ await clearExecutionPointer(reconnectWorkflowId)
}
}
-
- if (!executionId || cleanupRan) return
+ if (!canReconnectClaimWorkflow()) {
+ await clearCapturedExecutionPointer()
+ return
+ }
if (activeReconnections.has(reconnectWorkflowId)) return
activeReconnections.add(reconnectWorkflowId)
-
- executionStream.cancel(reconnectWorkflowId)
+ ownsReconnect = true
+ executionStream.cancelExecute(reconnectWorkflowId)
const workflowEdges = useWorkflowStore.getState().edges
const activeBlocksSet = new Set()
@@ -1932,47 +2145,80 @@ export function useWorkflowExecution() {
includeStartConsoleEntry: true,
})
- const capturedExecutionId = executionId
+ ownedReconnectExecutionId = capturedExecutionId
const MAX_ATTEMPTS = 5
const BASE_DELAY_MS = 1000
const MAX_DELAY_MS = 15000
let activated = false
+ let activationStartedPersistence = false
+ const isReconnectStillCurrent = canReconnectClaimWorkflow
+ const stopStaleReconnect = () => {
+ reconnectionComplete = true
+ if (ownedReconnectExecutionId) {
+ executionStream.cancelReconnect(reconnectWorkflowId, ownedReconnectExecutionId)
+ }
+ releaseReconnectOwnership()
+ }
+ const releaseActivatedReconnectState = () => {
+ if (!activated) return
+ const currentId = useExecutionStore.getState().getCurrentExecutionId(reconnectWorkflowId)
+ if (currentId !== capturedExecutionId) return
+ setCurrentExecutionId(reconnectWorkflowId, null)
+ if (activationStartedPersistence) {
+ consolePersistence.executionEnded()
+ activationStartedPersistence = false
+ }
+ rawSetIsExecuting(reconnectWorkflowId, false)
+ setActiveBlocks(reconnectWorkflowId, new Set())
+ }
+ const releaseReconnectStateWithoutTerminal = () => {
+ const executionState = useExecutionStore
+ .getState()
+ .getWorkflowExecution(reconnectWorkflowId)
+ const currentId = executionState?.currentExecutionId ?? null
+ if (currentId && currentId !== capturedExecutionId) return
+ finishRunningEntries(reconnectWorkflowId, capturedExecutionId)
+ setCurrentExecutionId(reconnectWorkflowId, null)
+ setIsExecuting(reconnectWorkflowId, false)
+ setActiveBlocks(reconnectWorkflowId, new Set())
+ activationStartedPersistence = false
+ }
+ const scheduleRetryableReconnect = () => {
+ releaseReconnectOwnership()
+ setTimeout(() => {
+ if (!cleanupRan && !reconnectionComplete) {
+ setReconnectAttemptNonce((nonce) => nonce + 1)
+ }
+ }, MAX_DELAY_MS)
+ }
const ensureActivated = () => {
- if (activated || cleanupRan) return
- activated = true
- setCurrentExecutionId(reconnectWorkflowId, capturedExecutionId)
- setIsExecuting(reconnectWorkflowId, true)
- clearExecutionEntries(capturedExecutionId)
+ if (cleanupRan || reconnectionComplete) return false
+ if (!isReconnectStillCurrent()) {
+ stopStaleReconnect()
+ return false
+ }
+ if (!activated) {
+ activated = true
+ activationStartedPersistence = !useExecutionStore
+ .getState()
+ .getWorkflowExecution(reconnectWorkflowId).isExecuting
+ setCurrentExecutionId(reconnectWorkflowId, capturedExecutionId)
+ setIsExecuting(reconnectWorkflowId, true)
+ if (fromEventId === 0) {
+ clearExecutionEntries(capturedExecutionId)
+ }
+ }
+ return true
}
const wrapHandler =
(handler: (data: T) => void) =>
(data: T) => {
- ensureActivated()
+ if (!ensureActivated()) return
handler(data)
}
- const cleanupFailedReconnect = () => {
- const currentId = useExecutionStore.getState().getCurrentExecutionId(reconnectWorkflowId)
- if (currentId && currentId !== capturedExecutionId) return
-
- const hasRunningEntry = useTerminalConsoleStore
- .getState()
- .getWorkflowEntries(reconnectWorkflowId)
- .some((entry) => entry.isRunning && entry.executionId === capturedExecutionId)
-
- if (activated || hasRunningEntry) {
- cancelRunningEntries(reconnectWorkflowId)
- }
-
- if (currentId === capturedExecutionId) {
- setCurrentExecutionId(reconnectWorkflowId, null)
- setIsExecuting(reconnectWorkflowId, false)
- setActiveBlocks(reconnectWorkflowId, new Set())
- }
- }
-
const attemptReconnect = async (attempt: number): Promise => {
if (cleanupRan || reconnectionComplete) return
@@ -1988,50 +2234,81 @@ export function useWorkflowExecution() {
executionId: capturedExecutionId,
fromEventId,
callbacks: {
- onEventId: (eid) => {
- ensureActivated()
+ onEventId: async (eid) => {
+ if (reconnectionComplete) return
+ if (!isReconnectStillCurrent()) {
+ stopStaleReconnect()
+ return
+ }
fromEventId = eid
+ if (eid % 5 === 0) {
+ await persistExecutionPointerProgress(
+ reconnectWorkflowId,
+ capturedExecutionId,
+ eid
+ )
+ }
},
onBlockStarted: wrapHandler(handlers.onBlockStarted),
onBlockCompleted: wrapHandler(handlers.onBlockCompleted),
onBlockError: wrapHandler(handlers.onBlockError),
onBlockChildWorkflowStarted: wrapHandler(handlers.onBlockChildWorkflowStarted),
onExecutionCompleted: (data) => {
+ if (!ensureActivated()) return
reconnectionComplete = true
- activeReconnections.delete(reconnectWorkflowId)
- if (!activated) {
- clearExecutionPointer(reconnectWorkflowId)
- return
- }
+ releaseReconnectOwnership()
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== capturedExecutionId) return
- setCurrentExecutionId(reconnectWorkflowId, null)
- setIsExecuting(reconnectWorkflowId, false)
- setActiveBlocks(reconnectWorkflowId, new Set())
reconcileFinalBlockLogs(
updateConsole,
reconnectWorkflowId,
capturedExecutionId,
data?.finalBlockLogs
)
- cancelRunningEntries(reconnectWorkflowId)
+ finishRunningEntries(reconnectWorkflowId, capturedExecutionId)
+ setCurrentExecutionId(reconnectWorkflowId, null)
+ setIsExecuting(reconnectWorkflowId, false)
+ setActiveBlocks(reconnectWorkflowId, new Set())
},
- onExecutionError: (data) => {
+ onExecutionPaused: (data) => {
+ if (!ensureActivated()) return
reconnectionComplete = true
- activeReconnections.delete(reconnectWorkflowId)
- if (!activated) {
- clearExecutionPointer(reconnectWorkflowId)
- return
- }
+ releaseReconnectOwnership()
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== capturedExecutionId) return
+ reconcileFinalBlockLogs(
+ updateConsole,
+ reconnectWorkflowId,
+ capturedExecutionId,
+ data.finalBlockLogs
+ )
+ finishRunningEntries(reconnectWorkflowId, capturedExecutionId)
setCurrentExecutionId(reconnectWorkflowId, null)
setIsExecuting(reconnectWorkflowId, false)
setActiveBlocks(reconnectWorkflowId, new Set())
+ setExecutionResult({
+ success: true,
+ output: data.output,
+ metadata: {
+ duration: data.duration,
+ startTime: data.startTime,
+ endTime: data.endTime,
+ },
+ logs: accumulatedBlockLogs,
+ })
+ },
+ onExecutionError: (data) => {
+ if (!ensureActivated()) return
+ reconnectionComplete = true
+ releaseReconnectOwnership()
+ const currentId = useExecutionStore
+ .getState()
+ .getCurrentExecutionId(reconnectWorkflowId)
+ if (currentId !== capturedExecutionId) return
handleExecutionErrorConsole({
workflowId: reconnectWorkflowId,
executionId: capturedExecutionId,
@@ -2039,39 +2316,40 @@ export function useWorkflowExecution() {
blockLogs: accumulatedBlockLogs,
finalBlockLogs: data.finalBlockLogs,
})
+ setCurrentExecutionId(reconnectWorkflowId, null)
+ setIsExecuting(reconnectWorkflowId, false)
+ setActiveBlocks(reconnectWorkflowId, new Set())
},
onExecutionCancelled: (data) => {
+ if (!ensureActivated()) return
reconnectionComplete = true
- activeReconnections.delete(reconnectWorkflowId)
- if (!activated) {
- clearExecutionPointer(reconnectWorkflowId)
- return
- }
+ releaseReconnectOwnership()
const currentId = useExecutionStore
.getState()
.getCurrentExecutionId(reconnectWorkflowId)
if (currentId !== capturedExecutionId) return
- setCurrentExecutionId(reconnectWorkflowId, null)
- setIsExecuting(reconnectWorkflowId, false)
- setActiveBlocks(reconnectWorkflowId, new Set())
handleExecutionCancelledConsole({
workflowId: reconnectWorkflowId,
executionId: capturedExecutionId,
durationMs: data?.duration,
finalBlockLogs: data?.finalBlockLogs,
})
+ setCurrentExecutionId(reconnectWorkflowId, null)
+ setIsExecuting(reconnectWorkflowId, false)
+ setActiveBlocks(reconnectWorkflowId, new Set())
},
},
})
} catch (error) {
- if (isReconnectTerminal(error)) {
+ if (isReconnectNonRetryable(error)) {
logger.info('Reconnection skipped; run buffer no longer exists', {
executionId: capturedExecutionId,
})
reconnectionComplete = true
- activeReconnections.delete(reconnectWorkflowId)
- clearExecutionPointer(reconnectWorkflowId)
- cleanupFailedReconnect()
+ releaseReconnectStateWithoutTerminal()
+ await consolePersistence.persist()
+ releaseReconnectOwnership()
+ await clearCapturedExecutionPointer()
return
}
@@ -2084,27 +2362,17 @@ export function useWorkflowExecution() {
return attemptReconnect(attempt + 1)
}
if (!cleanupRan && !reconnectionComplete) {
- reconnectionComplete = true
- activeReconnections.delete(reconnectWorkflowId)
- cleanupFailedReconnect()
+ scheduleRetryableReconnect()
+ await consolePersistence.persist()
return
}
}
if (!reconnectionComplete && !cleanupRan) {
reconnectionComplete = true
- activeReconnections.delete(reconnectWorkflowId)
- if (activated) {
- const currentId = useExecutionStore
- .getState()
- .getCurrentExecutionId(reconnectWorkflowId)
- if (currentId === capturedExecutionId) {
- cancelRunningEntries(reconnectWorkflowId)
- setCurrentExecutionId(reconnectWorkflowId, null)
- setIsExecuting(reconnectWorkflowId, false)
- setActiveBlocks(reconnectWorkflowId, new Set())
- }
- }
+ releaseActivatedReconnectState()
+ await consolePersistence.persist()
+ releaseReconnectOwnership()
}
}
@@ -2115,11 +2383,15 @@ export function useWorkflowExecution() {
return () => {
cleanupRan = true
- executionStream.cancel(reconnectWorkflowId)
- activeReconnections.delete(reconnectWorkflowId)
+ if (ownsReconnect) {
+ if (ownedReconnectExecutionId) {
+ executionStream.cancelReconnect(reconnectWorkflowId, ownedReconnectExecutionId)
+ }
+ releaseReconnectOwnership()
+ }
}
// eslint-disable-next-line react-hooks/exhaustive-deps
- }, [activeWorkflowId, hasHydrated])
+ }, [activeWorkflowId, hasHydrated, reconnectAttemptNonce])
return {
isExecuting,
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.test.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.test.ts
index 13840aad4cd..d2c999beef0 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.test.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.test.ts
@@ -5,14 +5,196 @@ import { resetTerminalConsoleMock, terminalConsoleMockFns } from '@sim/testing'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import {
addExecutionErrorConsoleEntry,
+ createBlockEventHandlers,
handleExecutionErrorConsole,
reconcileFinalBlockLogs,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils'
import type { BlockLog } from '@/executor/types'
+import { useExecutionStore } from '@/stores/execution'
describe('workflow-execution-utils', () => {
beforeEach(() => {
resetTerminalConsoleMock()
+ vi.mocked(useExecutionStore.getState).mockReturnValue({
+ getCurrentExecutionId: vi.fn(() => 'exec-1'),
+ } as any)
+ })
+
+ describe('createBlockEventHandlers', () => {
+ it('skips duplicate block start rows during reconnect replay', () => {
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'fn-1',
+ blockName: 'Function 1',
+ blockType: 'function',
+ executionId: 'exec-1',
+ executionOrder: 7,
+ isRunning: false,
+ success: true,
+ iterationCurrent: 0,
+ iterationTotal: 2,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ childWorkflowBlockId: 'child-inst-1',
+ childWorkflowName: 'Child Workflow',
+ parentIterations: [
+ {
+ iterationCurrent: 1,
+ iterationTotal: 3,
+ iterationType: 'parallel',
+ iterationContainerId: 'parallel-1',
+ },
+ ],
+ })
+
+ const addConsole = vi.fn()
+ const handlers = createBlockEventHandlers(
+ {
+ workflowId: 'wf-1',
+ executionIdRef: { current: 'exec-1' },
+ workflowEdges: [],
+ activeBlocksSet: new Set(),
+ activeBlockRefCounts: new Map(),
+ accumulatedBlockLogs: [],
+ accumulatedBlockStates: new Map(),
+ executedBlockIds: new Set(),
+ includeStartConsoleEntry: true,
+ },
+ {
+ addConsole,
+ updateConsole: vi.fn(),
+ setActiveBlocks: vi.fn(),
+ setBlockRunStatus: vi.fn(),
+ setEdgeRunStatus: vi.fn(),
+ }
+ )
+
+ handlers.onBlockStarted({
+ blockId: 'fn-1',
+ blockName: 'Function 1',
+ blockType: 'function',
+ executionOrder: 7,
+ iterationCurrent: 0,
+ iterationTotal: 2,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ childWorkflowBlockId: 'child-inst-1',
+ childWorkflowName: 'Child Workflow',
+ parentIterations: [
+ {
+ iterationCurrent: 1,
+ iterationTotal: 3,
+ iterationType: 'parallel',
+ iterationContainerId: 'parallel-1',
+ },
+ ],
+ })
+
+ expect(addConsole).not.toHaveBeenCalled()
+ })
+
+ it('keeps distinct start rows when replay identity differs', () => {
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'fn-1',
+ blockName: 'Function 1',
+ blockType: 'function',
+ executionId: 'exec-1',
+ executionOrder: 7,
+ isRunning: true,
+ iterationCurrent: 0,
+ iterationTotal: 2,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ })
+
+ const addConsole = vi.fn()
+ const handlers = createBlockEventHandlers(
+ {
+ workflowId: 'wf-1',
+ executionIdRef: { current: 'exec-1' },
+ workflowEdges: [],
+ activeBlocksSet: new Set(),
+ activeBlockRefCounts: new Map(),
+ accumulatedBlockLogs: [],
+ accumulatedBlockStates: new Map(),
+ executedBlockIds: new Set(),
+ includeStartConsoleEntry: true,
+ },
+ {
+ addConsole,
+ updateConsole: vi.fn(),
+ setActiveBlocks: vi.fn(),
+ setBlockRunStatus: vi.fn(),
+ setEdgeRunStatus: vi.fn(),
+ }
+ )
+
+ handlers.onBlockStarted({
+ blockId: 'fn-1',
+ blockName: 'Function 1',
+ blockType: 'function',
+ executionOrder: 7,
+ iterationCurrent: 1,
+ iterationTotal: 2,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ })
+
+ expect(addConsole).toHaveBeenCalledTimes(1)
+ })
+
+ it('replays early child workflow instance updates after the start row is added', () => {
+ const updateConsole = vi.fn()
+ const handlers = createBlockEventHandlers(
+ {
+ workflowId: 'wf-1',
+ executionIdRef: { current: 'exec-1' },
+ workflowEdges: [],
+ activeBlocksSet: new Set(),
+ activeBlockRefCounts: new Map(),
+ accumulatedBlockLogs: [],
+ accumulatedBlockStates: new Map(),
+ executedBlockIds: new Set(),
+ includeStartConsoleEntry: true,
+ },
+ {
+ addConsole: terminalConsoleMockFns.mockAddConsole as any,
+ updateConsole,
+ setActiveBlocks: vi.fn(),
+ setBlockRunStatus: vi.fn(),
+ setEdgeRunStatus: vi.fn(),
+ }
+ )
+
+ handlers.onBlockChildWorkflowStarted({
+ blockId: 'nested-workflow',
+ childWorkflowInstanceId: 'nested-inst-1',
+ executionOrder: 4,
+ childWorkflowBlockId: 'parent-inst-1',
+ childWorkflowName: 'Parent Workflow',
+ })
+ handlers.onBlockStarted({
+ blockId: 'nested-workflow',
+ blockName: 'Nested Workflow',
+ blockType: 'workflow',
+ executionOrder: 4,
+ childWorkflowBlockId: 'parent-inst-1',
+ childWorkflowName: 'Parent Workflow',
+ })
+
+ expect(updateConsole).toHaveBeenCalledTimes(2)
+ expect(updateConsole.mock.calls[1]).toEqual([
+ 'nested-workflow',
+ expect.objectContaining({
+ childWorkflowInstanceId: 'nested-inst-1',
+ childWorkflowBlockId: 'parent-inst-1',
+ childWorkflowName: 'Parent Workflow',
+ executionOrder: 4,
+ }),
+ 'exec-1',
+ ])
+ })
})
describe('addExecutionErrorConsoleEntry', () => {
@@ -225,6 +407,296 @@ describe('workflow-execution-utils', () => {
expect(updateConsole).not.toHaveBeenCalled()
})
+ it('reconciles child workflow spans before running entries are swept to canceled', () => {
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'workflow-1',
+ blockName: 'Workflow 1',
+ blockType: 'workflow',
+ executionId: 'exec-1',
+ executionOrder: 2,
+ isRunning: false,
+ success: true,
+ childWorkflowInstanceId: 'child-inst-1',
+ })
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'starter',
+ blockName: 'Start',
+ blockType: 'starter',
+ executionId: 'exec-1',
+ executionOrder: 3,
+ isRunning: true,
+ childWorkflowBlockId: 'workflow-1',
+ childWorkflowName: 'Workflow 1',
+ })
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'api-1',
+ blockName: 'API 1',
+ blockType: 'api',
+ executionId: 'exec-1',
+ executionOrder: 4,
+ isRunning: true,
+ childWorkflowBlockId: 'child-inst-1',
+ childWorkflowName: 'Workflow 1',
+ })
+
+ const startedAt = new Date().toISOString()
+ const endedAt = new Date(Date.now() + 20).toISOString()
+ const updateConsole = vi.fn()
+ reconcileFinalBlockLogs(updateConsole, 'wf-1', 'exec-1', [
+ makeLog({
+ blockId: 'workflow-1',
+ blockName: 'Workflow 1',
+ blockType: 'workflow',
+ executionOrder: 2,
+ success: true,
+ childTraceSpans: [
+ {
+ id: 'starter-span',
+ name: 'Start',
+ type: 'starter',
+ blockId: 'starter',
+ executionOrder: 3,
+ status: 'success',
+ duration: 5,
+ startTime: startedAt,
+ endTime: endedAt,
+ output: {},
+ },
+ {
+ id: 'api-span',
+ name: 'API 1',
+ type: 'api',
+ blockId: 'api-1',
+ executionOrder: 4,
+ status: 'error',
+ errorHandled: true,
+ duration: 20,
+ startTime: startedAt,
+ endTime: endedAt,
+ output: { error: 'Request failed' },
+ },
+ ],
+ }),
+ ])
+
+ expect(updateConsole).toHaveBeenCalledTimes(2)
+ expect(updateConsole.mock.calls[0]).toEqual([
+ 'starter',
+ expect.objectContaining({
+ success: true,
+ isRunning: false,
+ isCanceled: false,
+ childWorkflowBlockId: 'workflow-1',
+ }),
+ 'exec-1',
+ ])
+ expect(updateConsole.mock.calls[1]).toEqual([
+ 'api-1',
+ expect.objectContaining({
+ executionOrder: 4,
+ success: false,
+ error: 'Request failed',
+ isRunning: false,
+ isCanceled: false,
+ childWorkflowBlockId: 'workflow-1',
+ }),
+ 'exec-1',
+ ])
+ })
+
+ it('uses span execution and iteration identity when reconciling repeated child blocks', () => {
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'workflow-1',
+ blockName: 'Workflow 1',
+ blockType: 'workflow',
+ executionId: 'exec-1',
+ executionOrder: 2,
+ success: true,
+ childWorkflowInstanceId: 'child-inst-1',
+ })
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'api-1',
+ blockName: 'API 1',
+ blockType: 'api',
+ executionId: 'exec-1',
+ executionOrder: 3,
+ isRunning: true,
+ iterationCurrent: 0,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ childWorkflowBlockId: 'workflow-1',
+ })
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'api-1',
+ blockName: 'API 1',
+ blockType: 'api',
+ executionId: 'exec-1',
+ executionOrder: 4,
+ isRunning: true,
+ iterationCurrent: 1,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ childWorkflowBlockId: 'workflow-1',
+ })
+
+ const startedAt = new Date().toISOString()
+ const endedAt = new Date(Date.now() + 20).toISOString()
+ const updateConsole = vi.fn()
+ reconcileFinalBlockLogs(updateConsole, 'wf-1', 'exec-1', [
+ makeLog({
+ blockId: 'workflow-1',
+ blockType: 'workflow',
+ executionOrder: 2,
+ childTraceSpans: [
+ {
+ id: 'api-iter-0',
+ name: 'API 1',
+ type: 'api',
+ blockId: 'api-1',
+ executionOrder: 3,
+ loopId: 'loop-1',
+ iterationIndex: 0,
+ status: 'success',
+ duration: 10,
+ startTime: startedAt,
+ endTime: endedAt,
+ output: { result: 'first' },
+ },
+ {
+ id: 'api-iter-1',
+ name: 'API 1',
+ type: 'api',
+ blockId: 'api-1',
+ executionOrder: 4,
+ loopId: 'loop-1',
+ iterationIndex: 1,
+ status: 'error',
+ duration: 20,
+ startTime: startedAt,
+ endTime: endedAt,
+ output: { error: new Error('second failed') },
+ },
+ ],
+ }),
+ ])
+
+ expect(updateConsole).toHaveBeenCalledTimes(2)
+ expect(updateConsole.mock.calls[0]).toEqual([
+ 'api-1',
+ expect.objectContaining({
+ executionOrder: 3,
+ iterationCurrent: 0,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ replaceOutput: { result: 'first' },
+ success: true,
+ }),
+ 'exec-1',
+ ])
+ expect(updateConsole.mock.calls[1]).toEqual([
+ 'api-1',
+ expect.objectContaining({
+ executionOrder: 4,
+ iterationCurrent: 1,
+ iterationType: 'loop',
+ iterationContainerId: 'loop-1',
+ error: 'second failed',
+ success: false,
+ }),
+ 'exec-1',
+ ])
+ })
+
+ it('recurses into nested workflow spans using the nested workflow instance id', () => {
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'workflow-1',
+ blockName: 'Workflow 1',
+ blockType: 'workflow',
+ executionId: 'exec-1',
+ executionOrder: 2,
+ success: true,
+ childWorkflowInstanceId: 'child-inst-1',
+ })
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'nested-workflow',
+ blockName: 'Nested Workflow',
+ blockType: 'workflow',
+ executionId: 'exec-1',
+ executionOrder: 3,
+ isRunning: false,
+ childWorkflowBlockId: 'workflow-1',
+ childWorkflowInstanceId: 'nested-inst-1',
+ })
+ terminalConsoleMockFns.mockAddConsole({
+ workflowId: 'wf-1',
+ blockId: 'nested-api',
+ blockName: 'Nested API',
+ blockType: 'api',
+ executionId: 'exec-1',
+ executionOrder: 1,
+ isRunning: true,
+ childWorkflowBlockId: 'nested-workflow',
+ })
+
+ const startedAt = new Date().toISOString()
+ const endedAt = new Date(Date.now() + 20).toISOString()
+ const updateConsole = vi.fn()
+ reconcileFinalBlockLogs(updateConsole, 'wf-1', 'exec-1', [
+ makeLog({
+ blockId: 'workflow-1',
+ blockType: 'workflow',
+ executionOrder: 2,
+ childTraceSpans: [
+ {
+ id: 'nested-workflow-span',
+ name: 'Nested Workflow',
+ type: 'workflow',
+ blockId: 'nested-workflow',
+ executionOrder: 3,
+ status: 'success',
+ duration: 10,
+ startTime: startedAt,
+ endTime: endedAt,
+ output: {},
+ children: [
+ {
+ id: 'nested-api-span',
+ name: 'Nested API',
+ type: 'api',
+ blockId: 'nested-api',
+ executionOrder: 1,
+ status: 'success',
+ duration: 10,
+ startTime: startedAt,
+ endTime: endedAt,
+ output: { ok: true },
+ },
+ ],
+ },
+ ],
+ }),
+ ])
+
+ expect(updateConsole.mock.calls[1]).toEqual([
+ 'nested-api',
+ expect.objectContaining({
+ childWorkflowBlockId: 'nested-workflow',
+ success: true,
+ isRunning: false,
+ isCanceled: false,
+ }),
+ 'exec-1',
+ ])
+ })
+
it('is a no-op when finalBlockLogs is empty or executionId is missing', () => {
const updateConsole = vi.fn()
reconcileFinalBlockLogs(updateConsole, 'wf-1', 'exec-1', [])
@@ -256,6 +728,7 @@ describe('workflow-execution-utils', () => {
expect(calls[0]).toBe('cancel')
expect(calls).toContain('add')
+ expect(cancelRunningEntries).toHaveBeenCalledWith('wf-1', 'exec-1')
})
it('reconciles finalBlockLogs before sweeping running entries (Fix C)', () => {
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts
index 4872ab7c156..4237e6cf8cf 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils.ts
@@ -1,19 +1,31 @@
import { createLogger } from '@sim/logger'
+import { toError } from '@sim/utils/errors'
import { generateId } from '@sim/utils/id'
+import type { TraceSpan } from '@/lib/logs/types'
import type {
+ BlockChildWorkflowStartedData,
BlockCompletedData,
BlockErrorData,
BlockStartedData,
} from '@/lib/workflows/executor/execution-events'
import type { BlockLog, BlockState, ExecutionResult, StreamingExecution } from '@/executor/types'
import { stripCloneSuffixes } from '@/executor/utils/subflow-utils'
-import { processSSEStream } from '@/hooks/use-execution-stream'
+import {
+ processSSEStream,
+ SSEEventHandlerError,
+ SSEStreamInterruptedError,
+} from '@/hooks/use-execution-stream'
const logger = createLogger('workflow-execution-utils')
import { useExecutionStore } from '@/stores/execution'
import type { ConsoleEntry, ConsoleUpdate } from '@/stores/terminal'
-import { saveExecutionPointer, useTerminalConsoleStore } from '@/stores/terminal'
+import {
+ clearExecutionPointer,
+ consolePersistence,
+ saveExecutionPointer,
+ useTerminalConsoleStore,
+} from '@/stores/terminal'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -118,6 +130,8 @@ export interface BlockEventHandlerDeps {
setEdgeRunStatus: (workflowId: string, edgeId: string, status: 'success' | 'error') => void
}
+type BlockChildWorkflowStartedUpdate = BlockChildWorkflowStartedData
+
/**
* Creates block event handlers for SSE execution events.
* Shared by the workflow execution hook and standalone execution utilities.
@@ -140,6 +154,7 @@ export function createBlockEventHandlers(
} = config
const { addConsole, updateConsole, setActiveBlocks, setBlockRunStatus, setEdgeRunStatus } = deps
+ const pendingChildWorkflowStarts = new Map()
const isStaleExecution = () =>
!!(
@@ -178,6 +193,94 @@ export function createBlockEventHandlers(
}),
})
+ const parentIterationsMatch = (
+ left: ConsoleEntry['parentIterations'],
+ right: BlockStartedData['parentIterations']
+ ) => {
+ if (!left?.length && !right?.length) return true
+ if (!left || !right || left.length !== right.length) return false
+ return left.every((entry, index) => {
+ const other = right[index]
+ return (
+ entry.iterationCurrent === other.iterationCurrent &&
+ entry.iterationTotal === other.iterationTotal &&
+ entry.iterationType === other.iterationType &&
+ entry.iterationContainerId === other.iterationContainerId
+ )
+ })
+ }
+
+ type StartedIdentity = {
+ blockId: string
+ executionOrder?: number
+ iterationCurrent?: BlockStartedData['iterationCurrent']
+ iterationTotal?: BlockStartedData['iterationTotal']
+ iterationType?: BlockStartedData['iterationType']
+ iterationContainerId?: BlockStartedData['iterationContainerId']
+ childWorkflowBlockId?: BlockStartedData['childWorkflowBlockId']
+ childWorkflowName?: BlockStartedData['childWorkflowName']
+ parentIterations?: BlockStartedData['parentIterations']
+ }
+
+ const startedEntryKey = (data: StartedIdentity) =>
+ JSON.stringify({
+ blockId: data.blockId,
+ executionOrder: data.executionOrder,
+ iterationCurrent: data.iterationCurrent,
+ iterationTotal: data.iterationTotal,
+ iterationType: data.iterationType,
+ iterationContainerId: data.iterationContainerId,
+ childWorkflowBlockId: data.childWorkflowBlockId,
+ childWorkflowName: data.childWorkflowName,
+ parentIterations: data.parentIterations ?? [],
+ })
+
+ const matchesStartedIdentity = (entry: ConsoleEntry, data: StartedIdentity) =>
+ entry.executionId === executionIdRef.current &&
+ entry.blockId === data.blockId &&
+ (data.executionOrder === undefined || entry.executionOrder === data.executionOrder) &&
+ entry.iterationCurrent === data.iterationCurrent &&
+ entry.iterationTotal === data.iterationTotal &&
+ entry.iterationType === data.iterationType &&
+ entry.iterationContainerId === data.iterationContainerId &&
+ entry.childWorkflowBlockId === data.childWorkflowBlockId &&
+ entry.childWorkflowName === data.childWorkflowName &&
+ parentIterationsMatch(entry.parentIterations, data.parentIterations)
+
+ const hasExistingStartedEntry = (data: StartedIdentity) => {
+ if (!workflowId) return false
+ return useTerminalConsoleStore
+ .getState()
+ .getWorkflowEntries(workflowId)
+ .some((entry) => matchesStartedIdentity(entry, data))
+ }
+
+ const applyChildWorkflowStart = (data: BlockChildWorkflowStartedUpdate) => {
+ updateConsole(
+ data.blockId,
+ {
+ childWorkflowInstanceId: data.childWorkflowInstanceId,
+ ...(data.iterationCurrent !== undefined && { iterationCurrent: data.iterationCurrent }),
+ ...(data.iterationTotal !== undefined && { iterationTotal: data.iterationTotal }),
+ ...(data.iterationType !== undefined && { iterationType: data.iterationType }),
+ ...(data.iterationContainerId !== undefined && {
+ iterationContainerId: data.iterationContainerId,
+ }),
+ ...(data.parentIterations !== undefined && {
+ parentIterations: data.parentIterations,
+ }),
+ ...(data.childWorkflowBlockId !== undefined && {
+ childWorkflowBlockId: data.childWorkflowBlockId,
+ }),
+ ...(data.childWorkflowName !== undefined && {
+ childWorkflowName: data.childWorkflowName,
+ }),
+ ...(data.executionOrder !== undefined && { executionOrder: data.executionOrder }),
+ },
+ executionIdRef.current
+ )
+ }
+
const createBlockLogEntry = (
data: BlockCompletedData | BlockErrorData,
options: { success: boolean; output?: unknown; error?: string }
@@ -237,6 +340,7 @@ export function createBlockEventHandlers(
updateActiveBlocks(data.blockId, true)
if (!includeStartConsoleEntry || !workflowId) return
+ if (hasExistingStartedEntry(data)) return
const startedAt = new Date().toISOString()
addConsole({
@@ -255,6 +359,13 @@ export function createBlockEventHandlers(
isRunning: true,
...extractIterationFields(data),
})
+
+ const pendingKey = startedEntryKey(data)
+ const pending = pendingChildWorkflowStarts.get(pendingKey)
+ if (pending) {
+ applyChildWorkflowStart(pending)
+ pendingChildWorkflowStarts.delete(pendingKey)
+ }
}
const onBlockCompleted = (data: BlockCompletedData) => {
@@ -327,33 +438,19 @@ export function createBlockEventHandlers(
updateConsoleErrorEntry(data)
}
- const onBlockChildWorkflowStarted = (data: {
- blockId: string
- childWorkflowInstanceId: string
- iterationCurrent?: number
- iterationContainerId?: string
- executionOrder?: number
- }) => {
+ const onBlockChildWorkflowStarted = (data: BlockChildWorkflowStartedUpdate) => {
if (isStaleExecution()) return
- updateConsole(
- data.blockId,
- {
- childWorkflowInstanceId: data.childWorkflowInstanceId,
- ...(data.iterationCurrent !== undefined && { iterationCurrent: data.iterationCurrent }),
- ...(data.iterationContainerId !== undefined && {
- iterationContainerId: data.iterationContainerId,
- }),
- ...(data.executionOrder !== undefined && { executionOrder: data.executionOrder }),
- },
- executionIdRef.current
- )
+ applyChildWorkflowStart(data)
+ if (!hasExistingStartedEntry(data)) {
+ pendingChildWorkflowStarts.set(startedEntryKey(data), data)
+ }
}
return { onBlockStarted, onBlockCompleted, onBlockError, onBlockChildWorkflowStarted }
}
type AddConsoleFn = (entry: Omit) => ConsoleEntry | undefined
-type CancelRunningEntriesFn = (workflowId: string) => void
+type CancelRunningEntriesFn = (workflowId: string, executionId?: string) => void
type UpdateConsoleFn = (
blockId: string,
update: string | ConsoleUpdate,
@@ -385,26 +482,155 @@ export function reconcileFinalBlockLogs(
if (!finalBlockLogs?.length || !executionId) return
for (const log of finalBlockLogs) {
const entries = useTerminalConsoleStore.getState().getWorkflowEntries(workflowId)
- const running = entries.find(
- (e) => e.blockId === log.blockId && e.executionId === executionId && e.isRunning
- )
- if (!running) continue
- updateConsole(
- log.blockId,
- {
- executionOrder: log.executionOrder,
- replaceOutput: (log.output ?? {}) as Record,
- ...(log.input ? { input: log.input } : {}),
- success: log.success,
- ...(log.error ? { error: log.error } : {}),
- durationMs: log.durationMs,
- startedAt: log.startedAt,
- endedAt: log.endedAt,
- isRunning: false,
- },
- executionId
+ const matchesFinalLog = (entry: ConsoleEntry) =>
+ entry.blockId === log.blockId &&
+ entry.executionId === executionId &&
+ entry.executionOrder === log.executionOrder
+ const matchingEntry = entries.find(matchesFinalLog)
+ const runningEntry = entries.find((entry) => matchesFinalLog(entry) && entry.isRunning)
+ if (runningEntry) {
+ updateConsole(
+ log.blockId,
+ {
+ executionOrder: log.executionOrder,
+ replaceOutput: (log.output ?? {}) as Record,
+ ...(log.input ? { input: log.input } : {}),
+ success: log.success,
+ ...(log.error ? { error: log.error } : {}),
+ durationMs: log.durationMs,
+ startedAt: log.startedAt,
+ endedAt: log.endedAt,
+ isRunning: false,
+ isCanceled: false,
+ },
+ executionId
+ )
+ }
+
+ const childWorkflowInstanceId = matchingEntry?.childWorkflowInstanceId
+ if (childWorkflowInstanceId && log.childTraceSpans?.length) {
+ reconcileChildTraceSpans(
+ updateConsole,
+ workflowId,
+ log.blockId,
+ childWorkflowInstanceId,
+ executionId,
+ log.childTraceSpans
+ )
+ }
+ }
+}
+
+function reconcileChildTraceSpans(
+ updateConsole: UpdateConsoleFn,
+ workflowId: string,
+ childWorkflowBlockId: string,
+ childWorkflowInstanceId: string,
+ executionId: string,
+ spans: TraceSpan[]
+): void {
+ for (const span of spans) {
+ const matchingEntry = span.blockId
+ ? findConsoleEntryForSpan(workflowId, executionId, childWorkflowBlockId, span)
+ : undefined
+ if (span.blockId) {
+ const errorMessage = normalizeSpanError(span.output?.error)
+ updateConsole(
+ span.blockId,
+ {
+ ...spanConsoleIdentity(span, childWorkflowBlockId),
+ replaceOutput: (span.output ?? {}) as Record,
+ success: span.status !== 'error',
+ ...(errorMessage !== undefined ? { error: errorMessage } : {}),
+ durationMs: span.duration,
+ startedAt: span.startTime,
+ endedAt: span.endTime,
+ isRunning: false,
+ isCanceled: false,
+ },
+ executionId
+ )
+ }
+ if (span.children?.length) {
+ reconcileChildTraceSpans(
+ updateConsole,
+ workflowId,
+ matchingEntry?.blockId ?? childWorkflowBlockId,
+ matchingEntry?.childWorkflowInstanceId ?? childWorkflowInstanceId,
+ executionId,
+ span.children
+ )
+ }
+ }
+}
+
+function spanConsoleIdentity(span: TraceSpan, childWorkflowBlockId: string): ConsoleUpdate {
+ const iterationContainerId = span.loopId ?? span.parallelId
+ const iterationType = span.loopId ? 'loop' : span.parallelId ? 'parallel' : undefined
+ return {
+ ...(span.executionOrder !== undefined && { executionOrder: span.executionOrder }),
+ ...(span.iterationIndex !== undefined && { iterationCurrent: span.iterationIndex }),
+ ...(iterationType !== undefined && { iterationType }),
+ ...(iterationContainerId !== undefined && { iterationContainerId }),
+ ...(span.parentIterations !== undefined && { parentIterations: span.parentIterations }),
+ childWorkflowBlockId,
+ }
+}
+
+function findConsoleEntryForSpan(
+ workflowId: string,
+ executionId: string,
+ childWorkflowBlockId: string,
+ span: TraceSpan
+): ConsoleEntry | undefined {
+ if (!span.blockId) return undefined
+ const identity = spanConsoleIdentity(span, childWorkflowBlockId)
+ return useTerminalConsoleStore
+ .getState()
+ .getWorkflowEntries(workflowId)
+ .find(
+ (entry) =>
+ entry.blockId === span.blockId &&
+ entry.executionId === executionId &&
+ matchesConsoleIdentity(entry, identity)
)
+}
+
+function matchesConsoleIdentity(entry: ConsoleEntry, identity: ConsoleUpdate): boolean {
+ if (identity.executionOrder !== undefined && entry.executionOrder !== identity.executionOrder) {
+ return false
+ }
+ if (
+ identity.iterationCurrent !== undefined &&
+ entry.iterationCurrent !== identity.iterationCurrent
+ ) {
+ return false
+ }
+ if (
+ identity.iterationContainerId !== undefined &&
+ entry.iterationContainerId !== identity.iterationContainerId
+ ) {
+ return false
}
+ if (
+ identity.childWorkflowBlockId !== undefined &&
+ entry.childWorkflowBlockId !== identity.childWorkflowBlockId
+ ) {
+ return false
+ }
+ if (
+ identity.childWorkflowInstanceId !== undefined &&
+ entry.childWorkflowInstanceId !== undefined &&
+ entry.childWorkflowInstanceId !== identity.childWorkflowInstanceId
+ ) {
+ return false
+ }
+ return true
+}
+
+function normalizeSpanError(error: unknown): string | undefined {
+ if (error === undefined || error === null) return undefined
+ return typeof error === 'string' ? error : toError(error).message
}
export interface ExecutionTimingFields {
@@ -500,7 +726,7 @@ export function handleExecutionErrorConsole(
params.executionId,
params.finalBlockLogs
)
- deps.cancelRunningEntries(params.workflowId)
+ deps.cancelRunningEntries(params.workflowId, params.executionId)
addExecutionErrorConsoleEntry(deps.addConsole, params)
}
@@ -585,7 +811,7 @@ export function handleExecutionCancelledConsole(
params.executionId,
params.finalBlockLogs
)
- deps.cancelRunningEntries(params.workflowId)
+ deps.cancelRunningEntries(params.workflowId, params.executionId)
addCancelledConsoleEntry(deps.addConsole, params)
}
@@ -600,6 +826,7 @@ export interface WorkflowExecutionOptions {
useDraftState?: boolean
stopAfterBlockId?: string
abortSignal?: AbortSignal
+ preserveExecutionOnTerminal?: boolean
/** For run_from_block / run_block: start from a specific block using cached state */
runFromBlock?: {
startBlockId: string
@@ -622,7 +849,9 @@ export async function executeWorkflowWithFullLogging(
}
const executionId = options.executionId || generateId()
- const { addConsole, updateConsole, cancelRunningEntries } = useTerminalConsoleStore.getState()
+ const { addConsole, updateConsole, cancelRunningEntries, finishRunningEntries } =
+ useTerminalConsoleStore.getState()
+ const clearOnTerminal = options.preserveExecutionOnTerminal !== true
const { setActiveBlocks, setBlockRunStatus, setEdgeRunStatus, setCurrentExecutionId } =
useExecutionStore.getState()
const wfId = targetWorkflowId
@@ -632,6 +861,17 @@ export async function executeWorkflowWithFullLogging(
const activeBlockRefCounts = new Map()
const executionIdRef = { current: executionId }
const accumulatedBlockLogs: BlockLog[] = []
+ const isCurrentExecution = () => {
+ return useExecutionStore.getState().getCurrentExecutionId(wfId) === executionIdRef.current
+ }
+ const clearExecutionState = () => {
+ if (!isCurrentExecution()) return
+ setCurrentExecutionId(wfId, null)
+ clearExecutionPointer(wfId)
+ consolePersistence.executionEnded()
+ useExecutionStore.getState().setIsExecuting(wfId, false)
+ setActiveBlocks(wfId, new Set())
+ }
const blockHandlers = createBlockEventHandlers(
{
@@ -705,18 +945,24 @@ export async function executeWorkflowWithFullLogging(
output: {},
logs: [],
}
+ let executionFinished = false
+ let preserveExecutionForRecovery = false
try {
await processSSEStream(
response.body.getReader(),
{
onEventId: (eventId) => {
+ if (executionFinished) return
if (wfId && executionIdRef.current && eventId % 5 === 0) {
- saveExecutionPointer({
- workflowId: wfId,
- executionId: executionIdRef.current,
- lastEventId: eventId,
- })
+ const executionId = executionIdRef.current
+ return consolePersistence.persist().then(() =>
+ saveExecutionPointer({
+ workflowId: wfId,
+ executionId,
+ lastEventId: eventId,
+ })
+ )
}
},
@@ -730,9 +976,10 @@ export async function executeWorkflowWithFullLogging(
onBlockChildWorkflowStarted: blockHandlers.onBlockChildWorkflowStarted,
onExecutionCompleted: (data) => {
- setCurrentExecutionId(wfId, null)
+ if (!isCurrentExecution()) return
+ executionFinished = true
reconcileFinalBlockLogs(updateConsole, wfId, executionIdRef.current, data.finalBlockLogs)
- cancelRunningEntries(wfId)
+ finishRunningEntries(wfId, executionIdRef.current)
executionResult = {
success: data.success,
output: data.output,
@@ -743,10 +990,34 @@ export async function executeWorkflowWithFullLogging(
endTime: data.endTime,
},
}
+ if (clearOnTerminal) {
+ clearExecutionState()
+ }
+ },
+
+ onExecutionPaused: (data) => {
+ if (!isCurrentExecution()) return
+ executionFinished = true
+ reconcileFinalBlockLogs(updateConsole, wfId, executionIdRef.current, data.finalBlockLogs)
+ finishRunningEntries(wfId, executionIdRef.current)
+ executionResult = {
+ success: true,
+ output: data.output,
+ logs: accumulatedBlockLogs,
+ metadata: {
+ duration: data.duration,
+ startTime: data.startTime,
+ endTime: data.endTime,
+ },
+ }
+ if (clearOnTerminal) {
+ clearExecutionState()
+ }
},
onExecutionCancelled: (data) => {
- setCurrentExecutionId(wfId, null)
+ if (!isCurrentExecution()) return
+ executionFinished = true
executionResult = {
success: false,
output: {},
@@ -763,10 +1034,14 @@ export async function executeWorkflowWithFullLogging(
finalBlockLogs: data?.finalBlockLogs,
}
)
+ if (clearOnTerminal) {
+ clearExecutionState()
+ }
},
onExecutionError: (data) => {
- setCurrentExecutionId(wfId, null)
+ if (!isCurrentExecution()) return
+ executionFinished = true
const errorMessage = data.error || 'Run failed'
executionResult = {
success: false,
@@ -788,13 +1063,22 @@ export async function executeWorkflowWithFullLogging(
finalBlockLogs: data.finalBlockLogs,
}
)
+ if (clearOnTerminal) {
+ clearExecutionState()
+ }
},
},
'CopilotExecution'
)
+ } catch (error) {
+ if (error instanceof SSEEventHandlerError || error instanceof SSEStreamInterruptedError) {
+ preserveExecutionForRecovery = true
+ }
+ throw error
} finally {
- setCurrentExecutionId(wfId, null)
- setActiveBlocks(wfId, new Set())
+ if (!preserveExecutionForRecovery && clearOnTerminal) {
+ clearExecutionState()
+ }
}
return executionResult
diff --git a/apps/sim/background/run-data-drain.ts b/apps/sim/background/run-data-drain.ts
new file mode 100644
index 00000000000..21d462055df
--- /dev/null
+++ b/apps/sim/background/run-data-drain.ts
@@ -0,0 +1,14 @@
+import { task } from '@trigger.dev/sdk'
+import { runDrain } from '@/lib/data-drains/service'
+import type { RunTrigger } from '@/lib/data-drains/types'
+
+interface RunDataDrainPayload {
+ drainId: string
+ trigger: RunTrigger
+}
+
+export const runDataDrainTask = task({
+ id: 'run-data-drain',
+ run: async ({ drainId, trigger }: RunDataDrainPayload, { signal }) =>
+ runDrain(drainId, trigger, { signal }),
+})
diff --git a/apps/sim/blocks/blocks/wait.ts b/apps/sim/blocks/blocks/wait.ts
index 80de07bc174..3f043df09f7 100644
--- a/apps/sim/blocks/blocks/wait.ts
+++ b/apps/sim/blocks/blocks/wait.ts
@@ -8,13 +8,14 @@ const WaitIcon = (props: SVGProps) => createElement(PauseCircle,
export const WaitBlock: BlockConfig = {
type: 'wait',
name: 'Wait',
- description: 'Pause workflow execution for a specified time delay',
+ description: 'Pause workflow execution for up to 30 days',
longDescription:
- 'Pauses workflow execution for a specified time interval. The wait executes a simple sleep for the configured duration.',
+ 'Pauses workflow execution for a specified time interval. Waits up to five minutes are held in-process; longer waits suspend the workflow and resume automatically once the configured duration elapses.',
bestPractices: `
- - Use for simple time delays (max 10 minutes)
- - Configure the wait amount and unit (seconds or minutes)
- - Time-based waits are interruptible via workflow cancellation
+ - Configure the wait amount and unit (seconds, minutes, hours, or days)
+ - Maximum wait duration is 30 days
+ - Waits up to 5 minutes execute in-process and are interruptible via workflow cancellation
+ - Longer waits suspend the workflow; the execution resumes automatically when the timer fires
- Enter a positive number for the wait amount
`,
category: 'blocks',
@@ -26,7 +27,7 @@ export const WaitBlock: BlockConfig = {
id: 'timeValue',
title: 'Wait Amount',
type: 'short-input',
- description: 'Max: 600 seconds or 10 minutes',
+ description: 'Max: 30 days',
placeholder: '10',
value: () => '10',
required: true,
@@ -38,6 +39,8 @@ export const WaitBlock: BlockConfig = {
options: [
{ label: 'Seconds', id: 'seconds' },
{ label: 'Minutes', id: 'minutes' },
+ { label: 'Hours', id: 'hours' },
+ { label: 'Days', id: 'days' },
],
value: () => 'seconds',
required: true,
@@ -53,7 +56,7 @@ export const WaitBlock: BlockConfig = {
},
timeUnit: {
type: 'string',
- description: 'Wait duration unit (seconds or minutes)',
+ description: 'Wait duration unit (seconds, minutes, hours, or days)',
},
},
outputs: {
@@ -65,5 +68,9 @@ export const WaitBlock: BlockConfig = {
type: 'string',
description: 'Status of the wait block (waiting, completed, cancelled)',
},
+ resumeAt: {
+ type: 'string',
+ description: 'ISO timestamp at which a suspended wait will resume (long waits only)',
+ },
},
}
diff --git a/apps/sim/connectors/confluence/confluence.ts b/apps/sim/connectors/confluence/confluence.ts
index 56527fdc527..2180f932f08 100644
--- a/apps/sim/connectors/confluence/confluence.ts
+++ b/apps/sim/connectors/confluence/confluence.ts
@@ -4,7 +4,7 @@ import { ConfluenceIcon } from '@/components/icons'
import { fetchWithRetry, VALIDATE_RETRY_OPTIONS } from '@/lib/knowledge/documents/utils'
import type { ConnectorConfig, ExternalDocument, ExternalDocumentList } from '@/connectors/types'
import { htmlToPlainText, joinTagArray, parseTagDate } from '@/connectors/utils'
-import { getConfluenceCloudId } from '@/tools/confluence/utils'
+import { getConfluenceCloudId, normalizeConfluenceDomainHost } from '@/tools/confluence/utils'
const logger = createLogger('ConfluenceConnector')
@@ -141,7 +141,15 @@ export const confluenceConnector: ConnectorConfig = {
auth: {
mode: 'oauth',
provider: 'confluence',
- requiredScopes: ['read:confluence-content.all', 'read:page:confluence', 'offline_access'],
+ requiredScopes: [
+ 'read:confluence-content.all',
+ 'read:page:confluence',
+ 'read:blogpost:confluence',
+ 'read:space:confluence',
+ 'read:label:confluence',
+ 'search:confluence',
+ 'offline_access',
+ ],
},
configFields: [
@@ -205,7 +213,7 @@ export const confluenceConnector: ConnectorConfig = {
cursor?: string,
syncContext?: Record
): Promise => {
- const domain = sourceConfig.domain as string
+ const domain = normalizeConfluenceDomainHost(sourceConfig.domain as string)
const spaceKey = sourceConfig.spaceKey as string
const contentType = (sourceConfig.contentType as string) || 'page'
const labelFilter = (sourceConfig.labelFilter as string) || ''
@@ -269,7 +277,7 @@ export const confluenceConnector: ConnectorConfig = {
externalId: string,
syncContext?: Record
): Promise => {
- const domain = sourceConfig.domain as string
+ const domain = normalizeConfluenceDomainHost(sourceConfig.domain as string)
let cloudId = syncContext?.cloudId as string | undefined
if (!cloudId) {
cloudId = await getConfluenceCloudId(domain, accessToken)
diff --git a/apps/sim/ee/data-drains/components/data-drains-settings.tsx b/apps/sim/ee/data-drains/components/data-drains-settings.tsx
new file mode 100644
index 00000000000..d7318eed89f
--- /dev/null
+++ b/apps/sim/ee/data-drains/components/data-drains-settings.tsx
@@ -0,0 +1,435 @@
+'use client'
+
+import { useState } from 'react'
+import { createLogger } from '@sim/logger'
+import { toError } from '@sim/utils/errors'
+import {
+ Badge,
+ Button,
+ Callout,
+ Combobox,
+ DropdownMenu,
+ DropdownMenuContent,
+ DropdownMenuItem,
+ DropdownMenuTrigger,
+ FormField,
+ Input,
+ Modal,
+ ModalBody,
+ ModalContent,
+ ModalFooter,
+ ModalHeader,
+ ModalTitle,
+ MoreHorizontal,
+ Switch,
+ Table,
+ TableBody,
+ TableCell,
+ TableHead,
+ TableHeader,
+ TableRow,
+ toast,
+} from '@/components/emcn'
+import type { CreateDataDrainBody, DataDrain, DataDrainRun } from '@/lib/api/contracts/data-drains'
+import { useSession } from '@/lib/auth/auth-client'
+import { cn } from '@/lib/core/utils/cn'
+import { CADENCE_TYPES, DESTINATION_TYPES, SOURCE_TYPES } from '@/lib/data-drains/types'
+import { getUserRole } from '@/lib/workspaces/organization/utils'
+import { DataDrainsSkeleton } from '@/ee/data-drains/components/data-drains-skeleton'
+import { DESTINATION_FORM_REGISTRY } from '@/ee/data-drains/destinations/registry'
+import {
+ useCreateDataDrain,
+ useDataDrainRuns,
+ useDataDrains,
+ useDeleteDataDrain,
+ useRunDataDrainNow,
+ useTestDataDrain,
+ useUpdateDataDrain,
+} from '@/ee/data-drains/hooks/data-drains'
+import { useOrganizations } from '@/hooks/queries/organization'
+
+const logger = createLogger('DataDrainsSettings')
+
+const SOURCE_LABELS: Record<(typeof SOURCE_TYPES)[number], string> = {
+ workflow_logs: 'Workflow logs',
+ job_logs: 'Job logs',
+ audit_logs: 'Audit logs',
+ copilot_chats: 'Copilot chats',
+ copilot_runs: 'Copilot runs',
+}
+
+const DESTINATION_LABELS: Record<(typeof DESTINATION_TYPES)[number], string> = {
+ s3: 'Amazon S3',
+ webhook: 'HTTPS webhook',
+}
+
+const CADENCE_LABELS: Record<(typeof CADENCE_TYPES)[number], string> = {
+ hourly: 'Every hour',
+ daily: 'Every day',
+}
+
+const SOURCE_OPTIONS = SOURCE_TYPES.map((t) => ({ value: t, label: SOURCE_LABELS[t] }))
+const CADENCE_OPTIONS = CADENCE_TYPES.map((t) => ({ value: t, label: CADENCE_LABELS[t] }))
+const DESTINATION_OPTIONS = DESTINATION_TYPES.map((t) => ({
+ value: t,
+ label: DESTINATION_LABELS[t],
+}))
+
+export function DataDrainsSettings() {
+ const { data: session, isPending: sessionPending } = useSession()
+ const { data: orgsData, isLoading: orgsLoading } = useOrganizations()
+ const activeOrganization = orgsData?.activeOrganization
+ const orgId = activeOrganization?.id
+
+ const userEmail = session?.user?.email
+ const userRole = getUserRole(activeOrganization, userEmail)
+ const canManage = userRole === 'owner' || userRole === 'admin'
+
+ const { data: drains, isLoading: drainsLoading, error: drainsError } = useDataDrains(orgId)
+
+ const [createOpen, setCreateOpen] = useState(false)
+ const [expandedDrainId, setExpandedDrainId] = useState(null)
+
+ if (sessionPending || orgsLoading || drainsLoading) {
+ return
+ }
+
+ if (!orgId) {
+ return (
+
+ Data drains are configured per organization. Join or create one to continue.
+
+ )
+ }
+
+ if (!canManage) {
+ return (
+
+ Only organization owners and admins can configure data drains.
+
+ )
+ }
+
+ return (
+
+
+ Drains continuously export Sim data to your own storage on a schedule. Combine with Data
+ Retention to satisfy long-term compliance archives.
+
+
+
+
+ {drains?.length ?? 0} drain{(drains?.length ?? 0) === 1 ? '' : 's'}
+
+
setCreateOpen(true)}>
+ New drain
+
+
+
+ {drainsError ? (
+
+ Failed to load data drains: {toError(drainsError).message}
+
+ ) : drains && drains.length > 0 ? (
+
+
+
+ Name
+ Source
+ Destination
+ Cadence
+ Last run
+ Enabled
+
+
+
+
+ {drains.map((drain) => (
+
+ setExpandedDrainId(expandedDrainId === drain.id ? null : drain.id)
+ }
+ />
+ ))}
+
+
+ ) : (
+
+
No drains yet
+
+ Create a drain to start exporting workflow logs, audit events, and copilot data to S3 or
+ your own webhook.
+
+
+ )}
+
+ {createOpen && (
+
setCreateOpen(false)} />
+ )}
+
+ )
+}
+
+interface DrainRowProps {
+ drain: DataDrain
+ organizationId: string
+ expanded: boolean
+ onToggleExpand: () => void
+}
+
+function DrainRow({ drain, organizationId, expanded, onToggleExpand }: DrainRowProps) {
+ const updateMutation = useUpdateDataDrain()
+ const deleteMutation = useDeleteDataDrain()
+ const runMutation = useRunDataDrainNow()
+ const testMutation = useTestDataDrain()
+
+ async function handleToggleEnabled() {
+ try {
+ await updateMutation.mutateAsync({
+ organizationId,
+ drainId: drain.id,
+ body: { enabled: !drain.enabled },
+ })
+ toast.success(drain.enabled ? 'Drain disabled' : 'Drain enabled')
+ } catch (error) {
+ toast.error(toError(error).message)
+ }
+ }
+
+ async function handleRunNow() {
+ try {
+ await runMutation.mutateAsync({ organizationId, drainId: drain.id })
+ toast.success('Drain run enqueued')
+ } catch (error) {
+ toast.error(toError(error).message)
+ }
+ }
+
+ async function handleTest() {
+ try {
+ await testMutation.mutateAsync({ organizationId, drainId: drain.id })
+ toast.success('Connection test succeeded')
+ } catch (error) {
+ toast.error(toError(error).message)
+ }
+ }
+
+ async function handleDelete() {
+ if (!window.confirm(`Delete drain "${drain.name}"? This cannot be undone.`)) return
+ try {
+ await deleteMutation.mutateAsync({ organizationId, drainId: drain.id })
+ toast.success('Drain deleted')
+ } catch (error) {
+ toast.error(toError(error).message)
+ }
+ }
+
+ return (
+ <>
+
+ {drain.name}
+
+ {SOURCE_LABELS[drain.source]}
+
+
+ {DESTINATION_LABELS[drain.destinationType]}
+
+ {CADENCE_LABELS[drain.scheduleCadence]}
+
+ {drain.lastRunAt ? new Date(drain.lastRunAt).toLocaleString() : 'Never'}
+
+ e.stopPropagation()}>
+
+
+ e.stopPropagation()}>
+
+
+
+
+
+
+
+
+ Run now
+
+ Test connection
+
+ Delete
+
+
+
+
+
+ {expanded && (
+
+
+
+
+
+ )}
+ >
+ )
+}
+
+interface DrainRunsPanelProps {
+ organizationId: string
+ drainId: string
+}
+
+function DrainRunsPanel({ organizationId, drainId }: DrainRunsPanelProps) {
+ const { data: runs, isLoading } = useDataDrainRuns(organizationId, drainId, 10)
+
+ if (isLoading) {
+ return Loading runs...
+ }
+ if (!runs || runs.length === 0) {
+ return No runs yet.
+ }
+
+ return (
+
+
Recent runs
+ {runs.map((run) => (
+
+ ))}
+
+ )
+}
+
+function RunRow({ run }: { run: DataDrainRun }) {
+ const statusColor =
+ run.status === 'success'
+ ? 'text-green-600'
+ : run.status === 'failed'
+ ? 'text-red-600'
+ : 'text-[var(--text-muted)]'
+ return (
+
+
+
+ {run.status}
+ {run.trigger}
+
+ {new Date(run.startedAt).toLocaleString()}
+
+
+ {run.error &&
{run.error}
}
+
+
+
{run.rowsExported.toLocaleString()} rows
+
{(run.bytesWritten / 1024).toFixed(1)} KB
+
+
+ )
+}
+
+interface CreateDrainModalProps {
+ organizationId: string
+ onClose: () => void
+}
+
+function CreateDrainModal({ organizationId, onClose }: CreateDrainModalProps) {
+ const createMutation = useCreateDataDrain()
+
+ const [name, setName] = useState('')
+ const [source, setSource] = useState<(typeof SOURCE_TYPES)[number]>('workflow_logs')
+ const [cadence, setCadence] = useState<(typeof CADENCE_TYPES)[number]>('daily')
+ const [destinationType, setDestinationType] = useState<(typeof DESTINATION_TYPES)[number]>(
+ DESTINATION_TYPES[0]
+ )
+ const [destState, setDestState] = useState(
+ () => DESTINATION_FORM_REGISTRY[DESTINATION_TYPES[0]].initialState
+ )
+
+ const spec = DESTINATION_FORM_REGISTRY[destinationType]
+ const canSubmit = name.trim().length > 0 && spec.isComplete(destState)
+
+ function handleDestinationChange(next: (typeof DESTINATION_TYPES)[number]) {
+ setDestinationType(next)
+ setDestState(DESTINATION_FORM_REGISTRY[next].initialState)
+ }
+
+ async function handleSubmit() {
+ if (!canSubmit) return
+ try {
+ const body = {
+ name: name.trim(),
+ source,
+ scheduleCadence: cadence,
+ ...spec.toDestinationBranch(destState),
+ } as CreateDataDrainBody
+ await createMutation.mutateAsync({ organizationId, body })
+ toast.success('Drain created')
+ onClose()
+ } catch (error) {
+ const msg = toError(error).message
+ logger.error('Failed to create data drain', { error: msg })
+ toast.error(msg)
+ }
+ }
+
+ return (
+ !open && onClose()}>
+
+
+ New data drain
+
+
+
+ setName(e.target.value)}
+ placeholder='Workflow logs to S3'
+ />
+
+
+ setSource(v as (typeof SOURCE_TYPES)[number])}
+ options={SOURCE_OPTIONS}
+ dropdownWidth='trigger'
+ />
+
+
+ setCadence(v as (typeof CADENCE_TYPES)[number])}
+ options={CADENCE_OPTIONS}
+ dropdownWidth='trigger'
+ />
+
+
+ handleDestinationChange(v as (typeof DESTINATION_TYPES)[number])}
+ options={DESTINATION_OPTIONS}
+ dropdownWidth='trigger'
+ />
+
+
+
+
+
+
+ Cancel
+
+
+ {createMutation.isPending ? 'Creating...' : 'Create drain'}
+
+
+
+
+ )
+}
diff --git a/apps/sim/ee/data-drains/components/data-drains-skeleton.tsx b/apps/sim/ee/data-drains/components/data-drains-skeleton.tsx
new file mode 100644
index 00000000000..464acddeae2
--- /dev/null
+++ b/apps/sim/ee/data-drains/components/data-drains-skeleton.tsx
@@ -0,0 +1,17 @@
+import { Skeleton } from '@/components/emcn'
+
+export function DataDrainsSkeleton() {
+ return (
+
+
+
+
+
+
+ {Array.from({ length: 3 }).map((_, i) => (
+
+ ))}
+
+
+ )
+}
diff --git a/apps/sim/ee/data-drains/destinations/registry.tsx b/apps/sim/ee/data-drains/destinations/registry.tsx
new file mode 100644
index 00000000000..c40a166cd27
--- /dev/null
+++ b/apps/sim/ee/data-drains/destinations/registry.tsx
@@ -0,0 +1,176 @@
+'use client'
+
+import type { ComponentType } from 'react'
+import { FormField, Input, SecretInput, Switch } from '@/components/emcn'
+import type { CreateDataDrainBody } from '@/lib/api/contracts/data-drains'
+import type { DestinationType } from '@/lib/data-drains/types'
+
+type DestinationBranch = Pick<
+ CreateDataDrainBody,
+ 'destinationType' | 'destinationConfig' | 'destinationCredentials'
+>
+
+interface DestinationFormSpec {
+ readonly displayName: string
+ readonly initialState: TState
+ readonly FormFields: ComponentType<{
+ state: TState
+ setState: (state: TState) => void
+ }>
+ readonly isComplete: (state: TState) => boolean
+ readonly toDestinationBranch: (state: TState) => DestinationBranch
+}
+
+interface S3State {
+ bucket: string
+ region: string
+ prefix: string
+ endpoint: string
+ forcePathStyle: boolean
+ accessKeyId: string
+ secretAccessKey: string
+}
+
+const s3FormSpec: DestinationFormSpec = {
+ displayName: 'Amazon S3',
+ initialState: {
+ bucket: '',
+ region: 'us-east-1',
+ prefix: '',
+ endpoint: '',
+ forcePathStyle: false,
+ accessKeyId: '',
+ secretAccessKey: '',
+ },
+ FormFields: ({ state, setState }) => (
+ <>
+
+ setState({ ...state, bucket: e.target.value })}
+ />
+
+
+ setState({ ...state, region: e.target.value })}
+ />
+
+
+ setState({ ...state, prefix: e.target.value })}
+ placeholder='exports/sim'
+ />
+
+
+ setState({ ...state, endpoint: e.target.value })}
+ placeholder='https://s3.example.com'
+ />
+
+
+ setState({ ...state, forcePathStyle: v })}
+ />
+
+
+ setState({ ...state, accessKeyId: v })}
+ />
+
+
+ setState({ ...state, secretAccessKey: v })}
+ />
+
+ >
+ ),
+ isComplete: (s) =>
+ s.bucket.length > 0 &&
+ s.region.length > 0 &&
+ s.accessKeyId.length > 0 &&
+ s.secretAccessKey.length > 0,
+ toDestinationBranch: (s) => ({
+ destinationType: 's3',
+ destinationConfig: {
+ bucket: s.bucket,
+ region: s.region,
+ prefix: s.prefix || undefined,
+ endpoint: s.endpoint || undefined,
+ forcePathStyle: s.forcePathStyle,
+ },
+ destinationCredentials: {
+ accessKeyId: s.accessKeyId,
+ secretAccessKey: s.secretAccessKey,
+ },
+ }),
+}
+
+interface WebhookState {
+ url: string
+ signatureHeader: string
+ signingSecret: string
+ bearerToken: string
+}
+
+const webhookFormSpec: DestinationFormSpec = {
+ displayName: 'HTTPS webhook',
+ initialState: { url: '', signatureHeader: '', signingSecret: '', bearerToken: '' },
+ FormFields: ({ state, setState }) => (
+ <>
+
+ setState({ ...state, url: e.target.value })}
+ placeholder='https://example.com/sim-drain'
+ />
+
+
+ setState({ ...state, signatureHeader: e.target.value })}
+ placeholder='X-Sim-Signature'
+ />
+
+
+ setState({ ...state, signingSecret: v })}
+ />
+
+
+ setState({ ...state, bearerToken: v })}
+ />
+
+ >
+ ),
+ isComplete: (s) => s.url.length > 0 && s.signingSecret.length >= 8,
+ toDestinationBranch: (s) => ({
+ destinationType: 'webhook',
+ destinationConfig: {
+ url: s.url,
+ signatureHeader: s.signatureHeader || undefined,
+ },
+ destinationCredentials: {
+ signingSecret: s.signingSecret,
+ bearerToken: s.bearerToken || undefined,
+ },
+ }),
+}
+
+/**
+ * Client-side mirror of `DESTINATION_REGISTRY`. The settings page selects a
+ * spec by `destinationType` and never branches on the literal — adding a new
+ * destination is one entry here plus one in the server-side registry.
+ */
+export const DESTINATION_FORM_REGISTRY: Record> = {
+ s3: s3FormSpec as DestinationFormSpec,
+ webhook: webhookFormSpec as DestinationFormSpec,
+}
diff --git a/apps/sim/ee/data-drains/hooks/data-drains.ts b/apps/sim/ee/data-drains/hooks/data-drains.ts
new file mode 100644
index 00000000000..d6a9eb912b1
--- /dev/null
+++ b/apps/sim/ee/data-drains/hooks/data-drains.ts
@@ -0,0 +1,173 @@
+import { createLogger } from '@sim/logger'
+import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
+import { requestJson } from '@/lib/api/client/request'
+import {
+ type CreateDataDrainBody,
+ createDataDrainContract,
+ type DataDrain,
+ type DataDrainRun,
+ deleteDataDrainContract,
+ listDataDrainRunsContract,
+ listDataDrainsContract,
+ runDataDrainContract,
+ testDataDrainContract,
+ type UpdateDataDrainBody,
+ updateDataDrainContract,
+} from '@/lib/api/contracts/data-drains'
+
+const logger = createLogger('DataDrainsQueries')
+
+export const dataDrainKeys = {
+ all: ['data-drains'] as const,
+ lists: () => [...dataDrainKeys.all, 'list'] as const,
+ list: (organizationId?: string) => [...dataDrainKeys.lists(), organizationId ?? ''] as const,
+ runsAll: () => [...dataDrainKeys.all, 'runs'] as const,
+ runs: (drainId?: string) => [...dataDrainKeys.runsAll(), drainId ?? ''] as const,
+ runsList: (organizationId?: string, drainId?: string, limit?: number) =>
+ [...dataDrainKeys.runs(drainId), organizationId ?? '', limit ?? 10] as const,
+}
+
+async function fetchDataDrains(organizationId: string, signal?: AbortSignal): Promise {
+ const { drains } = await requestJson(listDataDrainsContract, {
+ params: { id: organizationId },
+ signal,
+ })
+ return drains
+}
+
+async function fetchDataDrainRuns(
+ organizationId: string,
+ drainId: string,
+ limit: number | undefined,
+ signal?: AbortSignal
+): Promise {
+ const { runs } = await requestJson(listDataDrainRunsContract, {
+ params: { id: organizationId, drainId },
+ query: limit ? { limit } : undefined,
+ signal,
+ })
+ return runs
+}
+
+export function useDataDrains(organizationId?: string) {
+ return useQuery({
+ queryKey: dataDrainKeys.list(organizationId),
+ queryFn: ({ signal }) => fetchDataDrains(organizationId as string, signal),
+ enabled: Boolean(organizationId),
+ staleTime: 60 * 1000,
+ })
+}
+
+export function useDataDrainRuns(organizationId?: string, drainId?: string, limit = 10) {
+ return useQuery({
+ queryKey: dataDrainKeys.runsList(organizationId, drainId, limit),
+ queryFn: ({ signal }) =>
+ fetchDataDrainRuns(organizationId as string, drainId as string, limit, signal),
+ enabled: Boolean(organizationId && drainId),
+ staleTime: 30 * 1000,
+ placeholderData: keepPreviousData,
+ })
+}
+
+interface CreateDataDrainParams {
+ organizationId: string
+ body: CreateDataDrainBody
+}
+
+export function useCreateDataDrain() {
+ const queryClient = useQueryClient()
+ return useMutation({
+ mutationFn: async ({ organizationId, body }: CreateDataDrainParams) => {
+ const { drain } = await requestJson(createDataDrainContract, {
+ params: { id: organizationId },
+ body,
+ })
+ logger.info('Created data drain', { drainId: drain.id, organizationId })
+ return drain
+ },
+ onSuccess: (_drain, variables) => {
+ queryClient.invalidateQueries({ queryKey: dataDrainKeys.list(variables.organizationId) })
+ },
+ })
+}
+
+interface UpdateDataDrainParams {
+ organizationId: string
+ drainId: string
+ body: UpdateDataDrainBody
+}
+
+export function useUpdateDataDrain() {
+ const queryClient = useQueryClient()
+ return useMutation({
+ mutationFn: async ({ organizationId, drainId, body }: UpdateDataDrainParams) => {
+ const { drain } = await requestJson(updateDataDrainContract, {
+ params: { id: organizationId, drainId },
+ body,
+ })
+ logger.info('Updated data drain', { drainId, organizationId })
+ return drain
+ },
+ onSuccess: (_drain, variables) => {
+ queryClient.invalidateQueries({ queryKey: dataDrainKeys.list(variables.organizationId) })
+ },
+ })
+}
+
+interface DeleteDataDrainParams {
+ organizationId: string
+ drainId: string
+}
+
+export function useDeleteDataDrain() {
+ const queryClient = useQueryClient()
+ return useMutation({
+ mutationFn: async ({ organizationId, drainId }: DeleteDataDrainParams) => {
+ await requestJson(deleteDataDrainContract, {
+ params: { id: organizationId, drainId },
+ })
+ logger.info('Deleted data drain', { drainId, organizationId })
+ },
+ onSuccess: (_data, variables) => {
+ queryClient.invalidateQueries({ queryKey: dataDrainKeys.list(variables.organizationId) })
+ queryClient.removeQueries({ queryKey: dataDrainKeys.runs(variables.drainId) })
+ },
+ })
+}
+
+interface RunDataDrainParams {
+ organizationId: string
+ drainId: string
+}
+
+export function useRunDataDrainNow() {
+ const queryClient = useQueryClient()
+ return useMutation({
+ mutationFn: async ({ organizationId, drainId }: RunDataDrainParams) => {
+ const data = await requestJson(runDataDrainContract, {
+ params: { id: organizationId, drainId },
+ })
+ logger.info('Enqueued data drain run', { drainId, jobId: data.jobId })
+ return data
+ },
+ onSuccess: (_data, variables) => {
+ queryClient.invalidateQueries({ queryKey: dataDrainKeys.runs(variables.drainId) })
+ queryClient.invalidateQueries({ queryKey: dataDrainKeys.list(variables.organizationId) })
+ },
+ })
+}
+
+interface TestDataDrainParams {
+ organizationId: string
+ drainId: string
+}
+
+export function useTestDataDrain() {
+ return useMutation({
+ mutationFn: async ({ organizationId, drainId }: TestDataDrainParams) => {
+ return await requestJson(testDataDrainContract, {
+ params: { id: organizationId, drainId },
+ })
+ },
+ })
+}
diff --git a/apps/sim/executor/execution/engine.test.ts b/apps/sim/executor/execution/engine.test.ts
index 6147762d496..f0539ebf4f6 100644
--- a/apps/sim/executor/execution/engine.test.ts
+++ b/apps/sim/executor/execution/engine.test.ts
@@ -158,6 +158,27 @@ describe('ExecutionEngine', () => {
expect(result.status).toBeUndefined()
})
+ it('should not fall back to starter blocks for terminal resume snapshots', async () => {
+ const startNode = createMockNode('start', 'starter')
+ const dag = createMockDAG([startNode])
+ const context = createMockContext({
+ metadata: {
+ executionId: 'test-execution',
+ startTime: new Date().toISOString(),
+ pendingBlocks: [],
+ resumeFromSnapshot: true,
+ },
+ })
+ const edgeManager = createMockEdgeManager()
+ const nodeOrchestrator = createMockNodeOrchestrator()
+
+ const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
+ const result = await engine.run()
+
+ expect(result.success).toBe(true)
+ expect(nodeOrchestrator.executionCount).toBe(0)
+ })
+
it('should execute all nodes in a multi-node workflow', async () => {
const nodes = [
createMockNode('start', 'starter'),
diff --git a/apps/sim/executor/execution/engine.ts b/apps/sim/executor/execution/engine.ts
index d7dc8f44f11..82497858911 100644
--- a/apps/sim/executor/execution/engine.ts
+++ b/apps/sim/executor/execution/engine.ts
@@ -339,6 +339,11 @@ export class ExecutionEngine {
return
}
+ if (this.context.metadata.resumeFromSnapshot === true) {
+ this.execLogger.info('Resume snapshot has no downstream work to queue')
+ return
+ }
+
if (triggerBlockId) {
this.addToQueue(triggerBlockId)
return
@@ -484,6 +489,8 @@ export class ExecutionEngine {
parallelScope: pause.parallelScope,
loopScope: pause.loopScope,
resumeLinks: pause.resumeLinks,
+ pauseKind: pause.pauseKind,
+ resumeAt: pause.resumeAt,
}))
return {
diff --git a/apps/sim/executor/execution/types.ts b/apps/sim/executor/execution/types.ts
index 3c5130d8220..042ca72ac94 100644
--- a/apps/sim/executor/execution/types.ts
+++ b/apps/sim/executor/execution/types.ts
@@ -26,6 +26,7 @@ export interface ExecutionMetadata {
enforceCredentialAccess?: boolean
pendingBlocks?: string[]
resumeFromSnapshot?: boolean
+ resumeTerminalNoop?: boolean
credentialAccountUserId?: string
workflowStateOverride?: {
blocks: Record
@@ -54,6 +55,7 @@ export interface SerializableExecutionState {
activeExecutionPath: string[]
pendingQueue?: string[]
remainingEdges?: Edge[]
+ resumeTerminalNoop?: boolean
dagIncomingEdges?: Record
completedPauseContexts?: string[]
}
@@ -133,8 +135,9 @@ export interface ExecutionCallbacks {
blockId: string,
childWorkflowInstanceId: string,
iterationContext?: IterationContext,
- executionOrder?: number
- ) => void
+ executionOrder?: number,
+ childWorkflowContext?: ChildWorkflowContext
+ ) => Promise
}
export interface ContextExtensions {
@@ -200,8 +203,9 @@ export interface ContextExtensions {
blockId: string,
childWorkflowInstanceId: string,
iterationContext?: IterationContext,
- executionOrder?: number
- ) => void
+ executionOrder?: number,
+ childWorkflowContext?: ChildWorkflowContext
+ ) => Promise
/**
* Run-from-block configuration. When provided, executor runs in partial
diff --git a/apps/sim/executor/handlers/human-in-the-loop/human-in-the-loop-handler.ts b/apps/sim/executor/handlers/human-in-the-loop/human-in-the-loop-handler.ts
index 2208e9911d7..0634aa65dd4 100644
--- a/apps/sim/executor/handlers/human-in-the-loop/human-in-the-loop-handler.ts
+++ b/apps/sim/executor/handlers/human-in-the-loop/human-in-the-loop-handler.ts
@@ -188,6 +188,7 @@ export class HumanInTheLoopBlockHandler implements BlockHandler {
parallelScope,
loopScope,
resumeLinks,
+ pauseKind: 'human',
}
const responseOutput: Record = {
diff --git a/apps/sim/executor/handlers/wait/wait-handler.test.ts b/apps/sim/executor/handlers/wait/wait-handler.test.ts
index 27a4e33bae1..9e1aa288d3e 100644
--- a/apps/sim/executor/handlers/wait/wait-handler.test.ts
+++ b/apps/sim/executor/handlers/wait/wait-handler.test.ts
@@ -1,3 +1,6 @@
+/**
+ * @vitest-environment node
+ */
import '@sim/testing/mocks/executor'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
@@ -50,11 +53,8 @@ describe('WaitBlockHandler', () => {
expect(handler.canHandle(nonWaitBlock)).toBe(false)
})
- it('should wait for specified seconds', async () => {
- const inputs = {
- timeValue: '5',
- timeUnit: 'seconds',
- }
+ it('should wait in-process for short waits in seconds', async () => {
+ const inputs = { timeValue: '5', timeUnit: 'seconds' }
const executePromise = handler.execute(mockContext, mockBlock, inputs)
@@ -68,138 +68,138 @@ describe('WaitBlockHandler', () => {
})
})
- it('should wait for specified minutes', async () => {
- const inputs = {
- timeValue: '2',
- timeUnit: 'minutes',
- }
+ it('should wait in-process for short waits in minutes', async () => {
+ const inputs = { timeValue: '2', timeUnit: 'minutes' }
const executePromise = handler.execute(mockContext, mockBlock, inputs)
- await vi.advanceTimersByTimeAsync(120000)
+ await vi.advanceTimersByTimeAsync(120_000)
const result = await executePromise
expect(result).toEqual({
- waitDuration: 120000,
+ waitDuration: 120_000,
status: 'completed',
})
})
- it('should use default values when not provided', async () => {
- const inputs = {}
-
- const executePromise = handler.execute(mockContext, mockBlock, inputs)
+ it('should default to 10 seconds when inputs are not provided', async () => {
+ const executePromise = handler.execute(mockContext, mockBlock, {})
- await vi.advanceTimersByTimeAsync(10000)
+ await vi.advanceTimersByTimeAsync(10_000)
const result = await executePromise
expect(result).toEqual({
- waitDuration: 10000,
+ waitDuration: 10_000,
status: 'completed',
})
})
- it('should throw error for negative wait times', async () => {
- const inputs = {
- timeValue: '-5',
- timeUnit: 'seconds',
- }
-
- await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
- 'Wait amount must be a positive number'
- )
+ it('should reject negative wait amounts', async () => {
+ await expect(
+ handler.execute(mockContext, mockBlock, { timeValue: '-5', timeUnit: 'seconds' })
+ ).rejects.toThrow('Wait amount must be a positive number')
})
- it('should throw error for zero wait time', async () => {
- const inputs = {
- timeValue: '0',
- timeUnit: 'seconds',
- }
-
- await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
- 'Wait amount must be a positive number'
- )
+ it('should reject zero wait amounts', async () => {
+ await expect(
+ handler.execute(mockContext, mockBlock, { timeValue: '0', timeUnit: 'seconds' })
+ ).rejects.toThrow('Wait amount must be a positive number')
})
- it('should throw error for non-numeric wait times', async () => {
- const inputs = {
- timeValue: 'abc',
- timeUnit: 'seconds',
- }
-
- await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
- 'Wait amount must be a positive number'
- )
+ it('should reject non-numeric wait amounts', async () => {
+ await expect(
+ handler.execute(mockContext, mockBlock, { timeValue: 'abc', timeUnit: 'seconds' })
+ ).rejects.toThrow('Wait amount must be a positive number')
})
- it('should throw error when wait time exceeds maximum (seconds)', async () => {
- const inputs = {
- timeValue: '601',
- timeUnit: 'seconds',
- }
-
- await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
- 'Wait time exceeds maximum of 600 seconds'
- )
+ it('should reject unknown wait units', async () => {
+ await expect(
+ handler.execute(mockContext, mockBlock, { timeValue: '5', timeUnit: 'fortnights' })
+ ).rejects.toThrow('Unknown wait unit: fortnights')
})
- it('should throw error when wait time exceeds maximum (minutes)', async () => {
- const inputs = {
- timeValue: '11',
- timeUnit: 'minutes',
- }
-
- await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
- 'Wait time exceeds maximum of 10 minutes'
- )
+ it('should reject waits longer than the 30-day ceiling', async () => {
+ await expect(
+ handler.execute(mockContext, mockBlock, { timeValue: '31', timeUnit: 'days' })
+ ).rejects.toThrow('Wait time exceeds maximum of 30 days')
})
- it('should allow maximum wait time of exactly 10 minutes', async () => {
- const inputs = {
- timeValue: '10',
- timeUnit: 'minutes',
- }
+ it('should still execute in-process at the 5-minute boundary', async () => {
+ const inputs = { timeValue: '5', timeUnit: 'minutes' }
const executePromise = handler.execute(mockContext, mockBlock, inputs)
- await vi.advanceTimersByTimeAsync(600000)
+ await vi.advanceTimersByTimeAsync(5 * 60 * 1000)
const result = await executePromise
expect(result).toEqual({
- waitDuration: 600000,
+ waitDuration: 5 * 60 * 1000,
status: 'completed',
})
})
- it('should allow maximum wait time of exactly 600 seconds', async () => {
- const inputs = {
- timeValue: '600',
- timeUnit: 'seconds',
- }
+ it('should suspend the workflow when wait exceeds the in-process threshold', async () => {
+ vi.setSystemTime(new Date('2026-04-28T00:00:00.000Z'))
- const executePromise = handler.execute(mockContext, mockBlock, inputs)
+ const inputs = { timeValue: '10', timeUnit: 'minutes' }
- await vi.advanceTimersByTimeAsync(600000)
+ const result = (await handler.execute(mockContext, mockBlock, inputs)) as Record
- const result = await executePromise
+ const waitMs = 10 * 60 * 1000
+ const expectedResumeAt = new Date(Date.now() + waitMs).toISOString()
- expect(result).toEqual({
- waitDuration: 600000,
- status: 'completed',
- })
+ expect(result.status).toBe('waiting')
+ expect(result.waitDuration).toBe(waitMs)
+ expect(result.resumeAt).toBe(expectedResumeAt)
+
+ const pauseMetadata = result._pauseMetadata
+ expect(pauseMetadata).toBeDefined()
+ expect(pauseMetadata.pauseKind).toBe('time')
+ expect(pauseMetadata.resumeAt).toBe(expectedResumeAt)
+ expect(pauseMetadata.contextId).toBe('wait-block-1')
+ expect(pauseMetadata.blockId).toBe('wait-block-1')
+ expect(pauseMetadata.response).toEqual({ waitDuration: waitMs, resumeAt: expectedResumeAt })
+ })
+
+ it('should suspend the workflow for multi-day waits', async () => {
+ vi.setSystemTime(new Date('2026-04-28T00:00:00.000Z'))
+
+ const inputs = { timeValue: '2', timeUnit: 'days' }
+
+ const result = (await handler.execute(mockContext, mockBlock, inputs)) as Record
+
+ const waitMs = 2 * 24 * 60 * 60 * 1000
+ const expectedResumeAt = new Date(Date.now() + waitMs).toISOString()
+
+ expect(result.status).toBe('waiting')
+ expect(result.waitDuration).toBe(waitMs)
+ expect(result.resumeAt).toBe(expectedResumeAt)
+ expect(result._pauseMetadata.pauseKind).toBe('time')
+ expect(result._pauseMetadata.resumeAt).toBe(expectedResumeAt)
+ })
+
+ it('should accept hours and convert to milliseconds', async () => {
+ vi.setSystemTime(new Date('2026-04-28T00:00:00.000Z'))
+
+ const result = (await handler.execute(mockContext, mockBlock, {
+ timeValue: '3',
+ timeUnit: 'hours',
+ })) as Record
+
+ const waitMs = 3 * 60 * 60 * 1000
+ expect(result.waitDuration).toBe(waitMs)
+ expect(result.status).toBe('waiting')
+ expect(result._pauseMetadata.pauseKind).toBe('time')
})
it('should handle cancellation via AbortSignal', async () => {
const abortController = new AbortController()
mockContext.abortSignal = abortController.signal
- const inputs = {
- timeValue: '30',
- timeUnit: 'seconds',
- }
+ const inputs = { timeValue: '30', timeUnit: 'seconds' }
const executePromise = handler.execute(mockContext, mockBlock, inputs)
@@ -220,10 +220,7 @@ describe('WaitBlockHandler', () => {
abortController.abort()
mockContext.abortSignal = abortController.signal
- const inputs = {
- timeValue: '10',
- timeUnit: 'seconds',
- }
+ const inputs = { timeValue: '10', timeUnit: 'seconds' }
const result = await handler.execute(mockContext, mockBlock, inputs)
@@ -233,62 +230,47 @@ describe('WaitBlockHandler', () => {
})
})
- it('should handle partial completion before cancellation', async () => {
+ it('should not invoke the in-process sleep when suspending; AbortSignal is irrelevant for long waits', async () => {
+ vi.setSystemTime(new Date('2026-04-28T00:00:00.000Z'))
const abortController = new AbortController()
- mockContext.abortSignal = abortController.signal
-
- const inputs = {
- timeValue: '100',
- timeUnit: 'seconds',
- }
-
- const executePromise = handler.execute(mockContext, mockBlock, inputs)
-
- await vi.advanceTimersByTimeAsync(50000)
abortController.abort()
- await vi.advanceTimersByTimeAsync(1)
+ mockContext.abortSignal = abortController.signal
- const result = await executePromise
+ const result = (await handler.execute(mockContext, mockBlock, {
+ timeValue: '1',
+ timeUnit: 'hours',
+ })) as Record
- expect(result).toEqual({
- waitDuration: 100000,
- status: 'cancelled',
- })
+ expect(result.status).toBe('waiting')
+ expect(result._pauseMetadata.pauseKind).toBe('time')
})
- it('should handle fractional seconds by converting to integers', async () => {
- const inputs = {
- timeValue: '5.7',
- timeUnit: 'seconds',
- }
+ it('should preserve fractional time values for larger units', async () => {
+ const inputs = { timeValue: '5.5', timeUnit: 'seconds' }
const executePromise = handler.execute(mockContext, mockBlock, inputs)
- await vi.advanceTimersByTimeAsync(5000)
+ await vi.advanceTimersByTimeAsync(5500)
const result = await executePromise
expect(result).toEqual({
- waitDuration: 5000,
+ waitDuration: 5500,
status: 'completed',
})
})
- it('should handle very short wait times', async () => {
- const inputs = {
- timeValue: '1',
- timeUnit: 'seconds',
- }
+ it('should suspend a 1.5-day wait without truncating', async () => {
+ vi.setSystemTime(new Date('2026-04-28T00:00:00.000Z'))
- const executePromise = handler.execute(mockContext, mockBlock, inputs)
-
- await vi.advanceTimersByTimeAsync(1000)
-
- const result = await executePromise
+ const result = (await handler.execute(mockContext, mockBlock, {
+ timeValue: '1.5',
+ timeUnit: 'days',
+ })) as Record
- expect(result).toEqual({
- waitDuration: 1000,
- status: 'completed',
- })
+ const waitMs = 1.5 * 24 * 60 * 60 * 1000
+ expect(result.waitDuration).toBe(waitMs)
+ expect(result.status).toBe('waiting')
+ expect(result._pauseMetadata.pauseKind).toBe('time')
})
})
diff --git a/apps/sim/executor/handlers/wait/wait-handler.ts b/apps/sim/executor/handlers/wait/wait-handler.ts
index 09ce055ec5a..43fbc4a2f31 100644
--- a/apps/sim/executor/handlers/wait/wait-handler.ts
+++ b/apps/sim/executor/handlers/wait/wait-handler.ts
@@ -1,10 +1,21 @@
import { isExecutionCancelled, isRedisCancellationEnabled } from '@/lib/execution/cancellation'
+import type { BlockOutput } from '@/blocks/types'
import { BlockType } from '@/executor/constants'
-import type { BlockHandler, ExecutionContext } from '@/executor/types'
+import {
+ generatePauseContextId,
+ mapNodeMetadataToPauseScopes,
+} from '@/executor/human-in-the-loop/utils'
+import type { BlockHandler, ExecutionContext, PauseMetadata } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
const CANCELLATION_CHECK_INTERVAL_MS = 500
+/** Threshold below which we hold the wait in-process; above, we suspend via PauseMetadata. */
+const INPROCESS_MAX_MS = 5 * 60 * 1000
+
+/** Hard ceiling on configurable wait duration. */
+const MAX_WAIT_MS = 30 * 24 * 60 * 60 * 1000
+
interface SleepOptions {
signal?: AbortSignal
executionId?: string
@@ -64,8 +75,26 @@ const sleep = async (ms: number, options: SleepOptions = {}): Promise =
})
}
+const UNIT_TO_MS = {
+ seconds: 1000,
+ minutes: 60 * 1000,
+ hours: 60 * 60 * 1000,
+ days: 24 * 60 * 60 * 1000,
+} as const satisfies Record
+
+type WaitUnit = keyof typeof UNIT_TO_MS
+
+function isWaitUnit(value: string): value is WaitUnit {
+ return value in UNIT_TO_MS
+}
+
/**
- * Handler for Wait blocks that pause workflow execution for a time delay
+ * Handler for Wait blocks that pause workflow execution for a time delay.
+ *
+ * Waits up to {@link INPROCESS_MAX_MS} are held in-process via an interruptible sleep.
+ * Longer waits suspend the workflow by returning {@link PauseMetadata} with
+ * `pauseKind: 'time'`; the cron-driven resume poller (see `/api/resume/poll`) picks
+ * the execution back up once `resumeAt` is reached.
*/
export class WaitBlockHandler implements BlockHandler {
canHandle(block: SerializedBlock): boolean {
@@ -76,40 +105,81 @@ export class WaitBlockHandler implements BlockHandler {
ctx: ExecutionContext,
block: SerializedBlock,
inputs: Record
- ): Promise {
- const timeValue = Number.parseInt(inputs.timeValue || '10', 10)
+ ): Promise {
+ return this.executeWithNode(ctx, block, inputs, { nodeId: block.id })
+ }
+
+ async executeWithNode(
+ ctx: ExecutionContext,
+ block: SerializedBlock,
+ inputs: Record,
+ nodeMetadata: {
+ nodeId: string
+ loopId?: string
+ parallelId?: string
+ branchIndex?: number
+ branchTotal?: number
+ originalBlockId?: string
+ isLoopNode?: boolean
+ executionOrder?: number
+ }
+ ): Promise {
+ const timeValue = Number.parseFloat(inputs.timeValue || '10')
const timeUnit = inputs.timeUnit || 'seconds'
- if (Number.isNaN(timeValue) || timeValue <= 0) {
+ if (!Number.isFinite(timeValue) || timeValue <= 0) {
throw new Error('Wait amount must be a positive number')
}
- let waitMs = timeValue * 1000
- if (timeUnit === 'minutes') {
- waitMs = timeValue * 60 * 1000
+ if (!isWaitUnit(timeUnit)) {
+ throw new Error(`Unknown wait unit: ${timeUnit}`)
}
+ const waitMs = Math.round(timeValue * UNIT_TO_MS[timeUnit])
- const maxWaitMs = 10 * 60 * 1000
- if (waitMs > maxWaitMs) {
- const maxDisplay = timeUnit === 'minutes' ? '10 minutes' : '600 seconds'
- throw new Error(`Wait time exceeds maximum of ${maxDisplay}`)
+ if (waitMs > MAX_WAIT_MS) {
+ throw new Error('Wait time exceeds maximum of 30 days')
}
- const completed = await sleep(waitMs, {
- signal: ctx.abortSignal,
- executionId: ctx.executionId,
- })
+ if (waitMs <= INPROCESS_MAX_MS) {
+ const completed = await sleep(waitMs, {
+ signal: ctx.abortSignal,
+ executionId: ctx.executionId,
+ })
+
+ if (!completed) {
+ return {
+ waitDuration: waitMs,
+ status: 'cancelled',
+ }
+ }
- if (!completed) {
return {
waitDuration: waitMs,
- status: 'cancelled',
+ status: 'completed',
}
}
+ const { parallelScope, loopScope } = mapNodeMetadataToPauseScopes(ctx, nodeMetadata)
+ const contextId = generatePauseContextId(block.id, nodeMetadata, loopScope)
+ const now = new Date()
+ const resumeAt = new Date(now.getTime() + waitMs).toISOString()
+
+ const pauseMetadata: PauseMetadata = {
+ contextId,
+ blockId: nodeMetadata.nodeId,
+ response: { waitDuration: waitMs, resumeAt },
+ timestamp: now.toISOString(),
+ parallelScope,
+ loopScope,
+ pauseKind: 'time',
+ resumeAt,
+ }
+
return {
waitDuration: waitMs,
- status: 'completed',
+ status: 'waiting',
+ resumeAt,
+ _pauseMetadata: pauseMetadata,
}
}
}
diff --git a/apps/sim/executor/handlers/workflow/workflow-handler.ts b/apps/sim/executor/handlers/workflow/workflow-handler.ts
index c99b907ea5f..e2f329c206a 100644
--- a/apps/sim/executor/handlers/workflow/workflow-handler.ts
+++ b/apps/sim/executor/handlers/workflow/workflow-handler.ts
@@ -156,11 +156,12 @@ export class WorkflowBlockHandler implements BlockHandler {
? (nodeMetadata.originalBlockId ?? nodeMetadata.nodeId)
: block.id
const iterationContext = nodeMetadata ? getIterationContext(ctx, nodeMetadata) : undefined
- ctx.onChildWorkflowInstanceReady?.(
+ await ctx.onChildWorkflowInstanceReady?.(
effectiveBlockId,
instanceId,
iterationContext,
- nodeMetadata?.executionOrder
+ nodeMetadata?.executionOrder,
+ ctx.childWorkflowContext
)
}
diff --git a/apps/sim/executor/types.ts b/apps/sim/executor/types.ts
index 7d844257d1e..8195f385a77 100644
--- a/apps/sim/executor/types.ts
+++ b/apps/sim/executor/types.ts
@@ -33,6 +33,8 @@ export interface LoopPauseScope {
iteration: number
}
+export type PauseKind = 'human' | 'time'
+
export interface PauseMetadata {
contextId: string
blockId: string
@@ -47,6 +49,9 @@ export interface PauseMetadata {
executionId: string
workflowId: string
}
+ pauseKind: PauseKind
+ /** ISO timestamp at which a `pauseKind: 'time'` pause becomes due for automatic resume. */
+ resumeAt?: string
}
export type ResumeStatus = 'paused' | 'resumed' | 'failed' | 'queued' | 'resuming'
@@ -67,6 +72,8 @@ export interface PausePoint {
executionId: string
workflowId: string
}
+ pauseKind: PauseKind
+ resumeAt?: string
}
export interface SerializedSnapshot {
@@ -261,6 +268,7 @@ export interface ExecutionMetadata {
triggerBlockId?: string
useDraftState?: boolean
resumeFromSnapshot?: boolean
+ resumeTerminalNoop?: boolean
}
export interface BlockState {
@@ -378,8 +386,9 @@ export interface ExecutionContext {
blockId: string,
childWorkflowInstanceId: string,
iterationContext?: IterationContext,
- executionOrder?: number
- ) => void
+ executionOrder?: number,
+ childWorkflowContext?: ChildWorkflowContext
+ ) => Promise
/**
* AbortSignal for cancellation support.
diff --git a/apps/sim/hooks/queries/workspace-files.ts b/apps/sim/hooks/queries/workspace-files.ts
index a894c4988d7..5e4364c5af6 100644
--- a/apps/sim/hooks/queries/workspace-files.ts
+++ b/apps/sim/hooks/queries/workspace-files.ts
@@ -205,6 +205,7 @@ interface UploadFileParams {
file: File
onProgress?: (event: UploadProgressEvent) => void
signal?: AbortSignal
+ skipToast?: boolean
}
interface UploadFileResponse {
@@ -324,7 +325,9 @@ export function useUploadWorkspaceFile() {
queryClient.invalidateQueries({ queryKey: workspaceFilesKeys.storageInfo() })
},
onSuccess: (_data, variables) => {
- toast.success(`Uploaded "${variables.file.name}"`)
+ if (!variables.skipToast) {
+ toast.success(`Uploaded "${variables.file.name}"`)
+ }
},
onError: (error, variables) => {
logger.error('Failed to upload file:', error)
diff --git a/apps/sim/hooks/selectors/helpers.ts b/apps/sim/hooks/selectors/helpers.ts
index 837205385af..ca4d2151abf 100644
--- a/apps/sim/hooks/selectors/helpers.ts
+++ b/apps/sim/hooks/selectors/helpers.ts
@@ -1,13 +1,28 @@
import { requestJson } from '@/lib/api/client/request'
import { oauthTokenContract } from '@/lib/api/contracts/selectors'
+export interface OAuthTokenBundle {
+ accessToken: string
+ cloudId?: string
+ domain?: string
+}
+
+/**
+ * Returns the access token plus any provider-specific extras (e.g. `cloudId` for
+ * Atlassian service accounts whose tokens cannot call api.atlassian.com/oauth/token/accessible-resources).
+ */
export async function fetchOAuthToken(
credentialId: string,
workflowId?: string
-): Promise {
+): Promise {
if (!credentialId) return null
const token = await requestJson(oauthTokenContract, {
body: { credentialId, workflowId },
})
- return token.accessToken ?? null
+ if (!token.accessToken) return null
+ return {
+ accessToken: token.accessToken,
+ cloudId: token.cloudId,
+ domain: token.domain,
+ }
}
diff --git a/apps/sim/hooks/selectors/providers/confluence/selectors.ts b/apps/sim/hooks/selectors/providers/confluence/selectors.ts
index 2daaf5290c7..84e0e528609 100644
--- a/apps/sim/hooks/selectors/providers/confluence/selectors.ts
+++ b/apps/sim/hooks/selectors/providers/confluence/selectors.ts
@@ -4,6 +4,11 @@ import { fetchOAuthToken } from '@/hooks/selectors/helpers'
import { ensureCredential, ensureDomain, SELECTOR_STALE } from '@/hooks/selectors/providers/shared'
import type { SelectorDefinition, SelectorKey, SelectorQueryArgs } from '@/hooks/selectors/types'
+function formatConfluenceSpaceLabel(space: { name: string; key: string; status?: string }): string {
+ const base = `${space.name} (${space.key})`
+ return space.status === 'archived' ? `${base} — archived` : base
+}
+
export const confluenceSelectors = {
'confluence.spaces': {
key: 'confluence.spaces',
@@ -17,6 +22,28 @@ export const confluenceSelectors = {
],
enabled: ({ context }) => Boolean(context.oauthCredential && context.domain),
fetchList: async ({ context, signal }: SelectorQueryArgs) => {
+ const credentialId = ensureCredential(context, 'confluence.spaces')
+ const domain = ensureDomain(context, 'confluence.spaces')
+ const collected: { id: string; label: string }[] = []
+ let cursor: string | undefined
+ do {
+ const data = await requestJson(selectorContracts.confluenceSpacesSelectorContract, {
+ body: {
+ credential: credentialId,
+ workflowId: context.workflowId,
+ domain,
+ cursor,
+ },
+ signal,
+ })
+ for (const space of data.spaces || []) {
+ collected.push({ id: space.id, label: formatConfluenceSpaceLabel(space) })
+ }
+ cursor = data.nextCursor
+ } while (cursor)
+ return collected
+ },
+ fetchPage: async ({ context, cursor, signal }) => {
const credentialId = ensureCredential(context, 'confluence.spaces')
const domain = ensureDomain(context, 'confluence.spaces')
const data = await requestJson(selectorContracts.confluenceSpacesSelectorContract, {
@@ -24,14 +51,24 @@ export const confluenceSelectors = {
credential: credentialId,
workflowId: context.workflowId,
domain,
+ cursor,
},
signal,
})
- return (data.spaces || []).map((space) => ({
- id: space.id,
- label: `${space.name} (${space.key})`,
- }))
+ return {
+ items: (data.spaces || []).map((space) => ({
+ id: space.id,
+ label: formatConfluenceSpaceLabel(space),
+ })),
+ nextCursor: data.nextCursor,
+ }
},
+ /**
+ * Resolves a single space label. Hits only the first page — the dropdown's
+ * `fetchPage` stream populates the options cache for spaces beyond page 1,
+ * and `useSelectorOptionMap` merges them in. Walking all pages here would
+ * double API load since the stream is already running in parallel.
+ */
fetchById: async ({ context, detailId, signal }: SelectorQueryArgs) => {
if (!detailId) return null
const credentialId = ensureCredential(context, 'confluence.spaces')
@@ -46,7 +83,7 @@ export const confluenceSelectors = {
})
const space = (data.spaces || []).find((s) => s.id === detailId) ?? null
if (!space) return null
- return { id: space.id, label: `${space.name} (${space.key})` }
+ return { id: space.id, label: formatConfluenceSpaceLabel(space) }
},
},
'confluence.pages': {
@@ -67,14 +104,15 @@ export const confluenceSelectors = {
fetchList: async ({ context, search, signal }: SelectorQueryArgs) => {
const credentialId = ensureCredential(context, 'confluence.pages')
const domain = ensureDomain(context, 'confluence.pages')
- const accessToken = await fetchOAuthToken(credentialId, context.workflowId)
- if (!accessToken) {
+ const bundle = await fetchOAuthToken(credentialId, context.workflowId)
+ if (!bundle) {
throw new Error('Missing Confluence access token')
}
const data = await requestJson(selectorContracts.confluencePagesSelectorContract, {
body: {
domain,
- accessToken,
+ accessToken: bundle.accessToken,
+ cloudId: bundle.cloudId,
title: search,
},
signal,
@@ -88,14 +126,15 @@ export const confluenceSelectors = {
if (!detailId) return null
const credentialId = ensureCredential(context, 'confluence.pages')
const domain = ensureDomain(context, 'confluence.pages')
- const accessToken = await fetchOAuthToken(credentialId, context.workflowId)
- if (!accessToken) {
+ const bundle = await fetchOAuthToken(credentialId, context.workflowId)
+ if (!bundle) {
throw new Error('Missing Confluence access token')
}
const data = await requestJson(selectorContracts.confluencePageSelectorContract, {
body: {
domain,
- accessToken,
+ accessToken: bundle.accessToken,
+ cloudId: bundle.cloudId,
pageId: detailId,
},
signal,
diff --git a/apps/sim/hooks/selectors/providers/jira/selectors.ts b/apps/sim/hooks/selectors/providers/jira/selectors.ts
index 9c8477e31b2..6175f8a0961 100644
--- a/apps/sim/hooks/selectors/providers/jira/selectors.ts
+++ b/apps/sim/hooks/selectors/providers/jira/selectors.ts
@@ -23,14 +23,15 @@ export const jiraSelectors = {
fetchList: async ({ context, search, signal }: SelectorQueryArgs) => {
const credentialId = ensureCredential(context, 'jira.projects')
const domain = ensureDomain(context, 'jira.projects')
- const accessToken = await fetchOAuthToken(credentialId, context.workflowId)
- if (!accessToken) {
+ const bundle = await fetchOAuthToken(credentialId, context.workflowId)
+ if (!bundle) {
throw new Error('Missing Jira access token')
}
const data = await requestJson(selectorContracts.jiraProjectsSelectorContract, {
query: {
domain,
- accessToken,
+ accessToken: bundle.accessToken,
+ cloudId: bundle.cloudId,
query: search,
},
signal,
@@ -44,14 +45,15 @@ export const jiraSelectors = {
if (!detailId) return null
const credentialId = ensureCredential(context, 'jira.projects')
const domain = ensureDomain(context, 'jira.projects')
- const accessToken = await fetchOAuthToken(credentialId, context.workflowId)
- if (!accessToken) {
+ const bundle = await fetchOAuthToken(credentialId, context.workflowId)
+ if (!bundle) {
throw new Error('Missing Jira access token')
}
const data = await requestJson(selectorContracts.jiraProjectSelectorContract, {
body: {
domain,
- accessToken,
+ accessToken: bundle.accessToken,
+ cloudId: bundle.cloudId,
projectId: detailId,
},
signal,
@@ -82,14 +84,15 @@ export const jiraSelectors = {
fetchList: async ({ context, search, signal }: SelectorQueryArgs) => {
const credentialId = ensureCredential(context, 'jira.issues')
const domain = ensureDomain(context, 'jira.issues')
- const accessToken = await fetchOAuthToken(credentialId, context.workflowId)
- if (!accessToken) {
+ const bundle = await fetchOAuthToken(credentialId, context.workflowId)
+ if (!bundle) {
throw new Error('Missing Jira access token')
}
const data = await requestJson(selectorContracts.jiraIssuesSelectorContract, {
query: {
domain,
- accessToken,
+ accessToken: bundle.accessToken,
+ cloudId: bundle.cloudId,
projectId: context.projectId,
query: search,
},
@@ -110,14 +113,15 @@ export const jiraSelectors = {
if (!detailId) return null
const credentialId = ensureCredential(context, 'jira.issues')
const domain = ensureDomain(context, 'jira.issues')
- const accessToken = await fetchOAuthToken(credentialId, context.workflowId)
- if (!accessToken) {
+ const bundle = await fetchOAuthToken(credentialId, context.workflowId)
+ if (!bundle) {
throw new Error('Missing Jira access token')
}
const data = await requestJson(selectorContracts.jiraIssueSelectorContract, {
body: {
domain,
- accessToken,
+ accessToken: bundle.accessToken,
+ cloudId: bundle.cloudId,
issueKeys: [detailId],
},
signal,
diff --git a/apps/sim/hooks/selectors/types.ts b/apps/sim/hooks/selectors/types.ts
index ef8724cbe43..be96287a791 100644
--- a/apps/sim/hooks/selectors/types.ts
+++ b/apps/sim/hooks/selectors/types.ts
@@ -100,11 +100,26 @@ export interface SelectorQueryArgs {
signal?: AbortSignal
}
+export interface SelectorPage {
+ items: SelectorOption[]
+ nextCursor?: string
+}
+
+export interface SelectorPageArgs extends SelectorQueryArgs {
+ cursor?: string
+}
+
export interface SelectorDefinition {
key: SelectorKey
contracts?: readonly AnyApiRouteContract[]
getQueryKey: (args: SelectorQueryArgs) => QueryKey
fetchList: (args: SelectorQueryArgs) => Promise
+ /**
+ * Optional. When defined, the selector hook fetches one page at a time and
+ * auto-drains remaining pages so the dropdown populates progressively.
+ * Returns `{ items, nextCursor }`; `nextCursor: undefined` ends the stream.
+ */
+ fetchPage?: (args: SelectorPageArgs) => Promise
fetchById?: (args: SelectorQueryArgs) => Promise
enabled?: (args: SelectorQueryArgs) => boolean
staleTime?: number
diff --git a/apps/sim/hooks/selectors/use-selector-query.ts b/apps/sim/hooks/selectors/use-selector-query.ts
index a4444b762aa..8eb4755834e 100644
--- a/apps/sim/hooks/selectors/use-selector-query.ts
+++ b/apps/sim/hooks/selectors/use-selector-query.ts
@@ -1,9 +1,14 @@
-import { useMemo } from 'react'
-import { useQuery } from '@tanstack/react-query'
+import { useEffect, useMemo } from 'react'
+import { useInfiniteQuery, useQuery } from '@tanstack/react-query'
import { extractEnvVarName, isEnvVarReference, isReference } from '@/executor/constants'
import { usePersonalEnvironment } from '@/hooks/queries/environment'
import { getSelectorDefinition, mergeOption } from '@/hooks/selectors/registry'
-import type { SelectorKey, SelectorOption, SelectorQueryArgs } from '@/hooks/selectors/types'
+import type {
+ SelectorKey,
+ SelectorOption,
+ SelectorPage,
+ SelectorQueryArgs,
+} from '@/hooks/selectors/types'
interface SelectorHookArgs extends Omit {
search?: string
@@ -11,7 +16,29 @@ interface SelectorHookArgs extends Omit {
enabled?: boolean
}
-export function useSelectorOptions(key: SelectorKey, args: SelectorHookArgs) {
+export interface SelectorOptionsResult {
+ data: SelectorOption[] | undefined
+ isLoading: boolean
+ isFetching: boolean
+ /**
+ * True while paginated selectors are draining remaining pages in the
+ * background. Always false for non-paginated selectors.
+ */
+ isFetchingMore: boolean
+ /**
+ * True when the paginated selector still has more pages queued. Always false
+ * for non-paginated selectors.
+ */
+ hasMore: boolean
+ error: Error | null
+}
+
+const EMPTY_PAGE: SelectorPage = { items: [], nextCursor: undefined }
+
+export function useSelectorOptions(
+ key: SelectorKey,
+ args: SelectorHookArgs
+): SelectorOptionsResult {
const definition = getSelectorDefinition(key)
const queryArgs: SelectorQueryArgs = {
key,
@@ -19,12 +46,65 @@ export function useSelectorOptions(key: SelectorKey, args: SelectorHookArgs) {
search: args.search,
}
const isEnabled = args.enabled ?? (definition.enabled ? definition.enabled(queryArgs) : true)
- return useQuery({
+ const supportsPagination = Boolean(definition.fetchPage)
+
+ const flatQuery = useQuery({
queryKey: definition.getQueryKey(queryArgs),
queryFn: ({ signal }) => definition.fetchList({ ...queryArgs, signal }),
- enabled: isEnabled,
+ enabled: !supportsPagination && isEnabled,
+ staleTime: definition.staleTime ?? 30_000,
+ })
+
+ const pagedQuery = useInfiniteQuery({
+ queryKey: [...definition.getQueryKey(queryArgs), 'paged'],
+ queryFn: ({ pageParam, signal }) => {
+ if (!definition.fetchPage) return Promise.resolve(EMPTY_PAGE)
+ return definition.fetchPage({
+ ...queryArgs,
+ cursor: pageParam as string | undefined,
+ signal,
+ })
+ },
+ getNextPageParam: (last) => last.nextCursor,
+ initialPageParam: undefined as string | undefined,
+ enabled: supportsPagination && isEnabled,
staleTime: definition.staleTime ?? 30_000,
})
+
+ const { hasNextPage, isFetchingNextPage, fetchNextPage, isError } = pagedQuery
+ useEffect(() => {
+ if (!supportsPagination) return
+ if (isError) return
+ if (hasNextPage && !isFetchingNextPage) {
+ void fetchNextPage()
+ }
+ }, [supportsPagination, hasNextPage, isFetchingNextPage, isError, fetchNextPage])
+
+ const pagedOptions = useMemo(() => {
+ if (!supportsPagination) return undefined
+ if (!pagedQuery.data) return undefined
+ return pagedQuery.data.pages.flatMap((page) => page.items)
+ }, [supportsPagination, pagedQuery.data])
+
+ if (supportsPagination) {
+ return {
+ data: pagedOptions,
+ isLoading: pagedQuery.isLoading,
+ isFetching: pagedQuery.isFetching,
+ isFetchingMore: pagedQuery.isFetchingNextPage,
+ hasMore: pagedQuery.hasNextPage ?? false,
+ error: (pagedQuery.error as Error | null) ?? null,
+ }
+ }
+
+ return {
+ data: flatQuery.data,
+ isLoading: flatQuery.isLoading,
+ isFetching: flatQuery.isFetching,
+ isFetchingMore: false,
+ hasMore: false,
+ error: (flatQuery.error as Error | null) ?? null,
+ }
}
export function useSelectorOptionDetail(
diff --git a/apps/sim/hooks/use-execution-stream.test.ts b/apps/sim/hooks/use-execution-stream.test.ts
new file mode 100644
index 00000000000..da52635ff99
--- /dev/null
+++ b/apps/sim/hooks/use-execution-stream.test.ts
@@ -0,0 +1,87 @@
+/**
+ * @vitest-environment node
+ */
+import { describe, expect, it, vi } from 'vitest'
+import type { ExecutionEvent } from '@/lib/workflows/executor/execution-events'
+import { processSSEStream } from '@/hooks/use-execution-stream'
+
+function streamEvents(events: ExecutionEvent[]): ReadableStream {
+ const encoder = new TextEncoder()
+ return new ReadableStream({
+ start(controller) {
+ for (const event of events) {
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(event)}\n\n`))
+ }
+ controller.enqueue(encoder.encode('data: [DONE]\n\n'))
+ controller.close()
+ },
+ })
+}
+
+describe('processSSEStream', () => {
+ it('acknowledges event ids only after the matching handler completes', async () => {
+ const order: string[] = []
+ const event: ExecutionEvent = {
+ type: 'block:started',
+ eventId: 5,
+ timestamp: new Date().toISOString(),
+ executionId: 'exec-1',
+ workflowId: 'wf-1',
+ data: {
+ blockId: 'block-1',
+ blockName: 'Block 1',
+ blockType: 'function',
+ executionOrder: 1,
+ },
+ }
+
+ await processSSEStream(
+ streamEvents([event]).getReader(),
+ {
+ onBlockStarted: async () => {
+ order.push('handler:start')
+ await Promise.resolve()
+ order.push('handler:end')
+ },
+ onEventId: vi.fn(async () => {
+ order.push('event-id')
+ }),
+ },
+ 'test'
+ )
+
+ expect(order).toEqual(['handler:start', 'handler:end', 'event-id'])
+ })
+
+ it('propagates callback failures without acknowledging the event id', async () => {
+ const event: ExecutionEvent = {
+ type: 'block:started',
+ eventId: 6,
+ timestamp: new Date().toISOString(),
+ executionId: 'exec-1',
+ workflowId: 'wf-1',
+ data: {
+ blockId: 'block-1',
+ blockName: 'Block 1',
+ blockType: 'function',
+ executionOrder: 1,
+ },
+ }
+ const onEventId = vi.fn()
+
+ await expect(
+ processSSEStream(
+ streamEvents([event]).getReader(),
+ {
+ onBlockStarted: async () => {
+ throw new Error('handler failed')
+ },
+ onEventId,
+ },
+ 'test'
+ )
+ ).rejects.toThrow('handler failed')
+
+ expect(onEventId).not.toHaveBeenCalled()
+ })
+})
diff --git a/apps/sim/hooks/use-execution-stream.ts b/apps/sim/hooks/use-execution-stream.ts
index a05fce82cef..b45a8550ba6 100644
--- a/apps/sim/hooks/use-execution-stream.ts
+++ b/apps/sim/hooks/use-execution-stream.ts
@@ -32,12 +32,40 @@ export function isExecutionStreamHttpError(error: unknown): error is ExecutionSt
return error instanceof ExecutionStreamHttpError
}
+export class SSEEventHandlerError extends Error {
+ constructor(
+ message: string,
+ public readonly eventType: string,
+ public readonly eventId: number | undefined,
+ public readonly executionId: string | undefined,
+ public readonly originalError: unknown
+ ) {
+ super(message)
+ this.name = 'SSEEventHandlerError'
+ }
+}
+
+export class SSEStreamInterruptedError extends Error {
+ constructor(
+ message: string,
+ public readonly executionId: string | undefined,
+ public readonly originalError: unknown
+ ) {
+ super(message)
+ this.name = 'SSEStreamInterruptedError'
+ }
+}
+
/**
* Detects errors caused by the browser killing a fetch (page refresh, navigation, tab close).
* These should be treated as clean disconnects, not execution errors.
*/
function isClientDisconnectError(error: any): boolean {
- if (error.name === 'AbortError') return true
+ return error.name === 'AbortError'
+}
+
+function isRecoverableStreamError(error: any): boolean {
+ if (isClientDisconnectError(error)) return false
const msg = (error.message ?? '').toLowerCase()
return (
msg.includes('network error') || msg.includes('failed to fetch') || msg.includes('load failed')
@@ -75,52 +103,69 @@ export async function processSSEStream(
continue
}
+ let event: ExecutionEvent
try {
- const event = JSON.parse(data) as ExecutionEvent
-
- if (event.eventId != null) {
- callbacks.onEventId?.(event.eventId)
- }
+ event = JSON.parse(data) as ExecutionEvent
+ } catch (error) {
+ logger.error('Failed to parse SSE event:', error, { data })
+ continue
+ }
+ try {
switch (event.type) {
case 'execution:started':
- callbacks.onExecutionStarted?.(event.data)
+ await callbacks.onExecutionStarted?.(event.data)
break
case 'execution:completed':
- callbacks.onExecutionCompleted?.(event.data)
+ await callbacks.onExecutionCompleted?.(event.data)
break
case 'execution:paused':
- callbacks.onExecutionPaused?.(event.data)
+ await callbacks.onExecutionPaused?.(event.data)
break
case 'execution:error':
- callbacks.onExecutionError?.(event.data)
+ await callbacks.onExecutionError?.(event.data)
break
case 'execution:cancelled':
- callbacks.onExecutionCancelled?.(event.data)
+ await callbacks.onExecutionCancelled?.(event.data)
break
case 'block:started':
- callbacks.onBlockStarted?.(event.data)
+ await callbacks.onBlockStarted?.(event.data)
break
case 'block:completed':
- callbacks.onBlockCompleted?.(event.data)
+ await callbacks.onBlockCompleted?.(event.data)
break
case 'block:error':
- callbacks.onBlockError?.(event.data)
+ await callbacks.onBlockError?.(event.data)
break
case 'block:childWorkflowStarted':
- callbacks.onBlockChildWorkflowStarted?.(event.data)
+ await callbacks.onBlockChildWorkflowStarted?.(event.data)
break
case 'stream:chunk':
- callbacks.onStreamChunk?.(event.data)
+ await callbacks.onStreamChunk?.(event.data)
break
case 'stream:done':
- callbacks.onStreamDone?.(event.data)
+ await callbacks.onStreamDone?.(event.data)
break
default:
logger.warn('Unknown event type:', (event as any).type)
}
+
+ if (event.eventId != null) {
+ await callbacks.onEventId?.(event.eventId)
+ }
} catch (error) {
- logger.error('Failed to parse SSE event:', error, { data })
+ logger.error('SSE event handler failed:', error, {
+ eventType: event.type,
+ eventId: event.eventId,
+ })
+ const message = error instanceof Error ? error.message : String(error)
+ throw new SSEEventHandlerError(
+ message,
+ event.type,
+ event.eventId,
+ event.executionId,
+ error
+ )
}
}
}
@@ -130,18 +175,18 @@ export async function processSSEStream(
}
export interface ExecutionStreamCallbacks {
- onExecutionStarted?: (data: ExecutionStartedData) => void
- onExecutionCompleted?: (data: ExecutionCompletedData) => void
- onExecutionPaused?: (data: ExecutionPausedData) => void
- onExecutionError?: (data: ExecutionErrorData) => void
- onExecutionCancelled?: (data: ExecutionCancelledData) => void
- onBlockStarted?: (data: BlockStartedData) => void
- onBlockCompleted?: (data: BlockCompletedData) => void
- onBlockError?: (data: BlockErrorData) => void
- onBlockChildWorkflowStarted?: (data: BlockChildWorkflowStartedData) => void
- onStreamChunk?: (data: StreamChunkData) => void
- onStreamDone?: (data: StreamDoneData) => void
- onEventId?: (eventId: number) => void
+ onExecutionStarted?: (data: ExecutionStartedData) => void | Promise
+ onExecutionCompleted?: (data: ExecutionCompletedData) => void | Promise
+ onExecutionPaused?: (data: ExecutionPausedData) => void | Promise
+ onExecutionError?: (data: ExecutionErrorData) => void | Promise
+ onExecutionCancelled?: (data: ExecutionCancelledData) => void | Promise
+ onBlockStarted?: (data: BlockStartedData) => void | Promise
+ onBlockCompleted?: (data: BlockCompletedData) => void | Promise
+ onBlockError?: (data: BlockErrorData) => void | Promise
+ onBlockChildWorkflowStarted?: (data: BlockChildWorkflowStartedData) => void | Promise
+ onStreamChunk?: (data: StreamChunkData) => void | Promise
+ onStreamDone?: (data: StreamDoneData) => void | Promise
+ onEventId?: (eventId: number) => void | Promise
}
export interface ExecuteStreamOptions {
@@ -191,6 +236,30 @@ export interface ReconnectStreamOptions {
*/
const sharedAbortControllers = new Map()
+function executeStreamKey(workflowId: string): string {
+ return `${workflowId}:execute`
+}
+
+function reconnectStreamKey(workflowId: string, executionId: string): string {
+ return `${workflowId}:reconnect:${executionId}`
+}
+
+function abortStream(key: string): void {
+ const controller = sharedAbortControllers.get(key)
+ if (!controller) return
+ controller.abort()
+ sharedAbortControllers.delete(key)
+}
+
+function abortWorkflowStreams(workflowId: string): void {
+ const prefix = `${workflowId}:`
+ for (const [key, controller] of sharedAbortControllers) {
+ if (!key.startsWith(prefix)) continue
+ controller.abort()
+ sharedAbortControllers.delete(key)
+ }
+}
+
/**
* Hook for executing workflows via server-side SSE streaming.
* Supports concurrent executions via per-workflow AbortController maps.
@@ -199,13 +268,12 @@ export function useExecutionStream() {
const execute = useCallback(async (options: ExecuteStreamOptions) => {
const { workflowId, callbacks = {}, onExecutionId, ...payload } = options
- const existing = sharedAbortControllers.get(workflowId)
- if (existing) {
- existing.abort()
- }
+ abortWorkflowStreams(workflowId)
const abortController = new AbortController()
- sharedAbortControllers.set(workflowId, abortController)
+ const streamKey = executeStreamKey(workflowId)
+ sharedAbortControllers.set(streamKey, abortController)
+ let serverExecutionId: string | undefined
try {
// boundary-raw-fetch: workflow execute endpoint returns an SSE stream consumed via response.body.getReader() and processSSEStream; also reads the X-Execution-Id response header
@@ -242,7 +310,7 @@ export function useExecutionStream() {
throw new Error('No response body')
}
- const serverExecutionId = response.headers.get('X-Execution-Id')
+ serverExecutionId = response.headers.get('X-Execution-Id') ?? undefined
if (serverExecutionId) {
onExecutionId?.(serverExecutionId)
}
@@ -254,15 +322,28 @@ export function useExecutionStream() {
logger.info('Execution stream disconnected (page unload or abort)')
return
}
+ if (isRecoverableStreamError(error)) {
+ logger.warn('Execution stream interrupted; preserving execution for reconnect', {
+ executionId: serverExecutionId,
+ error: error.message,
+ })
+ throw new SSEStreamInterruptedError(
+ 'Execution stream interrupted before a terminal event was received',
+ serverExecutionId,
+ error
+ )
+ }
logger.error('Execution stream error:', error)
- callbacks.onExecutionError?.({
- error: error.message || 'Unknown error',
- duration: 0,
- })
+ if (!(error instanceof SSEEventHandlerError)) {
+ await callbacks.onExecutionError?.({
+ error: error.message || 'Unknown error',
+ duration: 0,
+ })
+ }
throw error
} finally {
- if (sharedAbortControllers.get(workflowId) === abortController) {
- sharedAbortControllers.delete(workflowId)
+ if (sharedAbortControllers.get(streamKey) === abortController) {
+ sharedAbortControllers.delete(streamKey)
}
}
}, [])
@@ -277,13 +358,12 @@ export function useExecutionStream() {
callbacks = {},
} = options
- const existing = sharedAbortControllers.get(workflowId)
- if (existing) {
- existing.abort()
- }
+ abortWorkflowStreams(workflowId)
const abortController = new AbortController()
- sharedAbortControllers.set(workflowId, abortController)
+ const streamKey = executeStreamKey(workflowId)
+ sharedAbortControllers.set(streamKey, abortController)
+ let serverExecutionId: string | undefined
try {
// boundary-raw-fetch: run-from-block endpoint returns an SSE stream consumed via response.body.getReader() and processSSEStream; also reads the X-Execution-Id response header
@@ -324,7 +404,7 @@ export function useExecutionStream() {
throw new Error('No response body')
}
- const serverExecutionId = response.headers.get('X-Execution-Id')
+ serverExecutionId = response.headers.get('X-Execution-Id') ?? undefined
if (serverExecutionId) {
onExecutionId?.(serverExecutionId)
}
@@ -336,15 +416,28 @@ export function useExecutionStream() {
logger.info('Run-from-block stream disconnected (page unload or abort)')
return
}
+ if (isRecoverableStreamError(error)) {
+ logger.warn('Run-from-block stream interrupted; preserving execution for reconnect', {
+ executionId: serverExecutionId,
+ error: error.message,
+ })
+ throw new SSEStreamInterruptedError(
+ 'Run-from-block stream interrupted before a terminal event was received',
+ serverExecutionId,
+ error
+ )
+ }
logger.error('Run-from-block execution error:', error)
- callbacks.onExecutionError?.({
- error: error.message || 'Unknown error',
- duration: 0,
- })
+ if (!(error instanceof SSEEventHandlerError)) {
+ await callbacks.onExecutionError?.({
+ error: error.message || 'Unknown error',
+ duration: 0,
+ })
+ }
throw error
} finally {
- if (sharedAbortControllers.get(workflowId) === abortController) {
- sharedAbortControllers.delete(workflowId)
+ if (sharedAbortControllers.get(streamKey) === abortController) {
+ sharedAbortControllers.delete(streamKey)
}
}
}, [])
@@ -352,13 +445,10 @@ export function useExecutionStream() {
const reconnect = useCallback(async (options: ReconnectStreamOptions) => {
const { workflowId, executionId, fromEventId = 0, callbacks = {} } = options
- const existing = sharedAbortControllers.get(workflowId)
- if (existing) {
- existing.abort()
- }
-
const abortController = new AbortController()
- sharedAbortControllers.set(workflowId, abortController)
+ const streamKey = reconnectStreamKey(workflowId, executionId)
+ abortStream(streamKey)
+ sharedAbortControllers.set(streamKey, abortController)
try {
// boundary-raw-fetch: execution reconnect endpoint returns an SSE stream consumed via response.body.getReader() and processSSEStream
const response = await fetch(
@@ -376,19 +466,15 @@ export function useExecutionStream() {
logger.error('Reconnection stream error:', error)
throw error
} finally {
- if (sharedAbortControllers.get(workflowId) === abortController) {
- sharedAbortControllers.delete(workflowId)
+ if (sharedAbortControllers.get(streamKey) === abortController) {
+ sharedAbortControllers.delete(streamKey)
}
}
}, [])
const cancel = useCallback((workflowId?: string) => {
if (workflowId) {
- const controller = sharedAbortControllers.get(workflowId)
- if (controller) {
- controller.abort()
- sharedAbortControllers.delete(workflowId)
- }
+ abortWorkflowStreams(workflowId)
} else {
for (const [, controller] of sharedAbortControllers) {
controller.abort()
@@ -397,10 +483,20 @@ export function useExecutionStream() {
}
}, [])
+ const cancelReconnect = useCallback((workflowId: string, executionId: string) => {
+ abortStream(reconnectStreamKey(workflowId, executionId))
+ }, [])
+
+ const cancelExecute = useCallback((workflowId: string) => {
+ abortStream(executeStreamKey(workflowId))
+ }, [])
+
return {
execute,
executeFromBlock,
reconnect,
cancel,
+ cancelReconnect,
+ cancelExecute,
}
}
diff --git a/apps/sim/lib/api/client/request.ts b/apps/sim/lib/api/client/request.ts
index b2ecd688b8a..d0e8255d2f5 100644
--- a/apps/sim/lib/api/client/request.ts
+++ b/apps/sim/lib/api/client/request.ts
@@ -134,6 +134,14 @@ function messageFromErrorBody(body: unknown, fallback: string): string {
return fallback
}
+function codeFromErrorBody(body: unknown): string | undefined {
+ if (body && typeof body === 'object') {
+ const record = body as Record
+ if (typeof record.code === 'string' && record.code.length > 0) return record.code
+ }
+ return undefined
+}
+
function isSchemaValidationError(error: unknown): boolean {
return Boolean(
error &&
@@ -173,6 +181,7 @@ export async function requestJson(
message: messageFromErrorBody(parsed, `Request failed with ${response.status}`),
body: parsed,
rawBody: raw,
+ code: codeFromErrorBody(parsed),
})
}
diff --git a/apps/sim/lib/api/contracts/credentials.ts b/apps/sim/lib/api/contracts/credentials.ts
index 558946caebc..20c46095a31 100644
--- a/apps/sim/lib/api/contracts/credentials.ts
+++ b/apps/sim/lib/api/contracts/credentials.ts
@@ -1,6 +1,6 @@
import { z } from 'zod'
import { defineRouteContract } from '@/lib/api/contracts/types'
-import type { OAuthProvider } from '@/lib/oauth/types'
+import { ATLASSIAN_SERVICE_ACCOUNT_PROVIDER_ID, type OAuthProvider } from '@/lib/oauth/types'
const ENV_VAR_NAME_REGEX = /^[A-Za-z0-9_]+$/
@@ -119,6 +119,8 @@ export const createCredentialBodySchema = z
envKey: z.string().trim().min(1).optional(),
envOwnerUserId: z.string().trim().min(1).optional(),
serviceAccountJson: z.string().optional(),
+ apiToken: z.string().trim().min(1).optional(),
+ domain: z.string().trim().min(1).optional(),
})
.superRefine((data, ctx) => {
if (data.type === 'oauth') {
@@ -147,6 +149,23 @@ export const createCredentialBodySchema = z
}
if (data.type === 'service_account') {
+ if (data.providerId === ATLASSIAN_SERVICE_ACCOUNT_PROVIDER_ID) {
+ if (!data.apiToken) {
+ ctx.addIssue({
+ code: z.ZodIssueCode.custom,
+ message: 'apiToken is required for Atlassian service account credentials',
+ path: ['apiToken'],
+ })
+ }
+ if (!data.domain) {
+ ctx.addIssue({
+ code: z.ZodIssueCode.custom,
+ message: 'domain is required for Atlassian service account credentials',
+ path: ['domain'],
+ })
+ }
+ return
+ }
if (!data.serviceAccountJson) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
@@ -222,35 +241,6 @@ export const createCredentialDraftBodySchema = z.object({
credentialId: z.string().min(1).optional(),
})
-export const createWorkspaceCredentialBodySchema = z.object({
- workspaceId: z.string().uuid('Workspace ID must be a valid UUID'),
- type: workspaceCredentialTypeSchema,
- displayName: z.string().optional(),
- description: z.string().optional(),
- providerId: z.string().optional(),
- accountId: z.string().optional(),
- envKey: z.string().optional(),
- envOwnerUserId: z.string().optional(),
- serviceAccountJson: z.string().optional(),
-})
-
-export const updateWorkspaceCredentialBodySchema = z
- .object({
- displayName: z.string().trim().min(1).max(255).optional(),
- description: z.string().trim().max(500).nullable().optional(),
- serviceAccountJson: z.string().min(1).optional(),
- })
- .refine(
- (data) =>
- data.displayName !== undefined ||
- data.description !== undefined ||
- data.serviceAccountJson !== undefined,
- {
- message: 'At least one field must be provided',
- path: ['displayName'],
- }
- )
-
export const upsertWorkspaceCredentialMemberBodySchema = z.object({
userId: z.string().min(1),
role: workspaceCredentialRoleSchema.default('member'),
diff --git a/apps/sim/lib/api/contracts/data-drains.ts b/apps/sim/lib/api/contracts/data-drains.ts
new file mode 100644
index 00000000000..cecc401c069
--- /dev/null
+++ b/apps/sim/lib/api/contracts/data-drains.ts
@@ -0,0 +1,229 @@
+import { z } from 'zod'
+import { defineRouteContract } from '@/lib/api/contracts/types'
+import { CADENCE_TYPES, DESTINATION_TYPES, SOURCE_TYPES } from '@/lib/data-drains/types'
+
+export const dataDrainSourceSchema = z.enum(SOURCE_TYPES)
+export const dataDrainDestinationTypeSchema = z.enum(DESTINATION_TYPES)
+export const dataDrainCadenceSchema = z.enum(CADENCE_TYPES)
+export const dataDrainRunStatusSchema = z.enum(['running', 'success', 'failed'])
+export const dataDrainRunTriggerSchema = z.enum(['cron', 'manual'])
+
+export const dataDrainOrgParamsSchema = z.object({
+ id: z.string().min(1, 'organization id is required'),
+})
+
+export const dataDrainParamsSchema = z.object({
+ id: z.string().min(1, 'organization id is required'),
+ drainId: z.string().min(1, 'drain id is required'),
+})
+
+const drainNameSchema = z.string().trim().min(1, 'name is required').max(120)
+
+const s3ConfigBodySchema = z.object({
+ bucket: z.string().min(1, 'bucket is required').max(255),
+ region: z.string().min(1, 'region is required').max(64),
+ prefix: z.string().max(512).optional(),
+ endpoint: z.string().url().optional(),
+ forcePathStyle: z.boolean().optional(),
+})
+
+const s3CredentialsBodySchema = z.object({
+ accessKeyId: z.string().min(1, 'accessKeyId is required'),
+ secretAccessKey: z.string().min(1, 'secretAccessKey is required'),
+})
+
+const webhookConfigBodySchema = z.object({
+ url: z.string().url('url must be a valid URL'),
+ signatureHeader: z.string().min(1).max(128).optional(),
+})
+
+const webhookCredentialsBodySchema = z.object({
+ signingSecret: z.string().min(8, 'signingSecret must be at least 8 characters'),
+ bearerToken: z.string().min(1).optional(),
+})
+
+/**
+ * Discriminated body shape used by both create and update. Each destination
+ * variant carries its own typed `destinationConfig` and optional
+ * `destinationCredentials`. On update, omitting `destinationCredentials`
+ * leaves the encrypted blob in place.
+ */
+export const dataDrainDestinationBodySchema = z.discriminatedUnion('destinationType', [
+ z.object({
+ destinationType: z.literal('s3'),
+ destinationConfig: s3ConfigBodySchema,
+ destinationCredentials: s3CredentialsBodySchema.optional(),
+ }),
+ z.object({
+ destinationType: z.literal('webhook'),
+ destinationConfig: webhookConfigBodySchema,
+ destinationCredentials: webhookCredentialsBodySchema.optional(),
+ }),
+])
+
+const drainCommonBodyFieldsSchema = z.object({
+ name: drainNameSchema,
+ source: dataDrainSourceSchema,
+ scheduleCadence: dataDrainCadenceSchema,
+ enabled: z.boolean().optional(),
+})
+
+export const createDataDrainBodySchema = z.intersection(
+ drainCommonBodyFieldsSchema,
+ dataDrainDestinationBodySchema
+)
+
+/**
+ * Update bodies are partial — every field is optional. We deliberately don't
+ * use a discriminated union here: clients sending `{ enabled: false }` should
+ * not be forced to also send `destinationType`. The route validates the
+ * destination payloads against the typed `configSchema` / `credentialsSchema`
+ * for the existing drain's destination type before persisting, so the
+ * structural shape is still enforced — just at the route layer rather than at
+ * the contract boundary.
+ */
+export const updateDataDrainBodySchema = drainCommonBodyFieldsSchema.partial().extend({
+ destinationType: dataDrainDestinationTypeSchema.optional(),
+ destinationConfig: z.record(z.string(), z.unknown()).optional(),
+ destinationCredentials: z.record(z.string(), z.unknown()).optional(),
+})
+
+const drainDestinationResponseSchema = z.discriminatedUnion('destinationType', [
+ z.object({
+ destinationType: z.literal('s3'),
+ destinationConfig: s3ConfigBodySchema,
+ }),
+ z.object({
+ destinationType: z.literal('webhook'),
+ destinationConfig: webhookConfigBodySchema,
+ }),
+])
+
+const drainCommonResponseFieldsSchema = z.object({
+ id: z.string(),
+ organizationId: z.string(),
+ name: z.string(),
+ source: dataDrainSourceSchema,
+ scheduleCadence: dataDrainCadenceSchema,
+ enabled: z.boolean(),
+ cursor: z.string().nullable(),
+ lastRunAt: z.string().nullable(),
+ lastSuccessAt: z.string().nullable(),
+ createdBy: z.string(),
+ createdAt: z.string(),
+ updatedAt: z.string(),
+})
+
+export const dataDrainSchema = z.intersection(
+ drainCommonResponseFieldsSchema,
+ drainDestinationResponseSchema
+)
+
+export type DataDrain = z.output
+export type CreateDataDrainBody = z.input
+export type UpdateDataDrainBody = z.input
+
+export const dataDrainListResponseSchema = z.object({
+ drains: z.array(dataDrainSchema),
+})
+
+export const dataDrainResponseSchema = z.object({
+ drain: dataDrainSchema,
+})
+
+export const dataDrainRunSchema = z.object({
+ id: z.string(),
+ drainId: z.string(),
+ status: dataDrainRunStatusSchema,
+ trigger: dataDrainRunTriggerSchema,
+ startedAt: z.string(),
+ finishedAt: z.string().nullable(),
+ rowsExported: z.number().int(),
+ bytesWritten: z.number().int(),
+ cursorBefore: z.string().nullable(),
+ cursorAfter: z.string().nullable(),
+ error: z.string().nullable(),
+ locators: z.array(z.string()),
+})
+
+export type DataDrainRun = z.output
+
+export const dataDrainRunListResponseSchema = z.object({
+ runs: z.array(dataDrainRunSchema),
+})
+
+export const runDataDrainResponseSchema = z.object({
+ jobId: z.string(),
+})
+
+export const testDataDrainResponseSchema = z.object({
+ ok: z.literal(true),
+})
+
+export const listDataDrainsContract = defineRouteContract({
+ method: 'GET',
+ path: '/api/organizations/[id]/data-drains',
+ params: dataDrainOrgParamsSchema,
+ response: { mode: 'json', schema: dataDrainListResponseSchema },
+})
+
+export const createDataDrainContract = defineRouteContract({
+ method: 'POST',
+ path: '/api/organizations/[id]/data-drains',
+ params: dataDrainOrgParamsSchema,
+ body: createDataDrainBodySchema,
+ response: { mode: 'json', schema: dataDrainResponseSchema },
+})
+
+export const getDataDrainContract = defineRouteContract({
+ method: 'GET',
+ path: '/api/organizations/[id]/data-drains/[drainId]',
+ params: dataDrainParamsSchema,
+ response: { mode: 'json', schema: dataDrainResponseSchema },
+})
+
+export const updateDataDrainContract = defineRouteContract({
+ method: 'PUT',
+ path: '/api/organizations/[id]/data-drains/[drainId]',
+ params: dataDrainParamsSchema,
+ body: updateDataDrainBodySchema,
+ response: { mode: 'json', schema: dataDrainResponseSchema },
+})
+
+export const deleteDataDrainContract = defineRouteContract({
+ method: 'DELETE',
+ path: '/api/organizations/[id]/data-drains/[drainId]',
+ params: dataDrainParamsSchema,
+ response: { mode: 'json', schema: z.object({ success: z.literal(true) }) },
+})
+
+export const runDataDrainContract = defineRouteContract({
+ method: 'POST',
+ path: '/api/organizations/[id]/data-drains/[drainId]/run',
+ params: dataDrainParamsSchema,
+ response: { mode: 'json', schema: runDataDrainResponseSchema },
+})
+
+export const testDataDrainContract = defineRouteContract({
+ method: 'POST',
+ path: '/api/organizations/[id]/data-drains/[drainId]/test',
+ params: dataDrainParamsSchema,
+ response: { mode: 'json', schema: testDataDrainResponseSchema },
+})
+
+export const listDataDrainRunsContract = defineRouteContract({
+ method: 'GET',
+ path: '/api/organizations/[id]/data-drains/[drainId]/runs',
+ params: dataDrainParamsSchema,
+ query: z
+ .object({
+ limit: z
+ .preprocess(
+ (v) => (typeof v === 'string' ? Number.parseInt(v, 10) : v),
+ z.number().int().min(1).max(200)
+ )
+ .optional(),
+ })
+ .optional(),
+ response: { mode: 'json', schema: dataDrainRunListResponseSchema },
+})
diff --git a/apps/sim/lib/api/contracts/oauth-connections.ts b/apps/sim/lib/api/contracts/oauth-connections.ts
index ee37437e218..cc778ee6abb 100644
--- a/apps/sim/lib/api/contracts/oauth-connections.ts
+++ b/apps/sim/lib/api/contracts/oauth-connections.ts
@@ -77,6 +77,8 @@ const oauthTokenResponseSchema = z.object({
accessToken: z.string(),
idToken: z.string().optional(),
instanceUrl: z.string().optional(),
+ cloudId: z.string().optional(),
+ domain: z.string().optional(),
})
export const oauthTokenGetContract = defineRouteContract({
diff --git a/apps/sim/lib/api/contracts/selectors/confluence.ts b/apps/sim/lib/api/contracts/selectors/confluence.ts
index 18b3dd76408..3ed1b6bb8e5 100644
--- a/apps/sim/lib/api/contracts/selectors/confluence.ts
+++ b/apps/sim/lib/api/contracts/selectors/confluence.ts
@@ -10,7 +10,12 @@ import { defineRouteContract } from '@/lib/api/contracts/types'
import { validateAlphanumericId } from '@/lib/core/security/input-validation'
const confluenceSpaceSchema = z
- .object({ id: z.string(), name: z.string(), key: z.string() })
+ .object({
+ id: z.string(),
+ name: z.string(),
+ key: z.string(),
+ status: z.string().optional(),
+ })
.passthrough()
export const confluencePagesBodySchema = z.object({
@@ -354,10 +359,17 @@ const defineConfluenceGetContract = (path: string, que
},
})
+export const confluenceSpacesSelectorBodySchema = credentialWorkflowDomainBodySchema.extend({
+ cursor: optionalString,
+})
+
export const confluenceSpacesSelectorContract = definePostSelector(
'/api/tools/confluence/selector-spaces',
- credentialWorkflowDomainBodySchema,
- z.object({ spaces: z.array(confluenceSpaceSchema) })
+ confluenceSpacesSelectorBodySchema,
+ z.object({
+ spaces: z.array(confluenceSpaceSchema),
+ nextCursor: optionalString,
+ })
)
export const confluencePagesSelectorContract = definePostSelector(
diff --git a/apps/sim/lib/api/contracts/selectors/oauth.ts b/apps/sim/lib/api/contracts/selectors/oauth.ts
index ddb75089013..2afd417f68d 100644
--- a/apps/sim/lib/api/contracts/selectors/oauth.ts
+++ b/apps/sim/lib/api/contracts/selectors/oauth.ts
@@ -8,6 +8,8 @@ const oauthTokenResponseSchema = z
accessToken: z.string().optional(),
idToken: z.string().optional(),
instanceUrl: z.string().optional(),
+ cloudId: z.string().optional(),
+ domain: z.string().optional(),
})
.passthrough()
diff --git a/apps/sim/lib/copilot/chat/post.test.ts b/apps/sim/lib/copilot/chat/post.test.ts
index 3c114eccba2..1a6dee8a4b1 100644
--- a/apps/sim/lib/copilot/chat/post.test.ts
+++ b/apps/sim/lib/copilot/chat/post.test.ts
@@ -93,10 +93,18 @@ vi.mock('@sim/db', () => ({
})),
})),
})),
+ select: vi.fn(() => ({
+ from: vi.fn(() => ({
+ where: vi.fn(() => ({
+ limit: vi.fn().mockResolvedValue([{ permissionType: 'write' }]),
+ })),
+ })),
+ })),
},
}))
vi.mock('drizzle-orm', () => ({
+ and: vi.fn(() => ({})),
eq: vi.fn(() => ({})),
sql: (strings: TemplateStringsArray, ...values: unknown[]) => ({ strings, values }),
}))
diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts
index a745f209c9e..a9d1eb30adc 100644
--- a/apps/sim/lib/copilot/chat/post.ts
+++ b/apps/sim/lib/copilot/chat/post.ts
@@ -1,9 +1,9 @@
import { type Context as OtelContext, context as otelContextApi } from '@opentelemetry/api'
import { db } from '@sim/db'
-import { copilotChats } from '@sim/db/schema'
+import { copilotChats, permissions } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { generateId } from '@sim/utils/id'
-import { eq, sql } from 'drizzle-orm'
+import { and, eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { isZodError, validationErrorResponse } from '@/lib/api/server'
@@ -506,14 +506,12 @@ async function resolveBranch(params: {
}
const resolvedWorkflowId = resolved.workflowId
- let resolvedWorkspaceId = requestedWorkspaceId
- if (!resolvedWorkspaceId) {
- try {
- const workflow = await getWorkflowById(resolvedWorkflowId)
- resolvedWorkspaceId = workflow?.workspaceId ?? undefined
- } catch {
- // best effort; downstream calls can still proceed
- }
+ let resolvedWorkspaceId: string | undefined
+ try {
+ const workflow = await getWorkflowById(resolvedWorkflowId)
+ resolvedWorkspaceId = workflow?.workspaceId ?? requestedWorkspaceId
+ } catch {
+ resolvedWorkspaceId = requestedWorkspaceId
}
const selectedModel = model || DEFAULT_MODEL
@@ -569,6 +567,22 @@ async function resolveBranch(params: {
return createBadRequestResponse('workspaceId is required when workflowId is not provided')
}
+ const [permissionRow] = await db
+ .select({ permissionType: permissions.permissionType })
+ .from(permissions)
+ .where(
+ and(
+ eq(permissions.userId, authenticatedUserId),
+ eq(permissions.entityType, 'workspace'),
+ eq(permissions.entityId, requestedWorkspaceId)
+ )
+ )
+ .limit(1)
+
+ if (!permissionRow) {
+ return createBadRequestResponse('Workspace not found or access denied')
+ }
+
return {
kind: 'workspace',
workspaceId: requestedWorkspaceId,
diff --git a/apps/sim/lib/copilot/chat/process-contents.ts b/apps/sim/lib/copilot/chat/process-contents.ts
index 78d9b94db07..d570bab39e7 100644
--- a/apps/sim/lib/copilot/chat/process-contents.ts
+++ b/apps/sim/lib/copilot/chat/process-contents.ts
@@ -116,8 +116,8 @@ export async function processContextsServer(
currentWorkspaceId
)
}
- if (ctx.kind === 'table' && ctx.tableId) {
- const result = await resolveTableResource(ctx.tableId)
+ if (ctx.kind === 'table' && ctx.tableId && currentWorkspaceId) {
+ const result = await resolveTableResource(ctx.tableId, currentWorkspaceId)
if (!result) return null
return { type: 'table', tag: ctx.label ? `@${ctx.label}` : '@', content: result.content }
}
@@ -701,7 +701,7 @@ export async function resolveActiveResourceContext(
resourceType: string,
resourceId: string,
workspaceId: string,
- _userId: string,
+ userId: string,
chatId?: string
): Promise {
try {
@@ -709,10 +709,10 @@ export async function resolveActiveResourceContext(
case 'workflow': {
const ctx = await processWorkflowFromDb(
resourceId,
- undefined,
+ userId,
'@active_resource',
'current_workflow',
- undefined,
+ workspaceId,
chatId
)
if (!ctx) return null
@@ -721,7 +721,7 @@ export async function resolveActiveResourceContext(
case 'knowledgebase': {
const ctx = await processKnowledgeFromDb(
resourceId,
- undefined,
+ userId,
'@active_resource',
workspaceId
)
@@ -729,7 +729,7 @@ export async function resolveActiveResourceContext(
return { type: 'active_resource', tag: '@active_resource', content: ctx.content }
}
case 'table': {
- return await resolveTableResource(resourceId)
+ return await resolveTableResource(resourceId, workspaceId)
}
case 'file': {
return await resolveFileResource(resourceId, workspaceId)
@@ -745,9 +745,13 @@ export async function resolveActiveResourceContext(
return null
}
}
-async function resolveTableResource(tableId: string): Promise {
+async function resolveTableResource(
+ tableId: string,
+ workspaceId: string
+): Promise {
const table = await getTableById(tableId)
if (!table) return null
+ if (table.workspaceId !== workspaceId) return null
return {
type: 'active_resource',
tag: '@active_resource',
diff --git a/apps/sim/lib/copilot/tools/client/run-tool-execution.test.ts b/apps/sim/lib/copilot/tools/client/run-tool-execution.test.ts
index 1de7a8ccc1a..ac5fff66d70 100644
--- a/apps/sim/lib/copilot/tools/client/run-tool-execution.test.ts
+++ b/apps/sim/lib/copilot/tools/client/run-tool-execution.test.ts
@@ -9,6 +9,8 @@ const {
executeWorkflowWithFullLogging,
getWorkflowEntries,
loadExecutionPointer,
+ MockSSEEventHandlerError,
+ MockSSEStreamInterruptedError,
saveExecutionPointer,
setActiveWorkflow,
} = vi.hoisted(() => ({
@@ -16,12 +18,32 @@ const {
executeWorkflowWithFullLogging: vi.fn(),
getWorkflowEntries: vi.fn(() => []),
loadExecutionPointer: vi.fn(),
+ MockSSEEventHandlerError: class SSEEventHandlerError extends Error {
+ executionId?: string
+
+ constructor(message: string, executionId?: string) {
+ super(message)
+ this.name = 'SSEEventHandlerError'
+ this.executionId = executionId
+ }
+ },
+ MockSSEStreamInterruptedError: class SSEStreamInterruptedError extends Error {
+ executionId?: string
+
+ constructor(message: string, executionId?: string) {
+ super(message)
+ this.name = 'SSEStreamInterruptedError'
+ this.executionId = executionId
+ }
+ },
saveExecutionPointer: vi.fn(),
setActiveWorkflow: vi.fn(),
}))
const setIsExecuting = vi.fn()
+const setActiveBlocks = vi.fn()
const setCurrentExecutionId = vi.fn()
+const getCurrentExecutionId = vi.fn()
const getWorkflowExecution = vi.fn(() => ({ isExecuting: false }))
vi.mock('@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils', () => ({
@@ -31,13 +53,20 @@ vi.mock('@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-u
vi.mock('@/stores/execution/store', () => ({
useExecutionStore: {
getState: () => ({
+ getCurrentExecutionId,
getWorkflowExecution,
+ setActiveBlocks,
setIsExecuting,
setCurrentExecutionId,
}),
},
}))
+vi.mock('@/hooks/use-execution-stream', () => ({
+ SSEEventHandlerError: MockSSEEventHandlerError,
+ SSEStreamInterruptedError: MockSSEStreamInterruptedError,
+}))
+
vi.mock('@/stores/workflows/registry/store', () => ({
useWorkflowRegistry: {
getState: () => ({
@@ -73,6 +102,7 @@ import {
describe('run tool execution cancellation', () => {
beforeEach(() => {
vi.clearAllMocks()
+ getCurrentExecutionId.mockReturnValue(null)
getWorkflowEntries.mockReturnValue([])
loadExecutionPointer.mockResolvedValue(null)
vi.stubGlobal('fetch', vi.fn().mockResolvedValue({ ok: true }))
@@ -144,7 +174,9 @@ describe('run tool execution cancellation', () => {
)
})
- it('binds a recovered execution without starting a new workflow run', async () => {
+ it('treats a tab-local execution pointer as handled in background', async () => {
+ const fetchMock = vi.fn().mockResolvedValue({ ok: true })
+ vi.stubGlobal('fetch', fetchMock)
loadExecutionPointer.mockResolvedValueOnce({
workflowId: 'wf-1',
executionId: 'exec-existing',
@@ -153,14 +185,67 @@ describe('run tool execution cancellation', () => {
await expect(bindRunToolToExecution('tool-3', 'wf-1')).resolves.toBe(true)
- expect(setActiveWorkflow).toHaveBeenCalledWith('wf-1')
- expect(setIsExecuting).toHaveBeenCalledWith('wf-1', true)
- expect(setCurrentExecutionId).toHaveBeenCalledWith('wf-1', 'exec-existing')
- expect(saveExecutionPointer).toHaveBeenCalledWith({
- workflowId: 'wf-1',
- executionId: 'exec-existing',
- lastEventId: 7,
- })
+ expect(setActiveWorkflow).not.toHaveBeenCalled()
+ expect(setIsExecuting).not.toHaveBeenCalled()
+ expect(setCurrentExecutionId).not.toHaveBeenCalled()
+ expect(saveExecutionPointer).not.toHaveBeenCalled()
expect(executeWorkflowWithFullLogging).not.toHaveBeenCalled()
+ expect(fetchMock).toHaveBeenCalledWith(
+ '/api/copilot/confirm',
+ expect.objectContaining({
+ method: 'POST',
+ body: expect.stringContaining('"status":"background"'),
+ })
+ )
+ })
+
+ it('does not recover from shared console rows without a tab-local pointer', async () => {
+ loadExecutionPointer.mockResolvedValueOnce(null)
+ getWorkflowEntries.mockReturnValueOnce([
+ {
+ workflowId: 'wf-1',
+ executionId: 'exec-shared',
+ isRunning: true,
+ startedAt: new Date().toISOString(),
+ },
+ ])
+
+ await expect(bindRunToolToExecution('tool-4', 'wf-1')).resolves.toBe(false)
+
+ expect(setActiveWorkflow).not.toHaveBeenCalled()
+ expect(setIsExecuting).not.toHaveBeenCalled()
+ expect(setCurrentExecutionId).not.toHaveBeenCalled()
+ expect(saveExecutionPointer).not.toHaveBeenCalled()
+ })
+
+ it('reports local stream handler failures as background instead of workflow errors', async () => {
+ const fetchMock = vi.fn().mockResolvedValue({ ok: true })
+ vi.stubGlobal('fetch', fetchMock)
+ getCurrentExecutionId.mockImplementation(
+ () => saveExecutionPointer.mock.calls[0]?.[0]?.executionId ?? null
+ )
+ executeWorkflowWithFullLogging.mockRejectedValueOnce(
+ new MockSSEEventHandlerError('handler failed', 'exec-1')
+ )
+
+ executeRunToolOnClient('tool-5', 'run_workflow', { workflowId: 'wf-1' })
+
+ await vi.waitFor(() => {
+ expect(fetchMock).toHaveBeenCalledWith(
+ '/api/copilot/confirm',
+ expect.objectContaining({
+ method: 'POST',
+ body: expect.stringContaining('"status":"background"'),
+ })
+ )
+ })
+ expect(clearExecutionPointer).not.toHaveBeenCalled()
+ expect(setIsExecuting).toHaveBeenCalledWith('wf-1', false)
+ expect(fetchMock).not.toHaveBeenCalledWith(
+ '/api/copilot/confirm',
+ expect.objectContaining({
+ body: expect.stringContaining('"status":"error"'),
+ })
+ )
})
})
diff --git a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts
index 0dcd3786597..18104b415fe 100644
--- a/apps/sim/lib/copilot/tools/client/run-tool-execution.ts
+++ b/apps/sim/lib/copilot/tools/client/run-tool-execution.ts
@@ -1,7 +1,12 @@
import { createLogger } from '@sim/logger'
import { toError } from '@sim/utils/errors'
+import { sleep } from '@sim/utils/helpers'
import { generateId } from '@sim/utils/id'
-import type { AsyncCompletionData } from '@/lib/copilot/async-runs/lifecycle'
+import {
+ ASYNC_TOOL_CONFIRMATION_STATUS,
+ type AsyncCompletionData,
+ type AsyncConfirmationStatus,
+} from '@/lib/copilot/async-runs/lifecycle'
import { COPILOT_CONFIRM_API_PATH } from '@/lib/copilot/constants'
import { MothershipStreamV1ToolOutcome } from '@/lib/copilot/generated/mothership-stream-v1'
import {
@@ -11,14 +16,13 @@ import {
} from '@/lib/copilot/generated/tool-catalog-v1'
import { traceparentHeader } from '@/lib/copilot/tools/client/trace-context'
import { executeWorkflowWithFullLogging } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils/workflow-execution-utils'
+import { SSEEventHandlerError, SSEStreamInterruptedError } from '@/hooks/use-execution-stream'
import { useExecutionStore } from '@/stores/execution/store'
import {
clearExecutionPointer,
consolePersistence,
- type ExecutionPointer,
loadExecutionPointer,
saveExecutionPointer,
- useTerminalConsoleStore,
} from '@/stores/terminal'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
@@ -26,6 +30,20 @@ const logger = createLogger('CopilotRunToolExecution')
const activeRunToolByWorkflowId = new Map()
const activeRunAbortByWorkflowId = new Map()
const manuallyStoppedToolCallIds = new Set()
+const PENDING_COMPLETION_STORAGE_PREFIX = 'sim:copilot:run-tool-completion:'
+
+interface PendingCompletionReport {
+ status: AsyncConfirmationStatus
+ message?: string
+ data?: AsyncCompletionData
+}
+
+class CompletionReportError extends Error {
+ constructor(message: string) {
+ super(message)
+ this.name = 'CompletionReportError'
+ }
+}
function isRecord(value: unknown): value is Record {
return Boolean(value) && typeof value === 'object' && !Array.isArray(value)
@@ -47,55 +65,62 @@ function resolveTriggerBlockId(params: Record): string | undefi
: undefined
}
-function getRunningExecutionPointer(workflowId: string): ExecutionPointer | null {
- const runningEntries = useTerminalConsoleStore
- .getState()
- .getWorkflowEntries(workflowId)
- .filter((entry) => entry.isRunning && entry.executionId)
+function pendingCompletionStorageKey(toolCallId: string): string {
+ return `${PENDING_COMPLETION_STORAGE_PREFIX}${toolCallId}`
+}
- if (runningEntries.length === 0) {
- return null
+function savePendingCompletionReport(toolCallId: string, report: PendingCompletionReport): void {
+ if (typeof window === 'undefined') return
+ try {
+ window.sessionStorage.setItem(pendingCompletionStorageKey(toolCallId), JSON.stringify(report))
+ } catch (error) {
+ logger.warn('[RunTool] Failed to persist pending completion report', {
+ toolCallId,
+ error: toError(error).message,
+ })
}
+}
- const latestEntry = [...runningEntries].sort((a, b) => {
- const aStartedAt = a.startedAt ? new Date(a.startedAt).getTime() : 0
- const bStartedAt = b.startedAt ? new Date(b.startedAt).getTime() : 0
- return bStartedAt - aStartedAt
- })[0]
-
- const executionId = latestEntry?.executionId
- if (!executionId) {
+function loadPendingCompletionReport(toolCallId: string): PendingCompletionReport | null {
+ if (typeof window === 'undefined') return null
+ try {
+ const raw = window.sessionStorage.getItem(pendingCompletionStorageKey(toolCallId))
+ if (!raw) return null
+ const parsed = JSON.parse(raw) as PendingCompletionReport
+ return parsed?.status ? parsed : null
+ } catch (error) {
+ logger.warn('[RunTool] Failed to load pending completion report', {
+ toolCallId,
+ error: toError(error).message,
+ })
return null
}
-
- return {
- workflowId,
- executionId,
- lastEventId: 0,
- }
}
-async function findRecoverableExecutionPointer(
- workflowId: string
-): Promise {
- const pointer = await loadExecutionPointer(workflowId)
- if (pointer?.executionId) {
- return pointer
+function clearPendingCompletionReport(toolCallId: string): void {
+ if (typeof window === 'undefined') return
+ try {
+ window.sessionStorage.removeItem(pendingCompletionStorageKey(toolCallId))
+ } catch (error) {
+ logger.warn('[RunTool] Failed to clear pending completion report', {
+ toolCallId,
+ error: toError(error).message,
+ })
}
-
- return getRunningExecutionPointer(workflowId)
}
export async function bindRunToolToExecution(
toolCallId: string,
workflowId: string
): Promise {
- const executionPointer = await findRecoverableExecutionPointer(workflowId)
- if (!executionPointer) {
- return false
- }
-
const existingToolCallId = activeRunToolByWorkflowId.get(workflowId)
+ if (existingToolCallId === toolCallId) {
+ logger.info('[RunTool] Recovery skipped: run tool is already active in this tab', {
+ workflowId,
+ toolCallId,
+ })
+ return true
+ }
if (existingToolCallId && existingToolCallId !== toolCallId) {
logger.warn('[RunTool] Recovery skipped: another run tool is already active', {
workflowId,
@@ -105,20 +130,60 @@ export async function bindRunToolToExecution(
return false
}
- useWorkflowRegistry.getState().setActiveWorkflow(workflowId)
- activeRunToolByWorkflowId.set(workflowId, toolCallId)
-
- const { setCurrentExecutionId, setIsExecuting } = useExecutionStore.getState()
- setIsExecuting(workflowId, true)
- setCurrentExecutionId(workflowId, executionPointer.executionId)
- saveExecutionPointer(executionPointer)
+ const pointer = await loadExecutionPointer(workflowId).catch(() => null)
+ if (!pointer?.executionId) {
+ logger.info('[RunTool] Recovery skipped: no tab-local execution pointer', {
+ workflowId,
+ toolCallId,
+ })
+ return false
+ }
- logger.info('[RunTool] Reattached tool call to existing workflow execution', {
+ logger.info('[RunTool] Recovery moved to background for existing execution pointer', {
workflowId,
toolCallId,
- executionId: executionPointer.executionId,
- lastEventId: executionPointer.lastEventId,
+ executionId: pointer.executionId,
})
+ const pendingCompletion = loadPendingCompletionReport(toolCallId)
+ if (pendingCompletion) {
+ try {
+ await reportCompletion(
+ toolCallId,
+ pendingCompletion.status,
+ pendingCompletion.message,
+ pendingCompletion.data
+ )
+ clearPendingCompletionReport(toolCallId)
+ } catch (error) {
+ logger.warn('[RunTool] Failed to report recovered terminal completion', {
+ workflowId,
+ toolCallId,
+ executionId: pointer.executionId,
+ error: toError(error).message,
+ })
+ }
+ return true
+ }
+
+ try {
+ await reportCompletion(
+ toolCallId,
+ ASYNC_TOOL_CONFIRMATION_STATUS.background,
+ 'Client recovered an existing workflow execution; continuing in background.',
+ {
+ workflowId,
+ executionId: pointer.executionId,
+ lastEventId: pointer.lastEventId,
+ }
+ )
+ } catch (error) {
+ logger.warn('[RunTool] Failed to report recovered execution as background', {
+ workflowId,
+ toolCallId,
+ executionId: pointer.executionId,
+ error: toError(error).message,
+ })
+ }
return true
}
@@ -292,6 +357,15 @@ async function doExecuteRunTool(
setCurrentExecutionId(targetWorkflowId, executionId)
saveExecutionPointer({ workflowId: targetWorkflowId, executionId, lastEventId: 0 })
const executionStartTime = new Date().toISOString()
+ const releaseVisibleExecutionForBackground = () => {
+ const { setCurrentExecutionId: clearExecId, setActiveBlocks } = useExecutionStore.getState()
+ if (activeRunToolByWorkflowId.get(targetWorkflowId) === toolCallId) {
+ clearExecId(targetWorkflowId, null)
+ consolePersistence.executionEnded()
+ setIsExecuting(targetWorkflowId, false)
+ setActiveBlocks(targetWorkflowId, new Set())
+ }
+ }
const onPageHide = () => {
if (manuallyStoppedToolCallIds.has(toolCallId)) return
@@ -325,6 +399,8 @@ async function doExecuteRunTool(
runFromBlock: runFromBlock ? { startBlockId: runFromBlock.startBlockId } : undefined,
})
+ let leaveExecutionRecoverable = false
+
try {
const result = await executeWorkflowWithFullLogging({
workflowId: targetWorkflowId,
@@ -336,6 +412,7 @@ async function doExecuteRunTool(
stopAfterBlockId,
runFromBlock,
abortSignal: abortController.signal,
+ preserveExecutionOnTerminal: true,
})
// Determine success (same logic as staging's RunWorkflowClientTool)
@@ -368,21 +445,35 @@ async function doExecuteRunTool(
})
} else if (succeeded) {
logger.info('[RunTool] Workflow execution succeeded', { toolCallId, toolName })
+ const pendingCompletion = {
+ status: MothershipStreamV1ToolOutcome.success,
+ message: `Workflow execution completed. Started at: ${executionStartTime}`,
+ data: buildResultData(result),
+ }
+ savePendingCompletionReport(toolCallId, pendingCompletion)
await reportCompletion(
toolCallId,
- MothershipStreamV1ToolOutcome.success,
- `Workflow execution completed. Started at: ${executionStartTime}`,
- buildResultData(result)
+ pendingCompletion.status,
+ pendingCompletion.message,
+ pendingCompletion.data
)
+ clearPendingCompletionReport(toolCallId)
} else {
const msg = errorMessage || 'Workflow execution failed'
logger.error('[RunTool] Workflow execution failed', { toolCallId, toolName, error: msg })
+ const pendingCompletion = {
+ status: MothershipStreamV1ToolOutcome.error,
+ message: msg,
+ data: buildResultData(result),
+ }
+ savePendingCompletionReport(toolCallId, pendingCompletion)
await reportCompletion(
toolCallId,
- MothershipStreamV1ToolOutcome.error,
- msg,
- buildResultData(result)
+ pendingCompletion.status,
+ pendingCompletion.message,
+ pendingCompletion.data
)
+ clearPendingCompletionReport(toolCallId)
}
} catch (err) {
if (manuallyStoppedToolCallIds.has(toolCallId)) {
@@ -392,6 +483,35 @@ async function doExecuteRunTool(
})
} else {
const msg = toError(err).message
+ if (err instanceof SSEEventHandlerError || err instanceof SSEStreamInterruptedError) {
+ leaveExecutionRecoverable = true
+ logger.warn(
+ '[RunTool] Execution stream interrupted; leaving workflow execution in background',
+ {
+ toolCallId,
+ toolName,
+ executionId: err.executionId,
+ error: msg,
+ }
+ )
+ releaseVisibleExecutionForBackground()
+ await reportCompletion(
+ toolCallId,
+ ASYNC_TOOL_CONFIRMATION_STATUS.background,
+ 'Client lost local stream processing; workflow execution may still be continuing server-side.'
+ )
+ return
+ }
+ if (err instanceof CompletionReportError) {
+ leaveExecutionRecoverable = true
+ logger.warn('[RunTool] Completion report failed; leaving workflow execution recoverable', {
+ toolCallId,
+ toolName,
+ error: msg,
+ })
+ releaseVisibleExecutionForBackground()
+ return
+ }
logger.error('[RunTool] Workflow execution threw', { toolCallId, toolName, error: msg })
await reportCompletion(toolCallId, MothershipStreamV1ToolOutcome.error, msg)
}
@@ -408,11 +528,14 @@ async function doExecuteRunTool(
if (activeAbortController === abortController) {
activeRunAbortByWorkflowId.delete(targetWorkflowId)
}
- const { setCurrentExecutionId: clearExecId } = useExecutionStore.getState()
- clearExecId(targetWorkflowId, null)
- clearExecutionPointer(targetWorkflowId)
- consolePersistence.executionEnded()
- setIsExecuting(targetWorkflowId, false)
+ const { setCurrentExecutionId: clearExecId, setActiveBlocks } = useExecutionStore.getState()
+ if (!leaveExecutionRecoverable && activeToolCallId === toolCallId) {
+ clearExecId(targetWorkflowId, null)
+ clearExecutionPointer(targetWorkflowId)
+ consolePersistence.executionEnded()
+ setIsExecuting(targetWorkflowId, false)
+ setActiveBlocks(targetWorkflowId, new Set())
+ }
}
}
@@ -454,54 +577,65 @@ function buildResultData(result: unknown): Record | undefined {
*/
async function reportCompletion(
toolCallId: string,
- status: MothershipStreamV1ToolOutcome,
+ status: AsyncConfirmationStatus,
message?: string,
data?: AsyncCompletionData
): Promise {
- try {
- const body = JSON.stringify({
- toolCallId,
- status,
- message: message || (status === 'success' ? 'Tool completed' : 'Tool failed'),
- ...(data !== undefined ? { data } : {}),
- })
- const res = await fetch(COPILOT_CONFIRM_API_PATH, {
+ const basePayload = {
+ toolCallId,
+ status,
+ message: message || (status === 'success' ? 'Tool completed' : 'Tool failed'),
+ ...(data !== undefined ? { data } : {}),
+ }
+ const send = async (body: string) =>
+ fetch(COPILOT_CONFIRM_API_PATH, {
method: 'POST',
headers: { 'Content-Type': 'application/json', ...traceparentHeader() },
body,
})
- const LARGE_PAYLOAD_THRESHOLD = 10 * 1024 * 1024
- const bodySize = new Blob([body]).size
- if (!res.ok && isRecord(data) && bodySize > LARGE_PAYLOAD_THRESHOLD) {
- const { logs: _logs, ...dataWithoutLogs } = data
- logger.warn('[RunTool] reportCompletion failed with large payload, retrying without logs', {
- toolCallId,
- status: res.status,
- bodySize,
- })
- const retryRes = await fetch(COPILOT_CONFIRM_API_PATH, {
- method: 'POST',
- headers: { 'Content-Type': 'application/json', ...traceparentHeader() },
- body: JSON.stringify({
- toolCallId,
- status,
- message: message || (status === 'success' ? 'Tool completed' : 'Tool failed'),
- data: dataWithoutLogs,
- }),
- })
- if (!retryRes.ok) {
- logger.warn('[RunTool] reportCompletion retry also failed', {
+
+ const body = JSON.stringify(basePayload)
+ const LARGE_PAYLOAD_THRESHOLD = 10 * 1024 * 1024
+ const bodySize = new Blob([body]).size
+ let lastError: Error | null = null
+
+ for (let attempt = 1; attempt <= 2; attempt++) {
+ try {
+ const res = await send(body)
+ if (res.ok) return
+
+ if (isRecord(data) && bodySize > LARGE_PAYLOAD_THRESHOLD) {
+ const { logs: _logs, ...dataWithoutLogs } = data
+ logger.warn('[RunTool] reportCompletion failed with large payload, retrying without logs', {
toolCallId,
- status: retryRes.status,
+ status: res.status,
+ bodySize,
})
+ const retryRes = await send(
+ JSON.stringify({
+ toolCallId,
+ status,
+ message: message || (status === 'success' ? 'Tool completed' : 'Tool failed'),
+ data: dataWithoutLogs,
+ })
+ )
+ if (retryRes.ok) return
+ lastError = new Error(`reportCompletion retry failed with status ${retryRes.status}`)
+ } else {
+ lastError = new Error(`reportCompletion failed with status ${res.status}`)
}
- } else if (!res.ok) {
- logger.warn('[RunTool] reportCompletion failed', { toolCallId, status: res.status })
+ } catch (err) {
+ lastError = toError(err)
+ }
+
+ if (attempt < 2) {
+ await sleep(250)
}
- } catch (err) {
- logger.error('[RunTool] reportCompletion error', {
- toolCallId,
- error: toError(err).message,
- })
}
+
+ logger.error('[RunTool] reportCompletion failed after retries', {
+ toolCallId,
+ error: lastError?.message,
+ })
+ throw new CompletionReportError(lastError?.message ?? 'Failed to report tool completion')
}
diff --git a/apps/sim/lib/copilot/tools/client/store-utils.ts b/apps/sim/lib/copilot/tools/client/store-utils.ts
index 9ec88489672..6780db12807 100644
--- a/apps/sim/lib/copilot/tools/client/store-utils.ts
+++ b/apps/sim/lib/copilot/tools/client/store-utils.ts
@@ -71,7 +71,7 @@ function formatReadingLabel(target: string | undefined, state: ClientToolCallSta
case ClientToolCallState.success:
return `Read${suffix}`
case ClientToolCallState.error:
- return `Failed reading${suffix}`
+ return `Attempted to read${suffix}`
case ClientToolCallState.rejected:
case ClientToolCallState.aborted:
return `Skipped reading${suffix}`
@@ -127,14 +127,16 @@ function humanizedFallback(
toolName: string,
state: ClientToolCallState
): ClientToolDisplay | undefined {
- const formattedName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase())
+ const titleCaseName = toolName.replace(/_/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase())
+ if (state === ClientToolCallState.error) {
+ const lowerCaseName = toolName.replace(/_/g, ' ').toLowerCase()
+ return { text: `Attempted to ${lowerCaseName}`, icon: Loader }
+ }
const stateVerb =
state === ClientToolCallState.success
? 'Executed'
- : state === ClientToolCallState.error
- ? 'Failed'
- : state === ClientToolCallState.rejected || state === ClientToolCallState.aborted
- ? 'Skipped'
- : 'Executing'
- return { text: `${stateVerb} ${formattedName}`, icon: Loader }
+ : state === ClientToolCallState.rejected || state === ClientToolCallState.aborted
+ ? 'Skipped'
+ : 'Executing'
+ return { text: `${stateVerb} ${titleCaseName}`, icon: Loader }
}
diff --git a/apps/sim/lib/core/async-jobs/backends/trigger-dev.ts b/apps/sim/lib/core/async-jobs/backends/trigger-dev.ts
index cff02f218d0..7427108e141 100644
--- a/apps/sim/lib/core/async-jobs/backends/trigger-dev.ts
+++ b/apps/sim/lib/core/async-jobs/backends/trigger-dev.ts
@@ -24,6 +24,7 @@ const JOB_TYPE_TO_TASK_ID: Record = {
'cleanup-logs': 'cleanup-logs',
'cleanup-soft-deletes': 'cleanup-soft-deletes',
'cleanup-tasks': 'cleanup-tasks',
+ 'run-data-drain': 'run-data-drain',
}
/**
diff --git a/apps/sim/lib/core/async-jobs/types.ts b/apps/sim/lib/core/async-jobs/types.ts
index 42be995c0c2..515fe784c21 100644
--- a/apps/sim/lib/core/async-jobs/types.ts
+++ b/apps/sim/lib/core/async-jobs/types.ts
@@ -29,6 +29,7 @@ export type JobType =
| 'cleanup-logs'
| 'cleanup-soft-deletes'
| 'cleanup-tasks'
+ | 'run-data-drain'
export type AsyncExecutionCorrelationSource = 'workflow' | 'schedule' | 'webhook'
diff --git a/apps/sim/lib/core/config/env.ts b/apps/sim/lib/core/config/env.ts
index 14bf33ce5d4..b2c8c2871bd 100644
--- a/apps/sim/lib/core/config/env.ts
+++ b/apps/sim/lib/core/config/env.ts
@@ -355,6 +355,7 @@ export const env = createEnv({
WHITELABELING_ENABLED: z.boolean().optional(), // Enable whitelabeling on self-hosted (bypasses hosted requirements)
AUDIT_LOGS_ENABLED: z.boolean().optional(), // Enable audit logs on self-hosted (bypasses hosted requirements)
DATA_RETENTION_ENABLED: z.boolean().optional(), // Enable data retention settings on self-hosted (bypasses hosted requirements)
+ DATA_DRAINS_ENABLED: z.boolean().optional(), // Enable data drains on self-hosted (bypasses hosted requirements)
// Organizations - for self-hosted deployments
ORGANIZATIONS_ENABLED: z.boolean().optional(), // Enable organizations on self-hosted (bypasses plan requirements)
@@ -451,6 +452,7 @@ export const env = createEnv({
NEXT_PUBLIC_WHITELABELING_ENABLED: z.boolean().optional(), // Enable whitelabeling on self-hosted (bypasses hosted requirements)
NEXT_PUBLIC_AUDIT_LOGS_ENABLED: z.boolean().optional(), // Enable audit logs on self-hosted (bypasses hosted requirements)
NEXT_PUBLIC_DATA_RETENTION_ENABLED: z.boolean().optional(), // Enable data retention settings on self-hosted (bypasses hosted requirements)
+ NEXT_PUBLIC_DATA_DRAINS_ENABLED: z.boolean().optional(), // Enable data drains on self-hosted (bypasses hosted requirements)
NEXT_PUBLIC_ORGANIZATIONS_ENABLED: z.boolean().optional(), // Enable organizations on self-hosted (bypasses plan requirements)
NEXT_PUBLIC_DISABLE_INVITATIONS: z.boolean().optional(), // Disable workspace invitations globally (for self-hosted deployments)
NEXT_PUBLIC_DISABLE_PUBLIC_API: z.boolean().optional(), // Disable public API access UI toggle globally
@@ -488,6 +490,7 @@ export const env = createEnv({
NEXT_PUBLIC_WHITELABELING_ENABLED: process.env.NEXT_PUBLIC_WHITELABELING_ENABLED,
NEXT_PUBLIC_AUDIT_LOGS_ENABLED: process.env.NEXT_PUBLIC_AUDIT_LOGS_ENABLED,
NEXT_PUBLIC_DATA_RETENTION_ENABLED: process.env.NEXT_PUBLIC_DATA_RETENTION_ENABLED,
+ NEXT_PUBLIC_DATA_DRAINS_ENABLED: process.env.NEXT_PUBLIC_DATA_DRAINS_ENABLED,
NEXT_PUBLIC_ORGANIZATIONS_ENABLED: process.env.NEXT_PUBLIC_ORGANIZATIONS_ENABLED,
NEXT_PUBLIC_DISABLE_INVITATIONS: process.env.NEXT_PUBLIC_DISABLE_INVITATIONS,
NEXT_PUBLIC_DISABLE_PUBLIC_API: process.env.NEXT_PUBLIC_DISABLE_PUBLIC_API,
diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts
index 3a69af74fd1..e4c1b7f4441 100644
--- a/apps/sim/lib/core/config/feature-flags.ts
+++ b/apps/sim/lib/core/config/feature-flags.ts
@@ -135,6 +135,12 @@ export const isAuditLogsEnabled = isTruthy(env.AUDIT_LOGS_ENABLED)
*/
export const isDataRetentionEnabled = isTruthy(env.DATA_RETENTION_ENABLED)
+/**
+ * Is data drains enabled via env var override
+ * This bypasses hosted requirements for self-hosted deployments
+ */
+export const isDataDrainsEnabled = isTruthy(env.DATA_DRAINS_ENABLED)
+
/**
* Is E2B enabled for remote code execution
*/
diff --git a/apps/sim/lib/core/security/input-validation.server.ts b/apps/sim/lib/core/security/input-validation.server.ts
index 041d44c654d..ed23140ea46 100644
--- a/apps/sim/lib/core/security/input-validation.server.ts
+++ b/apps/sim/lib/core/security/input-validation.server.ts
@@ -24,6 +24,7 @@ export interface AsyncValidationResult extends ValidationResult {
* - Octal notation (0177.0.0.1)
* - Hex notation (0x7f000001)
* - IPv4-mapped IPv6 (::ffff:127.0.0.1)
+ * - IPv4-compatible IPv6 (::a.b.c.d / ::xxxx:xxxx, RFC 4291 §2.5.5.1, deprecated)
* - Various edge cases that regex patterns miss
*/
export function isPrivateOrReservedIP(ip: string): boolean {
@@ -35,7 +36,26 @@ export function isPrivateOrReservedIP(ip: string): boolean {
const addr = ipaddr.process(ip)
const range = addr.range()
- return range !== 'unicast'
+ if (range !== 'unicast') {
+ return true
+ }
+
+ if (addr.kind() === 'ipv6') {
+ const v6 = addr as ipaddr.IPv6
+ const parts = v6.parts
+ const firstSixZero = parts.slice(0, 6).every((p) => p === 0)
+ if (firstSixZero) {
+ const embedded = ipaddr.fromByteArray([
+ (parts[6] >> 8) & 0xff,
+ parts[6] & 0xff,
+ (parts[7] >> 8) & 0xff,
+ parts[7] & 0xff,
+ ])
+ return embedded.range() !== 'unicast'
+ }
+ }
+
+ return false
} catch {
return true
}
@@ -192,6 +212,7 @@ export interface SecureFetchOptions {
timeout?: number
maxRedirects?: number
maxResponseBytes?: number
+ signal?: AbortSignal
}
export class SecureFetchHeaders {
@@ -310,7 +331,7 @@ export async function secureFetchWithPinnedIP(
validateUrlWithDNS(redirectUrl, 'redirectUrl', { allowHttp: options.allowHttp })
.then((validation) => {
if (!validation.isValid) {
- reject(new Error(`Redirect blocked: ${validation.error}`))
+ settledReject(new Error(`Redirect blocked: ${validation.error}`))
return
}
return secureFetchWithPinnedIP(
@@ -321,15 +342,15 @@ export async function secureFetchWithPinnedIP(
)
})
.then((response) => {
- if (response) resolve(response)
+ if (response) settledResolve(response)
})
- .catch(reject)
+ .catch(settledReject)
return
}
if (isRedirectStatus(statusCode) && location && redirectCount >= maxRedirects) {
res.resume()
- reject(new Error(`Too many redirects (max: ${maxRedirects})`))
+ settledReject(new Error(`Too many redirects (max: ${maxRedirects})`))
return
}
@@ -355,7 +376,7 @@ export async function secureFetchWithPinnedIP(
})
res.on('error', (error) => {
- reject(error)
+ settledReject(error)
})
res.on('end', () => {
@@ -371,7 +392,7 @@ export async function secureFetchWithPinnedIP(
}
}
- resolve({
+ settledResolve({
ok: statusCode >= 200 && statusCode < 300,
status: statusCode,
statusText: res.statusMessage || '',
@@ -387,15 +408,44 @@ export async function secureFetchWithPinnedIP(
})
})
+ let onAbort: (() => void) | null = null
+ const cleanupAbort = () => {
+ if (onAbort && options.signal) {
+ options.signal.removeEventListener('abort', onAbort)
+ onAbort = null
+ }
+ }
+ const settledResolve: typeof resolve = (value) => {
+ cleanupAbort()
+ resolve(value)
+ }
+ const settledReject: typeof reject = (reason) => {
+ cleanupAbort()
+ reject(reason)
+ }
+
req.on('error', (error) => {
- reject(error)
+ settledReject(error)
})
req.on('timeout', () => {
req.destroy()
- reject(new Error(`Request timed out after ${requestOptions.timeout}ms`))
+ settledReject(new Error(`Request timed out after ${requestOptions.timeout}ms`))
})
+ if (options.signal) {
+ if (options.signal.aborted) {
+ req.destroy()
+ settledReject(options.signal.reason ?? new Error('Aborted'))
+ return
+ }
+ onAbort = () => {
+ req.destroy()
+ settledReject(options.signal?.reason ?? new Error('Aborted'))
+ }
+ options.signal.addEventListener('abort', onAbort, { once: true })
+ }
+
if (options.body) {
req.write(options.body)
}
diff --git a/apps/sim/lib/core/security/input-validation.test.ts b/apps/sim/lib/core/security/input-validation.test.ts
index 64bf5e33fd5..55b07a72db0 100644
--- a/apps/sim/lib/core/security/input-validation.test.ts
+++ b/apps/sim/lib/core/security/input-validation.test.ts
@@ -26,7 +26,10 @@ import {
validateSupabaseProjectId,
validateWorkdayTenantUrl,
} from '@/lib/core/security/input-validation'
-import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server'
+import {
+ isPrivateOrReservedIP,
+ validateUrlWithDNS,
+} from '@/lib/core/security/input-validation.server'
import { sanitizeForLogging } from '@/lib/core/security/redaction'
vi.mock('@/lib/core/config/feature-flags', () => featureFlagsMock)
@@ -562,6 +565,147 @@ describe('sanitizeForLogging', () => {
})
})
+describe('isPrivateOrReservedIP', () => {
+ describe('IPv4 private/reserved ranges', () => {
+ it.concurrent.each([
+ ['192.168.1.1'],
+ ['192.168.0.0'],
+ ['10.0.0.1'],
+ ['10.255.255.255'],
+ ['172.16.0.1'],
+ ['172.31.255.255'],
+ ['127.0.0.1'],
+ ['127.255.255.255'],
+ ['169.254.169.254'],
+ ['0.0.0.0'],
+ ['224.0.0.1'],
+ ])('blocks IPv4 %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+ })
+
+ describe('IPv6 reserved ranges', () => {
+ it.concurrent.each([
+ ['::1'],
+ ['::'],
+ ['fe80::1'],
+ ['fc00::1'],
+ ['fd00::1'],
+ ['ff02::1'],
+ ['2001:db8::1'],
+ ])('blocks IPv6 %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+ })
+
+ describe('IPv4-mapped IPv6 (::ffff:0:0/96)', () => {
+ it.concurrent.each([
+ ['::ffff:192.168.1.1'],
+ ['::ffff:127.0.0.1'],
+ ['::ffff:169.254.169.254'],
+ ['::ffff:c0a8:101'],
+ ['::ffff:0:0'],
+ ])('blocks mapped private/reserved %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+
+ it.concurrent('allows mapped public IPv4 ::ffff:8.8.8.8', () => {
+ expect(isPrivateOrReservedIP('::ffff:8.8.8.8')).toBe(false)
+ })
+ })
+
+ describe('NAT64 (RFC 6052, 64:ff9b::/96)', () => {
+ it.concurrent('blocks NAT64-encoded private IPv4', () => {
+ expect(isPrivateOrReservedIP('64:ff9b::192.168.1.1')).toBe(true)
+ })
+ })
+
+ describe('IPv4-compatible IPv6 (::a.b.c.d, RFC 4291 §2.5.5.1, deprecated)', () => {
+ it.concurrent.each([
+ ['::c0a8:101', '192.168.1.1 (URL-normalized hex form)'],
+ ['::c0a8:0101', '192.168.1.1 (zero-padded hex form)'],
+ ['::a9fe:a9fe', '169.254.169.254 (cloud metadata)'],
+ ['::7f00:1', '127.0.0.1 (loopback)'],
+ ['::7f00:0001', '127.0.0.1 (zero-padded)'],
+ ['::a00:1', '10.0.0.1 (RFC1918)'],
+ ['::ac10:1', '172.16.0.1 (RFC1918)'],
+ ['::e000:1', '224.0.0.1 (multicast)'],
+ ['::192.168.1.1', 'dotted form ::192.168.1.1'],
+ ['::169.254.169.254', 'dotted form ::169.254.169.254'],
+ ['::127.0.0.1', 'dotted form ::127.0.0.1'],
+ ['::10.0.0.1', 'dotted form ::10.0.0.1'],
+ ])('blocks %s — %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+
+ it.concurrent.each([
+ ['::8.8.8.8', 'dotted form embedding public IPv4'],
+ ['::808:808', 'hex form embedding 8.8.8.8'],
+ ['::0808:0808', 'zero-padded hex form embedding 8.8.8.8'],
+ ])('allows IPv4-compatible IPv6 with embedded public IPv4 %s — %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(false)
+ })
+
+ it.concurrent.each([
+ ['::ffff:1', 'embedded 255.255.0.1 (Class E reserved) via parts[6]=0xffff'],
+ ['::ffff:0', 'embedded 255.255.0.0 (Class E reserved)'],
+ ['::ffff:abcd', 'embedded 255.255.171.205 (Class E reserved)'],
+ ['::f000:1', 'embedded 240.0.0.1 (Class E reserved)'],
+ ])('blocks IPv4-compatible IPv6 with Class E embedded IPv4 %s — %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+ })
+
+ describe('non-IPv4-compat unicast IPv6 (must not over-block)', () => {
+ it.concurrent.each([
+ ['2606:4700:4700::1111'],
+ ['2001:4860:4860::8888'],
+ ['::1:c0a8:101'],
+ ['1::c0a8:101'],
+ ['1:2:3:4:5:6:c0a8:101'],
+ ])('allows %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(false)
+ })
+ })
+
+ describe('IPv4 public addresses', () => {
+ it.concurrent.each([['8.8.8.8'], ['1.1.1.1'], ['1.0.0.1']])('allows %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(false)
+ })
+ })
+
+ describe('IPv4 alternate notations', () => {
+ it.concurrent.each([['0177.0.0.1'], ['0x7f000001']])('blocks loopback notation %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+ })
+
+ describe('invalid input', () => {
+ it.concurrent.each([['not-an-ip'], [''], ['256.256.256.256'], ['::g']])('rejects %s', (ip) => {
+ expect(isPrivateOrReservedIP(ip)).toBe(true)
+ })
+ })
+})
+
+describe('URL hostname normalization (Node URL parser + isPrivateOrReservedIP integration)', () => {
+ it.concurrent('Node normalizes [::192.168.1.1] to [::c0a8:101] and validator blocks it', () => {
+ const url = new URL('http://[::192.168.1.1]/')
+ const cleanHostname =
+ url.hostname.startsWith('[') && url.hostname.endsWith(']')
+ ? url.hostname.slice(1, -1)
+ : url.hostname
+ expect(cleanHostname).toBe('::c0a8:101')
+ expect(isPrivateOrReservedIP(cleanHostname)).toBe(true)
+ })
+
+ it.concurrent('Node normalizes [::169.254.169.254] and validator blocks the metadata IP', () => {
+ const url = new URL('http://[::169.254.169.254]/')
+ const cleanHostname = url.hostname.slice(1, -1)
+ expect(cleanHostname).toBe('::a9fe:a9fe')
+ expect(isPrivateOrReservedIP(cleanHostname)).toBe(true)
+ })
+})
+
describe('validateUrlWithDNS', () => {
describe('basic validation', () => {
it('should reject invalid URLs', async () => {
diff --git a/apps/sim/lib/credentials/atlassian-service-account.ts b/apps/sim/lib/credentials/atlassian-service-account.ts
new file mode 100644
index 00000000000..1d78cd8bd6b
--- /dev/null
+++ b/apps/sim/lib/credentials/atlassian-service-account.ts
@@ -0,0 +1,127 @@
+import { parseAtlassianErrorMessage } from '@/tools/jira/utils'
+
+/**
+ * Discrete validation failure codes returned to the client. The UI maps each
+ * code to a human message; raw Atlassian response bodies stay in server logs.
+ */
+export type AtlassianValidationCode =
+ | 'invalid_credentials'
+ | 'site_not_found'
+ | 'atlassian_unavailable'
+
+export class AtlassianValidationError extends Error {
+ constructor(
+ public readonly code: AtlassianValidationCode,
+ public readonly status: number,
+ public readonly logDetail?: Record
+ ) {
+ super(code)
+ this.name = 'AtlassianValidationError'
+ }
+}
+
+/**
+ * Atlassian Cloud sites are always served from `*.atlassian.net` (production)
+ * or `*.jira-dev.com` (Atlassian's developer sandbox). Anything else is either
+ * a typo (`atlassian.com`, `jira.com`), a Data Center hostname (which our
+ * gateway URL doesn't support), or — worse — an attempt to point this
+ * server-side fetch at internal infrastructure (`localhost`, `169.254.169.254`,
+ * `*.corp`). Restricting to the public Atlassian Cloud suffixes blocks SSRF
+ * at the boundary before any outbound request.
+ */
+const ATLASSIAN_CLOUD_HOST_REGEX =
+ /^[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.(?:atlassian\.net|jira-dev\.com)$/i
+
+export function normalizeAtlassianDomain(rawDomain: string): string {
+ return rawDomain.replace(/^https?:\/\//i, '').replace(/\/+$/, '')
+}
+
+function assertAtlassianCloudHost(domain: string): void {
+ if (!ATLASSIAN_CLOUD_HOST_REGEX.test(domain)) {
+ throw new AtlassianValidationError('site_not_found', 400, {
+ step: 'host_validation',
+ domain,
+ reason: 'host is not an Atlassian Cloud site (expected *.atlassian.net)',
+ })
+ }
+}
+
+/**
+ * Throws an `AtlassianValidationError` with `unauthorizedCode` for 401/403 responses
+ * (which mean the token itself was rejected) and `atlassian_unavailable` for any
+ * other non-2xx.
+ */
+async function assertAtlassianResponseOk(
+ res: Response,
+ step: string,
+ unauthorizedCode: AtlassianValidationCode,
+ context: Record = {}
+): Promise {
+ if (res.ok) return
+ const body = parseAtlassianErrorMessage(res.status, res.statusText, await res.text())
+ if (res.status === 401 || res.status === 403) {
+ throw new AtlassianValidationError(unauthorizedCode, res.status, { step, body, ...context })
+ }
+ throw new AtlassianValidationError('atlassian_unavailable', res.status, {
+ step,
+ body,
+ ...context,
+ })
+}
+
+/**
+ * Validates an Atlassian service account scoped API token.
+ *
+ * Scoped service-account tokens cannot call `api.atlassian.com/oauth/token/accessible-resources`
+ * (that endpoint is for OAuth-3LO tokens). Instead we use the public, unauthenticated
+ * `tenant_info` discovery endpoint to resolve cloudId from the site domain, then verify
+ * the token works by hitting `/myself` through the gateway.
+ */
+export async function validateAtlassianServiceAccount(
+ apiToken: string,
+ domain: string
+): Promise<{ accountId: string; displayName: string; cloudId: string }> {
+ assertAtlassianCloudHost(domain)
+
+ const tenantInfoRes = await fetch(`https://${domain}/_edge/tenant_info`, {
+ headers: { Accept: 'application/json' },
+ })
+ if (tenantInfoRes.status === 404) {
+ throw new AtlassianValidationError('site_not_found', 404, { step: 'tenant_info', domain })
+ }
+ // tenant_info is unauthenticated, so there is no "invalid credentials" branch here —
+ // any non-OK that isn't a 404 means Atlassian is unavailable, not the token's fault.
+ await assertAtlassianResponseOk(tenantInfoRes, 'tenant_info', 'atlassian_unavailable', { domain })
+ const tenantInfo = (await tenantInfoRes.json()) as { cloudId?: string }
+ if (!tenantInfo.cloudId) {
+ throw new AtlassianValidationError('atlassian_unavailable', 502, {
+ step: 'tenant_info',
+ reason: 'missing cloudId in response',
+ domain,
+ })
+ }
+ const cloudId = tenantInfo.cloudId
+
+ const myselfRes = await fetch(`https://api.atlassian.com/ex/jira/${cloudId}/rest/api/3/myself`, {
+ headers: { Authorization: `Bearer ${apiToken}`, Accept: 'application/json' },
+ })
+ await assertAtlassianResponseOk(myselfRes, 'myself', 'invalid_credentials', { cloudId })
+
+ const myself = (await myselfRes.json()) as {
+ accountId?: string
+ displayName?: string
+ emailAddress?: string
+ }
+ if (!myself.accountId) {
+ throw new AtlassianValidationError('atlassian_unavailable', 502, {
+ step: 'myself',
+ reason: 'missing accountId in response',
+ })
+ }
+
+ return {
+ accountId: myself.accountId,
+ displayName: myself.displayName || myself.emailAddress || domain,
+ cloudId,
+ }
+}
diff --git a/apps/sim/lib/data-drains/access.ts b/apps/sim/lib/data-drains/access.ts
new file mode 100644
index 00000000000..5206e595281
--- /dev/null
+++ b/apps/sim/lib/data-drains/access.ts
@@ -0,0 +1,111 @@
+import { db } from '@sim/db'
+import { dataDrains, member } from '@sim/db/schema'
+import { and, eq } from 'drizzle-orm'
+import { NextResponse } from 'next/server'
+import { getSession } from '@/lib/auth'
+import { isOrganizationOnEnterprisePlan } from '@/lib/billing/core/subscription'
+import { isBillingEnabled, isDataDrainsEnabled } from '@/lib/core/config/feature-flags'
+
+export interface DrainAccessSession {
+ user: {
+ id: string
+ name?: string | null
+ email?: string | null
+ }
+ membership: {
+ role: string
+ }
+}
+
+export type DrainAccessResult =
+ | { ok: true; session: DrainAccessSession }
+ | { ok: false; response: NextResponse }
+
+/**
+ * Auth + membership + role + enterprise-plan gate shared by every data-drain
+ * route. Owner/admin role is required for reads as well as writes since drain
+ * configs expose customer bucket names and webhook URLs. On Sim Cloud the
+ * gate is the Enterprise plan; on self-hosted it's `DATA_DRAINS_ENABLED`,
+ * which 404s when unset so a newer image doesn't silently expose drains.
+ */
+export async function authorizeDrainAccess(
+ organizationId: string,
+ options: { requireMutating: boolean }
+): Promise {
+ const session = await getSession()
+ if (!session?.user?.id) {
+ return { ok: false, response: NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) }
+ }
+
+ const [memberEntry] = await db
+ .select({ role: member.role })
+ .from(member)
+ .where(and(eq(member.organizationId, organizationId), eq(member.userId, session.user.id)))
+ .limit(1)
+
+ if (!memberEntry) {
+ return {
+ ok: false,
+ response: NextResponse.json(
+ { error: 'Forbidden - Not a member of this organization' },
+ { status: 403 }
+ ),
+ }
+ }
+
+ if (!isBillingEnabled && !isDataDrainsEnabled) {
+ return {
+ ok: false,
+ response: NextResponse.json(
+ { error: 'Data Drains are not enabled on this deployment' },
+ { status: 404 }
+ ),
+ }
+ }
+ if (isBillingEnabled) {
+ const hasEnterprise = await isOrganizationOnEnterprisePlan(organizationId)
+ if (!hasEnterprise) {
+ return {
+ ok: false,
+ response: NextResponse.json(
+ { error: 'Data Drains are available on Enterprise plans only' },
+ { status: 403 }
+ ),
+ }
+ }
+ }
+ if (memberEntry.role !== 'owner' && memberEntry.role !== 'admin') {
+ return {
+ ok: false,
+ response: NextResponse.json(
+ {
+ error: options.requireMutating
+ ? 'Forbidden - Only organization owners and admins can manage data drains'
+ : 'Forbidden - Only organization owners and admins can view data drains',
+ },
+ { status: 403 }
+ ),
+ }
+ }
+
+ return {
+ ok: true,
+ session: {
+ user: {
+ id: session.user.id,
+ name: session.user.name ?? null,
+ email: session.user.email ?? null,
+ },
+ membership: { role: memberEntry.role },
+ },
+ }
+}
+
+export async function loadDrain(organizationId: string, drainId: string) {
+ const [drain] = await db
+ .select()
+ .from(dataDrains)
+ .where(and(eq(dataDrains.id, drainId), eq(dataDrains.organizationId, organizationId)))
+ .limit(1)
+ return drain ?? null
+}
diff --git a/apps/sim/lib/data-drains/destinations/registry.ts b/apps/sim/lib/data-drains/destinations/registry.ts
new file mode 100644
index 00000000000..eb43b7c9b6a
--- /dev/null
+++ b/apps/sim/lib/data-drains/destinations/registry.ts
@@ -0,0 +1,12 @@
+import { s3Destination } from '@/lib/data-drains/destinations/s3'
+import { webhookDestination } from '@/lib/data-drains/destinations/webhook'
+import type { DestinationType, DrainDestination } from '@/lib/data-drains/types'
+
+export const DESTINATION_REGISTRY = {
+ s3: s3Destination,
+ webhook: webhookDestination,
+} as const satisfies Record
+
+export function getDestination(type: DestinationType): DrainDestination {
+ return DESTINATION_REGISTRY[type]
+}
diff --git a/apps/sim/lib/data-drains/destinations/s3.test.ts b/apps/sim/lib/data-drains/destinations/s3.test.ts
new file mode 100644
index 00000000000..210ff2c03c7
--- /dev/null
+++ b/apps/sim/lib/data-drains/destinations/s3.test.ts
@@ -0,0 +1,157 @@
+/**
+ * @vitest-environment node
+ */
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+
+const { mockSend, mockDestroy, S3ClientCtor, PutObjectCommandCtor, DeleteObjectCommandCtor } =
+ vi.hoisted(() => {
+ const mockSend = vi.fn(async () => ({}))
+ const mockDestroy = vi.fn()
+ return {
+ mockSend,
+ mockDestroy,
+ S3ClientCtor: vi.fn(() => ({ send: mockSend, destroy: mockDestroy })),
+ PutObjectCommandCtor: vi.fn((args: unknown) => ({ __cmd: 'put', args })),
+ DeleteObjectCommandCtor: vi.fn((args: unknown) => ({ __cmd: 'delete', args })),
+ }
+ })
+
+vi.mock('@aws-sdk/client-s3', () => ({
+ S3Client: S3ClientCtor,
+ PutObjectCommand: PutObjectCommandCtor,
+ DeleteObjectCommand: DeleteObjectCommandCtor,
+}))
+
+import { s3Destination } from '@/lib/data-drains/destinations/s3'
+
+const config = {
+ bucket: 'my-bucket',
+ region: 'us-east-1',
+ prefix: 'sim/',
+}
+const credentials = { accessKeyId: 'AKID', secretAccessKey: 'SECRET' }
+
+beforeEach(() => {
+ vi.clearAllMocks()
+})
+
+describe('s3Destination openSession', () => {
+ it('reuses one S3Client across multiple deliveries and destroys on close', async () => {
+ const session = s3Destination.openSession({ config, credentials })
+ expect(S3ClientCtor).toHaveBeenCalledTimes(1)
+
+ const body = Buffer.from('row\n', 'utf8')
+ const meta = (sequence: number) => ({
+ drainId: 'd1',
+ runId: 'r1',
+ source: 'workflow_logs' as const,
+ sequence,
+ rowCount: 1,
+ runStartedAt: new Date('2025-06-15T12:00:00Z'),
+ })
+ const signal = new AbortController().signal
+
+ const res1 = await session.deliver({
+ body,
+ contentType: 'application/x-ndjson',
+ metadata: meta(0),
+ signal,
+ })
+ const res2 = await session.deliver({
+ body,
+ contentType: 'application/x-ndjson',
+ metadata: meta(1),
+ signal,
+ })
+
+ expect(S3ClientCtor).toHaveBeenCalledTimes(1)
+ expect(mockSend).toHaveBeenCalledTimes(2)
+
+ expect(res1.locator).toMatch(
+ /^s3:\/\/my-bucket\/sim\/workflow_logs\/d1\/\d{4}\/\d{2}\/\d{2}\/r1-00000\.ndjson$/
+ )
+ expect(res2.locator).toMatch(/r1-00001\.ndjson$/)
+
+ const putArgs = (PutObjectCommandCtor.mock.calls[0]?.[0] ?? {}) as Record
+ expect(putArgs.Bucket).toBe('my-bucket')
+ expect(putArgs.Body).toBe(body)
+ expect(putArgs.ContentType).toBe('application/x-ndjson')
+ expect((putArgs.Metadata as Record)['sim-drain-id']).toBe('d1')
+ expect((putArgs.Metadata as Record)['sim-sequence']).toBe('0')
+
+ await session.close()
+ expect(mockDestroy).toHaveBeenCalledTimes(1)
+ })
+
+ it('omits the prefix segment when prefix is empty', async () => {
+ const session = s3Destination.openSession({
+ config: { bucket: 'b', region: 'us-east-1' },
+ credentials,
+ })
+ const result = await session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata: {
+ drainId: 'd',
+ runId: 'r',
+ source: 'audit_logs',
+ sequence: 0,
+ rowCount: 1,
+ runStartedAt: new Date('2025-06-15T12:00:00Z'),
+ },
+ signal: new AbortController().signal,
+ })
+ expect(result.locator).toMatch(
+ /^s3:\/\/b\/audit_logs\/d\/\d{4}\/\d{2}\/\d{2}\/r-00000\.ndjson$/
+ )
+ await session.close()
+ })
+
+ it('surfaces AWS error code in delivery errors', async () => {
+ mockSend.mockRejectedValueOnce(
+ Object.assign(new Error('Access Denied'), {
+ name: 'AccessDenied',
+ $metadata: { httpStatusCode: 403, requestId: 'req-1' },
+ })
+ )
+ const session = s3Destination.openSession({ config, credentials })
+ await expect(
+ session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata: {
+ drainId: 'd',
+ runId: 'r',
+ source: 'audit_logs',
+ sequence: 0,
+ rowCount: 1,
+ runStartedAt: new Date('2025-06-15T12:00:00Z'),
+ },
+ signal: new AbortController().signal,
+ })
+ ).rejects.toThrow(/AccessDenied 403/)
+ await session.close()
+ })
+})
+
+describe('s3Destination test()', () => {
+ it('writes a probe object then attempts cleanup', async () => {
+ await s3Destination.test!({
+ config,
+ credentials,
+ signal: new AbortController().signal,
+ })
+ expect(PutObjectCommandCtor).toHaveBeenCalled()
+ expect(DeleteObjectCommandCtor).toHaveBeenCalled()
+ expect(mockDestroy).toHaveBeenCalled()
+ })
+
+ it('still returns success when cleanup delete fails', async () => {
+ mockSend
+ .mockResolvedValueOnce({}) // put probe
+ .mockRejectedValueOnce(new Error('no delete perms')) // cleanup
+ await expect(
+ s3Destination.test!({ config, credentials, signal: new AbortController().signal })
+ ).resolves.toBeUndefined()
+ })
+})
diff --git a/apps/sim/lib/data-drains/destinations/s3.ts b/apps/sim/lib/data-drains/destinations/s3.ts
new file mode 100644
index 00000000000..038d229ead4
--- /dev/null
+++ b/apps/sim/lib/data-drains/destinations/s3.ts
@@ -0,0 +1,223 @@
+import {
+ DeleteObjectCommand,
+ PutObjectCommand,
+ S3Client,
+ type S3ServiceException,
+} from '@aws-sdk/client-s3'
+import { createLogger } from '@sim/logger'
+import { generateShortId } from '@sim/utils/id'
+import { z } from 'zod'
+import { validateExternalUrl } from '@/lib/core/security/input-validation'
+import { validateUrlWithDNS } from '@/lib/core/security/input-validation.server'
+import type { DrainDestination } from '@/lib/data-drains/types'
+
+const logger = createLogger('DataDrainS3Destination')
+
+const s3ConfigSchema = z.object({
+ bucket: z.string().min(1, 'bucket is required').max(255),
+ region: z.string().min(1, 'region is required').max(64),
+ /** Optional prefix; trailing slash is added automatically when assembling keys. */
+ prefix: z.string().max(512).optional(),
+ /**
+ * Optional override for non-AWS S3-compatible providers (MinIO, R2, GCS interop, etc.).
+ * SSRF-validated: HTTPS-only, must not resolve syntactically to a private,
+ * loopback, or cloud-metadata address. The AWS SDK will issue requests to
+ * this host, so we reject internal targets at the schema boundary.
+ */
+ endpoint: z
+ .string()
+ .url()
+ .refine((value) => validateExternalUrl(value, 'endpoint').isValid, {
+ message: 'endpoint must be HTTPS and not point at a private, loopback, or metadata address',
+ })
+ .optional(),
+ /**
+ * Force path-style addressing. Set `true` for MinIO / Ceph RGW; defaults
+ * to `false` for AWS S3 and Cloudflare R2.
+ */
+ forcePathStyle: z.boolean().optional(),
+})
+
+const s3CredentialsSchema = z.object({
+ accessKeyId: z.string().min(1, 'accessKeyId is required'),
+ secretAccessKey: z.string().min(1, 'secretAccessKey is required'),
+})
+
+export type S3DestinationConfig = z.infer
+export type S3DestinationCredentials = z.infer
+
+function buildClient(config: S3DestinationConfig, credentials: S3DestinationCredentials): S3Client {
+ return new S3Client({
+ region: config.region,
+ credentials: {
+ accessKeyId: credentials.accessKeyId,
+ secretAccessKey: credentials.secretAccessKey,
+ },
+ endpoint: config.endpoint,
+ forcePathStyle: config.forcePathStyle ?? false,
+ })
+}
+
+function normalizePrefix(raw: string | undefined): string {
+ if (!raw) return ''
+ // S3 keys cannot start with `/` (creates an empty-name segment); also
+ // collapse trailing slashes so the joiner produces a single boundary.
+ const trimmed = raw.replace(/^\/+/, '').replace(/\/+$/, '')
+ return trimmed.length === 0 ? '' : `${trimmed}/`
+}
+
+function buildKey(
+ config: S3DestinationConfig,
+ metadata: {
+ drainId: string
+ runId: string
+ source: string
+ sequence: number
+ runStartedAt: Date
+ }
+): string {
+ // Partition by the run's start time so all chunks from one run share a
+ // single date prefix even if delivery crosses a midnight boundary.
+ const partition = metadata.runStartedAt
+ const yyyy = partition.getUTCFullYear().toString().padStart(4, '0')
+ const mm = (partition.getUTCMonth() + 1).toString().padStart(2, '0')
+ const dd = partition.getUTCDate().toString().padStart(2, '0')
+ const seq = metadata.sequence.toString().padStart(5, '0')
+ const prefix = normalizePrefix(config.prefix)
+ return `${prefix}${metadata.source}/${metadata.drainId}/${yyyy}/${mm}/${dd}/${metadata.runId}-${seq}.ndjson`
+}
+
+function isS3ServiceException(error: unknown): error is S3ServiceException {
+ return (
+ typeof error === 'object' &&
+ error !== null &&
+ '$metadata' in error &&
+ typeof (error as { name?: unknown }).name === 'string'
+ )
+}
+
+/**
+ * Resolves the optional custom endpoint and confirms it does not point at a
+ * private, loopback, or cloud-metadata address. The schema-level
+ * `validateExternalUrl` only catches IP literals, so a hostname like
+ * `evil.example.com` resolving to `169.254.169.254` would slip past it; the
+ * AWS SDK resolves the host itself, so this DNS-aware check closes that gap.
+ */
+async function assertEndpointIsPublic(endpoint: string | undefined): Promise {
+ if (!endpoint) return
+ const result = await validateUrlWithDNS(endpoint, 'endpoint')
+ if (!result.isValid) {
+ throw new Error(result.error ?? 'S3 endpoint failed SSRF validation')
+ }
+}
+
+/**
+ * Surfaces actionable S3 SDK error codes (`AccessDenied`, `NoSuchBucket`,
+ * `InvalidAccessKeyId`, `SignatureDoesNotMatch`, ...) and preserves the
+ * original error as `cause` so callers can still branch on `code`/`$metadata`.
+ */
+async function withS3ErrorContext(action: string, fn: () => Promise): Promise {
+ try {
+ return await fn()
+ } catch (error) {
+ if (isS3ServiceException(error)) {
+ const code = error.name
+ const status = error.$metadata?.httpStatusCode
+ const requestId = error.$metadata?.requestId
+ logger.warn('S3 operation failed', { action, code, status, requestId })
+ // Preserve the original SDK error as `cause` so callers can still
+ // branch on `code` / `$metadata` while getting an actionable message.
+ throw new Error(
+ `S3 ${action} failed (${code}${status ? ` ${status}` : ''}): ${error.message}`,
+ { cause: error }
+ )
+ }
+ throw error
+ }
+}
+
+export const s3Destination: DrainDestination = {
+ type: 's3',
+ displayName: 'Amazon S3',
+ configSchema: s3ConfigSchema,
+ credentialsSchema: s3CredentialsSchema,
+
+ async test({ config, credentials, signal }) {
+ await assertEndpointIsPublic(config.endpoint)
+ const client = buildClient(config, credentials)
+ // Probe with a real write so read-only creds and write-only IAM policies
+ // surface here instead of at the first scheduled run.
+ const probeKey = `${normalizePrefix(config.prefix)}.sim-drain-write-probe/${generateShortId(12)}`
+ try {
+ await withS3ErrorContext('test-put', () =>
+ client.send(
+ new PutObjectCommand({
+ Bucket: config.bucket,
+ Key: probeKey,
+ Body: Buffer.alloc(0),
+ ContentType: 'application/octet-stream',
+ ServerSideEncryption: 'AES256',
+ }),
+ { abortSignal: signal }
+ )
+ )
+ // Best-effort cleanup; ignore failures so a missing s3:DeleteObject
+ // doesn't fail the test (write was already proven).
+ try {
+ await client.send(new DeleteObjectCommand({ Bucket: config.bucket, Key: probeKey }), {
+ abortSignal: signal,
+ })
+ } catch (cleanupError) {
+ logger.debug('S3 test write probe cleanup failed (non-fatal)', {
+ bucket: config.bucket,
+ key: probeKey,
+ error: cleanupError,
+ })
+ }
+ } finally {
+ client.destroy()
+ }
+ },
+
+ openSession({ config, credentials }) {
+ const client = buildClient(config, credentials)
+ // Cache the DNS-aware endpoint check across all chunks in a run so we
+ // pay the lookup once. The SDK creates its own connections, so we can't
+ // pin the IP — but doing the check before any S3 call still rejects
+ // hostnames that resolve to internal targets at the start of the run.
+ // Lazy-init avoids an unhandled rejection if the source yields no chunks
+ // and `deliver` never runs (e.g., a drain with nothing new to export).
+ let endpointCheck: Promise | null = null
+ return {
+ async deliver({ body, contentType, metadata, signal }) {
+ if (endpointCheck === null) endpointCheck = assertEndpointIsPublic(config.endpoint)
+ await endpointCheck
+ const key = buildKey(config, metadata)
+ await withS3ErrorContext('put-object', () =>
+ client.send(
+ new PutObjectCommand({
+ Bucket: config.bucket,
+ Key: key,
+ Body: body,
+ ContentType: contentType,
+ ServerSideEncryption: 'AES256',
+ Metadata: {
+ 'sim-drain-id': metadata.drainId,
+ 'sim-run-id': metadata.runId,
+ 'sim-source': metadata.source,
+ 'sim-sequence': metadata.sequence.toString(),
+ 'sim-row-count': metadata.rowCount.toString(),
+ },
+ }),
+ { abortSignal: signal }
+ )
+ )
+ logger.debug('S3 chunk delivered', { bucket: config.bucket, key, bytes: body.byteLength })
+ return { locator: `s3://${config.bucket}/${key}` }
+ },
+ async close() {
+ client.destroy()
+ },
+ }
+ },
+}
diff --git a/apps/sim/lib/data-drains/destinations/webhook.test.ts b/apps/sim/lib/data-drains/destinations/webhook.test.ts
new file mode 100644
index 00000000000..38a6f37029d
--- /dev/null
+++ b/apps/sim/lib/data-drains/destinations/webhook.test.ts
@@ -0,0 +1,176 @@
+/**
+ * @vitest-environment node
+ */
+import { createHmac } from 'node:crypto'
+import { inputValidationMock, inputValidationMockFns } from '@sim/testing'
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+
+vi.mock('@/lib/core/security/input-validation.server', () => inputValidationMock)
+
+import { webhookDestination } from '@/lib/data-drains/destinations/webhook'
+
+const config = { url: 'https://example.com/hook' }
+const credentials = { signingSecret: 'super-secret-key' }
+const metadata = {
+ drainId: 'd1',
+ runId: 'r1',
+ source: 'workflow_logs' as const,
+ sequence: 3,
+ rowCount: 5,
+}
+
+function mockPinnedFetchOnce(response: { ok: boolean; status: number; headers?: Headers }) {
+ inputValidationMockFns.mockSecureFetchWithPinnedIP.mockResolvedValueOnce({
+ ok: response.ok,
+ status: response.status,
+ statusText: '',
+ headers: response.headers ?? new Headers(),
+ text: async () => '',
+ json: async () => ({}),
+ arrayBuffer: async () => new ArrayBuffer(0),
+ })
+}
+
+beforeEach(() => {
+ vi.clearAllMocks()
+ inputValidationMockFns.mockValidateUrlWithDNS.mockResolvedValue({
+ isValid: true,
+ resolvedIP: '93.184.216.34',
+ originalHostname: 'example.com',
+ })
+})
+
+describe('webhookDestination openSession', () => {
+ it('signs the body with HMAC-SHA256 over `.`', async () => {
+ mockPinnedFetchOnce({ ok: true, status: 200 })
+ const session = webhookDestination.openSession({ config, credentials })
+ const body = Buffer.from('{"id":1}\n', 'utf8')
+
+ await session.deliver({
+ body,
+ contentType: 'application/x-ndjson',
+ metadata,
+ signal: new AbortController().signal,
+ })
+
+ const call = inputValidationMockFns.mockSecureFetchWithPinnedIP.mock.calls[0]
+ const [calledUrl, pinnedIP, init] = call
+ expect(calledUrl).toBe('https://example.com/hook')
+ expect(pinnedIP).toBe('93.184.216.34')
+ const headers = init.headers as Record
+ expect(headers['Content-Type']).toBe('application/x-ndjson')
+ expect(headers['X-Sim-Drain-Id']).toBe('d1')
+ expect(headers['X-Sim-Run-Id']).toBe('r1')
+ expect(headers['X-Sim-Sequence']).toBe('3')
+ expect(headers['Idempotency-Key']).toBe('r1-3')
+
+ const sig = headers['X-Sim-Signature']
+ const tsPart = sig.match(/t=(\d+)/)![1]
+ const v1Part = sig.match(/v1=([0-9a-f]+)/)![1]
+ const expected = createHmac('sha256', credentials.signingSecret)
+ .update(`${tsPart}.`)
+ .update(body)
+ .digest('hex')
+ expect(v1Part).toBe(expected)
+
+ await session.close()
+ })
+
+ it('retries on 5xx and succeeds', async () => {
+ mockPinnedFetchOnce({ ok: false, status: 503 })
+ mockPinnedFetchOnce({ ok: true, status: 200 })
+ vi.spyOn(global, 'setTimeout').mockImplementation(((fn: () => void) => {
+ fn()
+ return 0 as unknown as NodeJS.Timeout
+ }) as never)
+
+ const session = webhookDestination.openSession({ config, credentials })
+ const result = await session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata,
+ signal: new AbortController().signal,
+ })
+ expect(result.locator).toContain('https://example.com/hook')
+ expect(inputValidationMockFns.mockSecureFetchWithPinnedIP).toHaveBeenCalledTimes(2)
+ })
+
+ it('does not retry on 4xx (other than 408/429)', async () => {
+ mockPinnedFetchOnce({ ok: false, status: 401 })
+ const session = webhookDestination.openSession({ config, credentials })
+ await expect(
+ session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata,
+ signal: new AbortController().signal,
+ })
+ ).rejects.toThrow(/HTTP 401/)
+ expect(inputValidationMockFns.mockSecureFetchWithPinnedIP).toHaveBeenCalledTimes(1)
+ })
+
+ it('rejects when DNS resolves to a blocked IP', async () => {
+ inputValidationMockFns.mockValidateUrlWithDNS.mockResolvedValueOnce({
+ isValid: false,
+ error: 'url resolves to a blocked IP address',
+ })
+ const session = webhookDestination.openSession({ config, credentials })
+ await expect(
+ session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata,
+ signal: new AbortController().signal,
+ })
+ ).rejects.toThrow(/blocked IP/)
+ expect(inputValidationMockFns.mockSecureFetchWithPinnedIP).not.toHaveBeenCalled()
+ })
+
+ it('reuses the same pinned IP across deliveries (no DNS rebinding window)', async () => {
+ mockPinnedFetchOnce({ ok: true, status: 200 })
+ mockPinnedFetchOnce({ ok: true, status: 200 })
+ const session = webhookDestination.openSession({ config, credentials })
+ const signal = new AbortController().signal
+ await session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata,
+ signal,
+ })
+ await session.deliver({
+ body: Buffer.from('y'),
+ contentType: 'application/x-ndjson',
+ metadata: { ...metadata, sequence: 4 },
+ signal,
+ })
+ expect(inputValidationMockFns.mockValidateUrlWithDNS).toHaveBeenCalledTimes(1)
+ const calls = inputValidationMockFns.mockSecureFetchWithPinnedIP.mock.calls
+ expect(calls[0][1]).toBe('93.184.216.34')
+ expect(calls[1][1]).toBe('93.184.216.34')
+ })
+
+ it('rejects every header buildHeaders writes when reused as signatureHeader (drift guard)', async () => {
+ mockPinnedFetchOnce({ ok: true, status: 200 })
+ const session = webhookDestination.openSession({ config, credentials })
+ await session.deliver({
+ body: Buffer.from('x'),
+ contentType: 'application/x-ndjson',
+ metadata,
+ signal: new AbortController().signal,
+ })
+
+ const init = inputValidationMockFns.mockSecureFetchWithPinnedIP.mock.calls[0][2]
+ const writtenHeaders = Object.keys(init.headers as Record)
+
+ for (const name of writtenHeaders) {
+ const result = webhookDestination.configSchema.safeParse({
+ url: 'https://example.com/hook',
+ signatureHeader: name,
+ })
+ expect(
+ result.success,
+ `expected signatureHeader="${name}" to be rejected (it is written by buildHeaders)`
+ ).toBe(false)
+ }
+ })
+})
diff --git a/apps/sim/lib/data-drains/destinations/webhook.ts b/apps/sim/lib/data-drains/destinations/webhook.ts
new file mode 100644
index 00000000000..525898e3484
--- /dev/null
+++ b/apps/sim/lib/data-drains/destinations/webhook.ts
@@ -0,0 +1,282 @@
+import { createHmac } from 'node:crypto'
+import { createLogger } from '@sim/logger'
+import { toError } from '@sim/utils/errors'
+import { z } from 'zod'
+import { validateExternalUrl } from '@/lib/core/security/input-validation'
+import {
+ secureFetchWithPinnedIP,
+ validateUrlWithDNS,
+} from '@/lib/core/security/input-validation.server'
+import type { DeliveryMetadata, DrainDestination } from '@/lib/data-drains/types'
+
+const logger = createLogger('DataDrainWebhookDestination')
+
+/** Initial attempt + 3 retries — matches the documented 500ms/1s/2s backoff sequence. */
+const MAX_ATTEMPTS = 4
+const BASE_BACKOFF_MS = 500
+const MAX_BACKOFF_MS = 30_000
+const PER_ATTEMPT_TIMEOUT_MS = 30_000
+const SIGNATURE_VERSION = 'v1'
+const USER_AGENT = 'Sim-DataDrain/1.0'
+
+/** Reserved header names that callers cannot reuse as the signature header. */
+const RESERVED_SIGNATURE_HEADER_NAMES = new Set([
+ 'authorization',
+ 'content-type',
+ 'user-agent',
+ 'idempotency-key',
+ 'x-sim-timestamp',
+ 'x-sim-signature-version',
+ 'x-sim-drain-id',
+ 'x-sim-run-id',
+ 'x-sim-source',
+ 'x-sim-sequence',
+ 'x-sim-row-count',
+ 'x-sim-probe',
+ 'x-sim-signature',
+])
+
+/**
+ * Resolves the URL's hostname and returns the validated public IP. Uses
+ * `ipaddr.js` so all non-`unicast` ranges (RFC1918, loopback, CGNAT, multicast,
+ * broadcast, IPv4-mapped IPv6, link-local, cloud metadata) are blocked
+ * uniformly. The returned IP is then pinned to the underlying socket via
+ * `secureFetchWithPinnedIP` to defeat DNS rebinding (TOCTOU) between the
+ * validation lookup and the actual delivery.
+ */
+async function resolvePublicTarget(url: string): Promise<string> {
+ const result = await validateUrlWithDNS(url, 'url')
+ if (!result.isValid || !result.resolvedIP) {
+ throw new Error(result.error ?? 'Webhook URL failed SSRF validation')
+ }
+ return result.resolvedIP
+}
+
+const webhookConfigSchema = z.object({
+ url: z
+ .string()
+ .url('url must be a valid URL')
+ .refine((value) => validateExternalUrl(value, 'url').isValid, {
+ message: 'url must be HTTPS and not point at a private, loopback, or metadata address',
+ }),
+ /** Optional custom header name for the signature (default: X-Sim-Signature). */
+ signatureHeader: z
+ .string()
+ .min(1)
+ .max(128)
+ .refine((value) => !RESERVED_SIGNATURE_HEADER_NAMES.has(value.toLowerCase()), {
+ message: 'signatureHeader cannot reuse a reserved Sim header name',
+ })
+ .optional(),
+})
+
+const webhookCredentialsSchema = z.object({
+ /** Shared secret used for HMAC-SHA256 signing of the request body. */
+ signingSecret: z.string().min(8, 'signingSecret must be at least 8 characters'),
+ /** Optional bearer token sent as Authorization header. */
+ bearerToken: z.string().min(1).optional(),
+})
+
+export type WebhookDestinationConfig = z.infer<typeof webhookConfigSchema>
+export type WebhookDestinationCredentials = z.infer<typeof webhookCredentialsSchema>
+
+/**
+ * Stripe-style replay-resistant signature: signs `${unixSeconds}.${body}` and
+ * emits `t=,v1=`. Verifiers should reject signatures
+ * older than ~5 minutes after also recomputing the HMAC over the same
+ * concatenation, defending against captured-request replay attacks.
+ */
+function sign(body: Buffer, secret: string, timestamp: number): string {
+ const hmac = createHmac('sha256', secret).update(`${timestamp}.`).update(body).digest('hex')
+ return `t=${timestamp},${SIGNATURE_VERSION}=${hmac}`
+}
+
+/**
+ * Resolves after `ms` or as soon as `signal` aborts, whichever happens first.
+ * The caller checks `signal.aborted` at the top of the next iteration to
+ * surface the abort — keeping resolution side-effect-free here.
+ */
+function sleepUntilAborted(ms: number, signal: AbortSignal): Promise<void> {
+ if (signal.aborted) return Promise.resolve()
+  return new Promise<void>((resolve) => {
+ const onAbort = () => {
+ clearTimeout(timeoutId)
+ resolve()
+ }
+ const timeoutId = setTimeout(() => {
+ signal.removeEventListener('abort', onAbort)
+ resolve()
+ }, ms)
+ signal.addEventListener('abort', onAbort, { once: true })
+ })
+}
+
+function backoffWithJitter(attempt: number, retryAfterMs?: number): number {
+ if (retryAfterMs !== undefined) {
+ // Floor at 500ms so a misbehaving server returning Retry-After: 0 cannot
+ // pin us in a tight retry loop.
+ return Math.min(Math.max(retryAfterMs, BASE_BACKOFF_MS), MAX_BACKOFF_MS)
+ }
+ const exponential = Math.min(BASE_BACKOFF_MS * 2 ** (attempt - 1), MAX_BACKOFF_MS)
+ // ±20% jitter avoids thundering-herd alignment across drains.
+ return exponential * (0.8 + Math.random() * 0.4)
+}
+
+function parseRetryAfter(header: string | null): number | undefined {
+ if (!header) return undefined
+ const seconds = Number.parseInt(header, 10)
+ if (!Number.isNaN(seconds) && seconds >= 0) return seconds * 1000
+ const dateMs = Date.parse(header)
+ if (!Number.isNaN(dateMs)) {
+ const delta = dateMs - Date.now()
+ return delta > 0 ? delta : 0
+ }
+ return undefined
+}
+
+function isRetryableStatus(status: number): boolean {
+ return status === 408 || status === 429 || status >= 500
+}
+
+function buildHeaders(input: {
+ config: WebhookDestinationConfig
+ credentials: WebhookDestinationCredentials
+ body: Buffer
+ contentType: string
+ metadata?: DeliveryMetadata
+ isProbe?: boolean
+}): Record<string, string> {
+ const timestamp = Math.floor(Date.now() / 1000)
+  const headers: Record<string, string> = {
+ 'Content-Type': input.contentType,
+ 'User-Agent': USER_AGENT,
+ 'X-Sim-Timestamp': timestamp.toString(),
+ 'X-Sim-Signature-Version': SIGNATURE_VERSION,
+ [input.config.signatureHeader ?? 'X-Sim-Signature']: sign(
+ input.body,
+ input.credentials.signingSecret,
+ timestamp
+ ),
+ }
+ if (input.metadata) {
+ headers['X-Sim-Drain-Id'] = input.metadata.drainId
+ headers['X-Sim-Run-Id'] = input.metadata.runId
+ headers['X-Sim-Source'] = input.metadata.source
+ headers['X-Sim-Sequence'] = input.metadata.sequence.toString()
+ headers['X-Sim-Row-Count'] = input.metadata.rowCount.toString()
+ // Lets idempotent receivers dedupe retried chunks server-side.
+ headers['Idempotency-Key'] = `${input.metadata.runId}-${input.metadata.sequence}`
+ }
+ if (input.isProbe) {
+ headers['X-Sim-Probe'] = '1'
+ }
+ if (input.credentials.bearerToken) {
+ headers.Authorization = `Bearer ${input.credentials.bearerToken}`
+ }
+ return headers
+}
+
+export const webhookDestination: DrainDestination<
+ WebhookDestinationConfig,
+ WebhookDestinationCredentials
+> = {
+ type: 'webhook',
+ displayName: 'HTTPS Webhook',
+ configSchema: webhookConfigSchema,
+ credentialsSchema: webhookCredentialsSchema,
+
+ async test({ config, credentials, signal }) {
+ const resolvedIP = await resolvePublicTarget(config.url)
+ const probe = Buffer.from('{"sim":"connection-test"}\n', 'utf8')
+ const headers = buildHeaders({
+ config,
+ credentials,
+ body: probe,
+ contentType: 'application/x-ndjson',
+ isProbe: true,
+ })
+ const response = await secureFetchWithPinnedIP(config.url, resolvedIP, {
+ method: 'POST',
+ body: new Uint8Array(probe),
+ headers,
+ signal,
+ timeout: PER_ATTEMPT_TIMEOUT_MS,
+ })
+ if (!response.ok) {
+ throw new Error(`Webhook probe failed: HTTP ${response.status}`)
+ }
+ },
+
+ openSession({ config, credentials }) {
+ let resolvedIP: string | null = null
+ return {
+ async deliver({ body, contentType, metadata, signal }) {
+ // Resolve once per session — within a run we trust the result rather
+ // than paying DNS on every chunk. Done lazily so a session that's
+ // opened-and-immediately-closed pays no cost. The pinned IP is reused
+ // across retries to defeat DNS rebinding (TOCTOU) attacks.
+ if (resolvedIP === null) {
+ resolvedIP = await resolvePublicTarget(config.url)
+ }
+ let lastError: unknown
+ for (let attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
+ if (signal.aborted) throw signal.reason ?? new Error('Aborted')
+ // Re-build headers per attempt so the timestamp + signature are
+ // fresh (otherwise long backoffs would push us outside the
+ // verifier's skew window).
+ const headers = buildHeaders({ config, credentials, body, contentType, metadata })
+ let retryAfterMs: number | undefined
+          let response: Awaited<ReturnType<typeof secureFetchWithPinnedIP>> | undefined
+ try {
+ response = await secureFetchWithPinnedIP(config.url, resolvedIP, {
+ method: 'POST',
+ body: new Uint8Array(body),
+ headers,
+ signal,
+ timeout: PER_ATTEMPT_TIMEOUT_MS,
+ })
+ } catch (error) {
+ lastError = error
+ logger.debug('Webhook delivery attempt failed', {
+ url: config.url,
+ attempt,
+ error: toError(error).message,
+ })
+ }
+ if (response) {
+ if (response.ok) {
+ const requestId =
+ response.headers.get('x-request-id') ??
+ response.headers.get('x-amzn-trace-id') ??
+ null
+ logger.debug('Webhook chunk delivered', {
+ url: config.url,
+ attempt,
+ status: response.status,
+ bytes: body.byteLength,
+ })
+ return {
+ locator: requestId
+ ? `${config.url}#${metadata.runId}-${metadata.sequence}@${requestId}`
+ : `${config.url}#${metadata.runId}-${metadata.sequence}`,
+ }
+ }
+ if (!isRetryableStatus(response.status)) {
+ // Non-retryable HTTP error: surface immediately without retrying.
+ throw new Error(`Webhook responded with HTTP ${response.status}`)
+ }
+ lastError = new Error(`Webhook responded with HTTP ${response.status}`)
+ retryAfterMs = parseRetryAfter(response.headers.get('retry-after'))
+ }
+ if (attempt < MAX_ATTEMPTS) {
+ await sleepUntilAborted(backoffWithJitter(attempt, retryAfterMs), signal)
+ }
+ }
+ throw lastError instanceof Error
+ ? lastError
+ : new Error('Webhook delivery failed after retries')
+ },
+ async close() {},
+ }
+ },
+}
diff --git a/apps/sim/lib/data-drains/dispatcher.test.ts b/apps/sim/lib/data-drains/dispatcher.test.ts
new file mode 100644
index 00000000000..ffffac51472
--- /dev/null
+++ b/apps/sim/lib/data-drains/dispatcher.test.ts
@@ -0,0 +1,117 @@
+/**
+ * @vitest-environment node
+ */
+import { dbChainMock, dbChainMockFns, resetDbChainMock } from '@sim/testing'
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+
+vi.mock('@sim/db', () => dbChainMock)
+
+const { mockIsEnterprise, mockEnqueue, mockGetJobQueue } = vi.hoisted(() => {
+ const mockEnqueue = vi.fn(async () => 'job-id')
+ return {
+ mockIsEnterprise: vi.fn(),
+ mockEnqueue,
+ mockGetJobQueue: vi.fn(async () => ({ enqueue: mockEnqueue })),
+ }
+})
+
+vi.mock('@/lib/billing/core/subscription', () => ({
+ isOrganizationOnEnterprisePlan: mockIsEnterprise,
+}))
+vi.mock('@/lib/core/async-jobs', () => ({ getJobQueue: mockGetJobQueue }))
+vi.mock('@/lib/core/config/feature-flags', () => ({ isBillingEnabled: true }))
+
+import { dispatchDueDrains, reapOrphanedRuns } from '@/lib/data-drains/dispatcher'
+
+function mockCandidates(rows: Array<{ id: string; organizationId: string }>) {
+ // db.select().from().where() — override `from` so awaiting `.where(pred)`
+ // resolves with the candidate rows.
+ dbChainMockFns.from.mockReturnValueOnce({
+ where: vi.fn().mockResolvedValueOnce(rows),
+ } as never)
+}
+
+beforeEach(() => {
+ vi.clearAllMocks()
+ resetDbChainMock()
+})
+
+describe('reapOrphanedRuns', () => {
+ it('returns the count of rows updated to failed', async () => {
+ dbChainMockFns.returning.mockResolvedValueOnce([{ id: 'run-1' }, { id: 'run-2' }])
+ const result = await reapOrphanedRuns(new Date('2026-01-01T12:00:00.000Z'))
+ expect(result).toEqual({ reaped: 2 })
+ expect(dbChainMockFns.set).toHaveBeenCalledWith(
+ expect.objectContaining({ status: 'failed', error: expect.stringContaining('Orphaned') })
+ )
+ })
+
+ it('returns 0 when nothing is stuck', async () => {
+ dbChainMockFns.returning.mockResolvedValueOnce([])
+ expect(await reapOrphanedRuns()).toEqual({ reaped: 0 })
+ })
+})
+
+describe('dispatchDueDrains', () => {
+ it('returns early when no candidates are due', async () => {
+ dbChainMockFns.returning.mockResolvedValueOnce([]) // reaper
+ mockCandidates([])
+
+ const result = await dispatchDueDrains()
+ expect(result).toEqual({ candidates: 0, dispatched: 0, skipped: 0, reaped: 0 })
+ expect(mockGetJobQueue).not.toHaveBeenCalled()
+ })
+
+ it('skips drains for orgs not on enterprise plan', async () => {
+ dbChainMockFns.returning.mockResolvedValueOnce([]) // reaper
+ mockCandidates([{ id: 'd1', organizationId: 'org-a' }])
+ mockIsEnterprise.mockResolvedValueOnce(false)
+
+ const result = await dispatchDueDrains()
+ expect(result).toMatchObject({ candidates: 1, dispatched: 0, skipped: 1 })
+ expect(mockEnqueue).not.toHaveBeenCalled()
+ })
+
+ it('claims and enqueues a job per due drain', async () => {
+ dbChainMockFns.returning
+ .mockResolvedValueOnce([]) // reaper
+ .mockResolvedValueOnce([{ id: 'd1' }]) // claim succeeds
+ mockCandidates([{ id: 'd1', organizationId: 'org-a' }])
+ mockIsEnterprise.mockResolvedValueOnce(true)
+
+ const result = await dispatchDueDrains()
+ expect(result).toMatchObject({ candidates: 1, dispatched: 1, skipped: 0 })
+ expect(mockEnqueue).toHaveBeenCalledWith(
+ 'run-data-drain',
+ { drainId: 'd1', trigger: 'cron' },
+ { concurrencyKey: 'data-drain:d1' }
+ )
+ })
+
+ it('does not enqueue when claim loses the race', async () => {
+ dbChainMockFns.returning
+ .mockResolvedValueOnce([]) // reaper
+ .mockResolvedValueOnce([]) // claim returns nothing — lost the race
+ mockCandidates([{ id: 'd1', organizationId: 'org-a' }])
+ mockIsEnterprise.mockResolvedValueOnce(true)
+
+ const result = await dispatchDueDrains()
+ expect(result.dispatched).toBe(0)
+ expect(mockEnqueue).not.toHaveBeenCalled()
+ })
+
+ it('caches enterprise check across drains in the same org', async () => {
+ dbChainMockFns.returning
+ .mockResolvedValueOnce([]) // reaper
+ .mockResolvedValueOnce([{ id: 'd1' }])
+ .mockResolvedValueOnce([{ id: 'd2' }])
+ mockCandidates([
+ { id: 'd1', organizationId: 'org-a' },
+ { id: 'd2', organizationId: 'org-a' },
+ ])
+ mockIsEnterprise.mockResolvedValue(true)
+
+ await dispatchDueDrains()
+ expect(mockIsEnterprise).toHaveBeenCalledTimes(1)
+ })
+})
diff --git a/apps/sim/lib/data-drains/dispatcher.ts b/apps/sim/lib/data-drains/dispatcher.ts
new file mode 100644
index 00000000000..c7021ed9a6c
--- /dev/null
+++ b/apps/sim/lib/data-drains/dispatcher.ts
@@ -0,0 +1,186 @@
+import { db } from '@sim/db'
+import { dataDrainRuns, dataDrains } from '@sim/db/schema'
+import { createLogger } from '@sim/logger'
+import { toError } from '@sim/utils/errors'
+import { and, eq, isNull, lt, or } from 'drizzle-orm'
+import { isOrganizationOnEnterprisePlan } from '@/lib/billing/core/subscription'
+import { getJobQueue } from '@/lib/core/async-jobs'
+import { isBillingEnabled } from '@/lib/core/config/feature-flags'
+
+const logger = createLogger('DataDrainsDispatcher')
+
+const HOUR_MS = 60 * 60 * 1000
+const DAY_MS = 24 * HOUR_MS
+
+/**
+ * Cron fires hourly. Without a buffer, a drain that finishes a few minutes
+ * after the tick (lastRunAt = 10:05) won't satisfy `lastRunAt < now - cadence`
+ * at the next tick (10:05 < 10:00 is false), so an "hourly" drain effectively
+ * runs every two hours. Subtracting a small buffer from the cadence absorbs
+ * normal run duration plus cron jitter without allowing back-to-back runs
+ * within the same tick.
+ */
+const CADENCE_BUFFER_MS = 5 * 60 * 1000
+
+/**
+ * Maximum wall-clock duration any single drain run is allowed before its
+ * `data_drain_runs` row is considered orphaned. Runs that exceed this are
+ * almost certainly the result of a Trigger.dev worker crash mid-run — there
+ * is no live process still updating them.
+ */
+const ORPHAN_THRESHOLD_MS = 60 * 60 * 1000
+
+/**
+ * Marks `running` rows older than the orphan threshold as `failed`. Without
+ * this, a worker crash leaves run history permanently misleading and (worse)
+ * the drain row's `lastRunAt` reflects a successful claim that never finished
+ * — but the drain `cursor` never advanced, so re-running is safe.
+ */
+export async function reapOrphanedRuns(now: Date = new Date()): Promise<{ reaped: number }> {
+ const cutoff = new Date(now.getTime() - ORPHAN_THRESHOLD_MS)
+ const reaped = await db
+ .update(dataDrainRuns)
+ .set({
+ status: 'failed',
+ finishedAt: now,
+ error: `Orphaned run reaped after exceeding ${ORPHAN_THRESHOLD_MS / 60_000}m without completion`,
+ })
+ .where(and(eq(dataDrainRuns.status, 'running'), lt(dataDrainRuns.startedAt, cutoff)))
+ .returning({ id: dataDrainRuns.id })
+ if (reaped.length > 0) {
+ logger.warn('Reaped orphaned data drain runs', { count: reaped.length })
+ }
+ return { reaped: reaped.length }
+}
+
+/**
+ * Selects every enabled drain whose schedule is due (or has never run) and
+ * fans out one `run-data-drain` job per drain. Each drain is atomically
+ * claimed via a conditional UPDATE before being enqueued — two concurrent
+ * dispatcher invocations cannot both win the same row, and a manual run that
+ * lands between the SELECT and the UPDATE will lose the race cleanly. Drains
+ * belonging to orgs that have lapsed off the enterprise plan are skipped.
+ */
+export async function dispatchDueDrains(now: Date = new Date()): Promise<{
+ candidates: number
+ dispatched: number
+ skipped: number
+ reaped: number
+}> {
+ const { reaped } = await reapOrphanedRuns(now)
+
+ const hourlyCutoff = new Date(now.getTime() - HOUR_MS + CADENCE_BUFFER_MS)
+ const dailyCutoff = new Date(now.getTime() - DAY_MS + CADENCE_BUFFER_MS)
+
+ const duePredicate = and(
+ eq(dataDrains.enabled, true),
+ or(
+ isNull(dataDrains.lastRunAt),
+ and(eq(dataDrains.scheduleCadence, 'hourly'), lt(dataDrains.lastRunAt, hourlyCutoff)),
+ and(eq(dataDrains.scheduleCadence, 'daily'), lt(dataDrains.lastRunAt, dailyCutoff))
+ )
+ )
+
+ const candidates = await db
+ .select({
+ id: dataDrains.id,
+ organizationId: dataDrains.organizationId,
+ lastRunAt: dataDrains.lastRunAt,
+ })
+ .from(dataDrains)
+ .where(duePredicate)
+
+ if (candidates.length === 0) {
+ return { candidates: 0, dispatched: 0, skipped: 0, reaped }
+ }
+
+ // Self-hosted deployments have no subscription infra; `DATA_DRAINS_ENABLED`
+ // is the global on/off there. Cache per-org so a multi-drain org pays one
+ // billing lookup.
+  const enterpriseCache = new Map<string, boolean>()
+  const isEnterprise = async (orgId: string): Promise<boolean> => {
+ if (!isBillingEnabled) return true
+ const cached = enterpriseCache.get(orgId)
+ if (cached !== undefined) return cached
+ const result = await isOrganizationOnEnterprisePlan(orgId)
+ enterpriseCache.set(orgId, result)
+ return result
+ }
+
+ const queue = await getJobQueue()
+ let dispatched = 0
+ let skipped = 0
+
+ for (const candidate of candidates) {
+ let enterprise: boolean
+ try {
+ enterprise = await isEnterprise(candidate.organizationId)
+ } catch (error) {
+ // A billing-API failure for one org must not abort the whole batch —
+ // skip this drain and let the next cron tick retry it.
+ logger.warn('Enterprise check failed; skipping drain', {
+ drainId: candidate.id,
+ organizationId: candidate.organizationId,
+ error,
+ })
+ skipped++
+ continue
+ }
+ if (!enterprise) {
+ skipped++
+ continue
+ }
+
+ // Conditional claim — re-asserts the due predicate to lose to any other
+ // dispatcher or manual-run path that's already moved this drain forward.
+ const claimed = await db
+ .update(dataDrains)
+ .set({ lastRunAt: now, updatedAt: now })
+ .where(and(eq(dataDrains.id, candidate.id), duePredicate))
+ .returning({ id: dataDrains.id })
+
+ if (claimed.length === 0) continue
+
+ try {
+ // concurrencyKey serializes runs of the same drain on the job queue, so
+ // a manual run-now racing a cron claim can never execute in parallel.
+ await queue.enqueue(
+ 'run-data-drain',
+ { drainId: candidate.id, trigger: 'cron' },
+ { concurrencyKey: `data-drain:${candidate.id}` }
+ )
+ dispatched++
+ } catch (error) {
+ // Roll back the claim so a transient queue outage doesn't delay this
+ // drain by a full cadence. Scoped to our own claim timestamp so it
+ // can't trample a concurrent advance. The rollback itself is guarded
+ // so a DB error here doesn't abort the rest of the batch.
+ try {
+ await db
+ .update(dataDrains)
+ .set({ lastRunAt: candidate.lastRunAt, updatedAt: now })
+ .where(and(eq(dataDrains.id, candidate.id), eq(dataDrains.lastRunAt, now)))
+ } catch (rollbackError) {
+ logger.error('Failed to roll back data-drain claim after enqueue failure', {
+ drainId: candidate.id,
+ enqueueError: toError(error).message,
+ rollbackError: toError(rollbackError).message,
+ })
+ continue
+ }
+ logger.error('Failed to enqueue data-drain job; rolled back claim', {
+ drainId: candidate.id,
+ error,
+ })
+ }
+ }
+
+ logger.info('Data drain dispatch complete', {
+ candidates: candidates.length,
+ dispatched,
+ skipped,
+ reaped,
+ })
+
+ return { candidates: candidates.length, dispatched, skipped, reaped }
+}
diff --git a/apps/sim/lib/data-drains/encryption.ts b/apps/sim/lib/data-drains/encryption.ts
new file mode 100644
index 00000000000..3454688d46d
--- /dev/null
+++ b/apps/sim/lib/data-drains/encryption.ts
@@ -0,0 +1,21 @@
+import { decryptSecret, encryptSecret } from '@/lib/core/security/encryption'
+
+/**
+ * Encrypts an arbitrary JSON-serializable credentials object into a single
+ * `iv:ciphertext:authTag` string suitable for storage in
+ * `data_drains.destination_credentials`. Wraps the shared AES-256-GCM helper.
+ */
+export async function encryptCredentials<T>(plaintext: T): Promise<string> {
+ const { encrypted } = await encryptSecret(JSON.stringify(plaintext))
+ return encrypted
+}
+
+/**
+ * Decrypts the inverse of `encryptCredentials`. The caller is expected to run
+ * the destination's `credentialsSchema` on the result to defend against
+ * encryption-format drift.
+ */
+export async function decryptCredentials<T>(ciphertext: string): Promise<T> {
+ const { decrypted } = await decryptSecret(ciphertext)
+ return JSON.parse(decrypted) as T
+}
diff --git a/apps/sim/lib/data-drains/serializers.ts b/apps/sim/lib/data-drains/serializers.ts
new file mode 100644
index 00000000000..cbc9843a3f8
--- /dev/null
+++ b/apps/sim/lib/data-drains/serializers.ts
@@ -0,0 +1,54 @@
+import type { dataDrainRuns, dataDrains } from '@sim/db/schema'
+import { type DataDrain, type DataDrainRun, dataDrainSchema } from '@/lib/api/contracts/data-drains'
+import { getDestination } from '@/lib/data-drains/destinations/registry'
+
+type DataDrainRow = typeof dataDrains.$inferSelect
+type DataDrainRunRow = typeof dataDrainRuns.$inferSelect
+
+/**
+ * Projects a DB row into the public `DataDrain` wire shape. Strips the
+ * encrypted credentials column and normalizes timestamps to ISO strings so
+ * clients receive a stable, schema-validated payload.
+ *
+ * The stored `destinationConfig` is JSONB and is re-validated against the
+ * destination's typed config schema before serialization so unexpected shapes
+ * surface as errors instead of leaking through the response.
+ */
+export function serializeDrain(row: DataDrainRow): DataDrain {
+ const destinationConfig = getDestination(row.destinationType).configSchema.parse(
+ row.destinationConfig
+ )
+ return dataDrainSchema.parse({
+ id: row.id,
+ organizationId: row.organizationId,
+ name: row.name,
+ source: row.source,
+ scheduleCadence: row.scheduleCadence,
+ enabled: row.enabled,
+ cursor: row.cursor,
+ lastRunAt: row.lastRunAt ? row.lastRunAt.toISOString() : null,
+ lastSuccessAt: row.lastSuccessAt ? row.lastSuccessAt.toISOString() : null,
+ createdBy: row.createdBy,
+ createdAt: row.createdAt.toISOString(),
+ updatedAt: row.updatedAt.toISOString(),
+ destinationType: row.destinationType,
+ destinationConfig,
+ })
+}
+
+export function serializeDrainRun(row: DataDrainRunRow): DataDrainRun {
+ return {
+ id: row.id,
+ drainId: row.drainId,
+ status: row.status,
+ trigger: row.trigger,
+ startedAt: row.startedAt.toISOString(),
+ finishedAt: row.finishedAt ? row.finishedAt.toISOString() : null,
+ rowsExported: row.rowsExported,
+ bytesWritten: row.bytesWritten,
+ cursorBefore: row.cursorBefore,
+ cursorAfter: row.cursorAfter,
+ error: row.error,
+ locators: row.locators ?? [],
+ }
+}
diff --git a/apps/sim/lib/data-drains/service.test.ts b/apps/sim/lib/data-drains/service.test.ts
new file mode 100644
index 00000000000..394b2d6d9f1
--- /dev/null
+++ b/apps/sim/lib/data-drains/service.test.ts
@@ -0,0 +1,159 @@
+/**
+ * @vitest-environment node
+ */
+import { dbChainMock, dbChainMockFns, resetDbChainMock } from '@sim/testing'
+import { beforeEach, describe, expect, it, vi } from 'vitest'
+
+vi.mock('@sim/db', () => dbChainMock)
+
+const { mockGetSource, mockGetDestination, mockDecryptCredentials } = vi.hoisted(() => ({
+ mockGetSource: vi.fn(),
+ mockGetDestination: vi.fn(),
+ mockDecryptCredentials: vi.fn(),
+}))
+
+vi.mock('@/lib/data-drains/sources/registry', () => ({ getSource: mockGetSource }))
+vi.mock('@/lib/data-drains/destinations/registry', () => ({ getDestination: mockGetDestination }))
+vi.mock('@/lib/data-drains/encryption', () => ({ decryptCredentials: mockDecryptCredentials }))
+
+import { runDrain } from '@/lib/data-drains/service'
+
+type Row = { id: string; ts: string }
+
+function makeSource(pages: Row[][]) {
+ return {
+ type: 'workflow_logs' as const,
+ displayName: 'Test',
+ pages: vi.fn(async function* () {
+ for (const page of pages) yield page
+ }),
+ serialize: vi.fn((row: Row) => row),
+ cursorAfter: vi.fn((row: Row) => JSON.stringify({ ts: row.ts, id: row.id })),
+ }
+}
+
+function makeDestination(
+  opts: { deliver?: ReturnType<typeof vi.fn>; close?: ReturnType<typeof vi.fn> } = {}
+) {
+ const deliver =
+ opts.deliver ??
+ vi.fn(async ({ metadata }: { metadata: { sequence: number } }) => ({
+ locator: `loc-${metadata.sequence}`,
+ }))
+ const close = opts.close ?? vi.fn(async () => {})
+ return {
+ type: 's3' as const,
+ displayName: 'Test',
+ configSchema: { parse: (v: unknown) => v },
+ credentialsSchema: { parse: (v: unknown) => v },
+ openSession: vi.fn(() => ({ deliver, close })),
+ _deliver: deliver,
+ _close: close,
+ }
+}
+
+const baseDrain = {
+ id: 'drain-1',
+ organizationId: 'org-1',
+ enabled: true,
+ source: 'workflow_logs',
+ destinationType: 's3',
+ destinationConfig: {},
+ destinationCredentials: 'enc:blob',
+ cursor: null,
+}
+
+beforeEach(() => {
+ vi.clearAllMocks()
+ resetDbChainMock()
+ mockDecryptCredentials.mockResolvedValue({})
+})
+
+describe('runDrain', () => {
+ it('returns skipped when drain is disabled', async () => {
+ dbChainMockFns.limit.mockResolvedValueOnce([{ ...baseDrain, enabled: false }])
+ const result = await runDrain('drain-1', 'manual')
+ expect(result.status).toBe('skipped')
+ expect(result.rowsExported).toBe(0)
+ expect(mockGetSource).not.toHaveBeenCalled()
+ })
+
+ it('throws when drain does not exist', async () => {
+ dbChainMockFns.limit.mockResolvedValueOnce([])
+ await expect(runDrain('drain-1', 'manual')).rejects.toThrow(/not found/)
+ })
+
+ it('delivers each page and advances cursor on success', async () => {
+ dbChainMockFns.limit.mockResolvedValueOnce([baseDrain])
+ const source = makeSource([
+ [
+ { id: 'r1', ts: '2026-01-01T00:00:00.000Z' },
+ { id: 'r2', ts: '2026-01-01T00:00:01.000Z' },
+ ],
+ [{ id: 'r3', ts: '2026-01-01T00:00:02.000Z' }],
+ ])
+ const destination = makeDestination()
+ mockGetSource.mockReturnValue(source)
+ mockGetDestination.mockReturnValue(destination)
+
+ const result = await runDrain('drain-1', 'cron')
+
+ expect(result.status).toBe('success')
+ expect(result.rowsExported).toBe(3)
+ expect(destination._deliver).toHaveBeenCalledTimes(2)
+ expect(destination._close).toHaveBeenCalledTimes(1)
+ expect(result.cursorAfter).toBe(JSON.stringify({ ts: '2026-01-01T00:00:02.000Z', id: 'r3' }))
+ expect(result.locators).toEqual(['loc-0', 'loc-1'])
+
+ // Drain row updated with new cursor; transaction was used.
+ expect(dbChainMockFns.transaction).toHaveBeenCalled()
+ const drainUpdate = dbChainMockFns.set.mock.calls.find(
+ (call) => (call[0] as { cursor?: unknown }).cursor !== undefined
+ )
+ expect(drainUpdate?.[0]).toMatchObject({ cursor: result.cursorAfter })
+ })
+
+ it('does not advance drain cursor when delivery fails', async () => {
+ dbChainMockFns.limit.mockResolvedValueOnce([{ ...baseDrain, cursor: 'prior' }])
+ const source = makeSource([[{ id: 'r1', ts: '2026-01-01T00:00:00.000Z' }]])
+ const destination = makeDestination({
+ deliver: vi.fn(async () => {
+ throw new Error('boom')
+ }),
+ })
+ mockGetSource.mockReturnValue(source)
+ mockGetDestination.mockReturnValue(destination)
+
+ await expect(runDrain('drain-1', 'cron')).rejects.toThrow('boom')
+
+ // Run row updated with status=failed and cursorAfter equal to prior cursor.
+ const failedUpdate = dbChainMockFns.set.mock.calls.find(
+ (call) => (call[0] as { status?: unknown }).status === 'failed'
+ )
+ expect(failedUpdate?.[0]).toMatchObject({ status: 'failed', cursorAfter: 'prior' })
+
+ // No drain-row update with a new cursor field.
+ const cursorAdvanced = dbChainMockFns.set.mock.calls.some(
+ (call) => 'cursor' in (call[0] as object)
+ )
+ expect(cursorAdvanced).toBe(false)
+
+ expect(destination._close).toHaveBeenCalledTimes(1)
+ })
+
+ it('closes session even if close throws', async () => {
+ dbChainMockFns.limit.mockResolvedValueOnce([baseDrain])
+ const source = makeSource([])
+ const destination = makeDestination({
+ close: vi.fn(async () => {
+ throw new Error('close-failed')
+ }),
+ })
+ mockGetSource.mockReturnValue(source)
+ mockGetDestination.mockReturnValue(destination)
+
+ const result = await runDrain('drain-1', 'manual')
+ expect(result.status).toBe('success')
+ expect(destination._close).toHaveBeenCalled()
+ })
+})
diff --git a/apps/sim/lib/data-drains/service.ts b/apps/sim/lib/data-drains/service.ts
new file mode 100644
index 00000000000..418d475137b
--- /dev/null
+++ b/apps/sim/lib/data-drains/service.ts
@@ -0,0 +1,227 @@
+import { db } from '@sim/db'
+import { dataDrainRuns, dataDrains } from '@sim/db/schema'
+import { createLogger } from '@sim/logger'
+import { toError } from '@sim/utils/errors'
+import { generateId } from '@sim/utils/id'
+import { eq } from 'drizzle-orm'
+import { getDestination } from '@/lib/data-drains/destinations/registry'
+import { decryptCredentials } from '@/lib/data-drains/encryption'
+import { getSource } from '@/lib/data-drains/sources/registry'
+import type { Cursor, RunTrigger } from '@/lib/data-drains/types'
+
+const logger = createLogger('DataDrainsService')
+
+// Max rows per source page and per destination delivery (one NDJSON chunk).
+const CHUNK_SIZE = 1000
+
+/** Summary of a single drain run, returned by {@link runDrain}. */
+export interface RunDrainResult {
+  drainId: string
+  /** Empty string when status is 'skipped' — no run row is created. */
+  runId: string
+  /** 'skipped' is returned for disabled drains; failures throw instead. */
+  status: 'success' | 'failed' | 'skipped'
+  rowsExported: number
+  bytesWritten: number
+  /** Drain cursor as it stood when the run started. */
+  cursorBefore: Cursor
+  /** Cursor after the run; equals cursorBefore on skip. */
+  cursorAfter: Cursor
+  /** One destination-provided locator per delivered chunk. */
+  locators: string[]
+  error?: string
+}
+
+/**
+ * Orchestrates one drain export. Source-/destination-agnostic — talks only to
+ * the registry interfaces. The drain's cursor is advanced only when the entire
+ * run completes successfully so consumers see at-least-once delivery and can
+ * dedupe on the per-row `id` field.
+ */
+export async function runDrain(
+ drainId: string,
+ trigger: RunTrigger,
+ options: { signal?: AbortSignal } = {}
+): Promise {
+ const signal = options.signal ?? new AbortController().signal
+ const [drain] = await db.select().from(dataDrains).where(eq(dataDrains.id, drainId)).limit(1)
+ if (!drain) {
+ throw new Error(`Data drain not found: ${drainId}`)
+ }
+ if (!drain.enabled) {
+ return {
+ drainId,
+ runId: '',
+ status: 'skipped',
+ rowsExported: 0,
+ bytesWritten: 0,
+ cursorBefore: drain.cursor,
+ cursorAfter: drain.cursor,
+ locators: [],
+ }
+ }
+
+ const source = getSource(drain.source)
+ const destination = getDestination(drain.destinationType)
+
+ const runId = generateId()
+ const startedAt = new Date()
+ await db.insert(dataDrainRuns).values({
+ id: runId,
+ drainId,
+ status: 'running',
+ trigger,
+ startedAt,
+ cursorBefore: drain.cursor,
+ })
+
+ const cursorBefore = drain.cursor
+ let cursor: Cursor = drain.cursor
+ let rowsExported = 0
+ let bytesWritten = 0
+ let sequence = 0
+ const locators: string[] = []
+
+ /**
+ * Schema-parse and decrypt happen *after* the run row is created so failures
+ * in either (e.g. encryption-key rotation, schema drift across versions)
+ * surface as a `failed` run row in the UI rather than vanishing into the
+ * background-job logs while `lastRunAt` quietly advances.
+ */
+ let session: ReturnType | null = null
+
+ try {
+ const config = destination.configSchema.parse(drain.destinationConfig)
+ const credentials = destination.credentialsSchema.parse(
+ await decryptCredentials(drain.destinationCredentials)
+ )
+ session = destination.openSession({ config, credentials })
+
+ for await (const chunk of source.pages({
+ organizationId: drain.organizationId,
+ cursor,
+ chunkSize: CHUNK_SIZE,
+ signal,
+ })) {
+ const ndjson = `${chunk.map((row) => JSON.stringify(source.serialize(row))).join('\n')}\n`
+ const body = Buffer.from(ndjson, 'utf8')
+
+ const result = await session.deliver({
+ body,
+ contentType: 'application/x-ndjson',
+ metadata: {
+ drainId,
+ runId,
+ source: drain.source,
+ sequence,
+ rowCount: chunk.length,
+ runStartedAt: startedAt,
+ },
+ signal,
+ })
+
+ locators.push(result.locator)
+ rowsExported += chunk.length
+ bytesWritten += body.byteLength
+ cursor = source.cursorAfter(chunk[chunk.length - 1])
+ sequence++
+ }
+
+ if (signal.aborted) {
+ throw new Error('Data drain run cancelled')
+ }
+
+ const finishedAt = new Date()
+ await db.transaction(async (tx) => {
+ await tx
+ .update(dataDrains)
+ .set({
+ cursor,
+ lastRunAt: finishedAt,
+ lastSuccessAt: finishedAt,
+ updatedAt: finishedAt,
+ })
+ .where(eq(dataDrains.id, drainId))
+ await tx
+ .update(dataDrainRuns)
+ .set({
+ status: 'success',
+ finishedAt,
+ rowsExported,
+ bytesWritten,
+ cursorAfter: cursor,
+ locators,
+ error: null,
+ })
+ .where(eq(dataDrainRuns.id, runId))
+ })
+
+ logger.info('Data drain run succeeded', {
+ drainId,
+ runId,
+ source: drain.source,
+ destinationType: drain.destinationType,
+ rowsExported,
+ bytesWritten,
+ chunks: sequence,
+ })
+
+ return {
+ drainId,
+ runId,
+ status: 'success',
+ rowsExported,
+ bytesWritten,
+ cursorBefore,
+ cursorAfter: cursor,
+ locators,
+ }
+ } catch (error) {
+ const finishedAt = new Date()
+ const message = toError(error).message
+ try {
+ await db.transaction(async (tx) => {
+ await tx
+ .update(dataDrains)
+ .set({ lastRunAt: finishedAt, updatedAt: finishedAt })
+ .where(eq(dataDrains.id, drainId))
+ await tx
+ .update(dataDrainRuns)
+ .set({
+ status: 'failed',
+ finishedAt,
+ rowsExported,
+ bytesWritten,
+ cursorAfter: cursorBefore,
+ locators,
+ error: message.slice(0, 4000),
+ })
+ .where(eq(dataDrainRuns.id, runId))
+ })
+ } catch (statusError) {
+ // Best-effort status write — the reaper repairs stuck rows. Log so DB
+ // outages don't hide behind the original delivery error.
+ logger.error('Failed to record data drain failure status', {
+ drainId,
+ runId,
+ deliveryError: message,
+ statusError: toError(statusError).message,
+ })
+ }
+
+ logger.error('Data drain run failed', {
+ drainId,
+ runId,
+ source: drain.source,
+ destinationType: drain.destinationType,
+ error: message,
+ })
+
+ throw error
+ } finally {
+ if (session) {
+ try {
+ await session.close()
+ } catch (closeError) {
+ logger.warn('Data drain session close failed', {
+ drainId,
+ runId,
+ error: toError(closeError).message,
+ })
+ }
+ }
+ }
+}
diff --git a/apps/sim/lib/data-drains/sources/audit-logs.ts b/apps/sim/lib/data-drains/sources/audit-logs.ts
new file mode 100644
index 00000000000..fbbbbf77aa1
--- /dev/null
+++ b/apps/sim/lib/data-drains/sources/audit-logs.ts
@@ -0,0 +1,78 @@
+import { db } from '@sim/db'
+import { auditLog } from '@sim/db/schema'
+import { and, inArray, isNull, or, sql } from 'drizzle-orm'
+import {
+ decodeTimeCursor,
+ encodeTimeCursor,
+ timeCursorOrderBy,
+ timeCursorPredicate,
+} from '@/lib/data-drains/sources/cursor'
+import { getOrganizationWorkspaceIds } from '@/lib/data-drains/sources/helpers'
+import type { Cursor, DrainSource, SourcePageInput } from '@/lib/data-drains/types'
+
+type AuditLogRow = typeof auditLog.$inferSelect
+
+/**
+ * Drains audit events scoped to the organization: rows from any of the org's
+ * workspaces, plus org-level rows (`workspace_id IS NULL`) where
+ * `metadata->>'organizationId'` matches. Audit-log writers consistently set
+ * `metadata.organizationId` for org-scoped actions even though the table has
+ * no dedicated FK column.
+ */
+async function* pages(input: SourcePageInput): AsyncIterable<AuditLogRow[]> {
+  const workspaceIds = await getOrganizationWorkspaceIds(input.organizationId)
+
+  const orgScopedClause = and(
+    isNull(auditLog.workspaceId),
+    sql`${auditLog.metadata}->>'organizationId' = ${input.organizationId}`
+  )
+  const scopeClause =
+    workspaceIds.length === 0
+      ? orgScopedClause
+      : or(inArray(auditLog.workspaceId, workspaceIds), orgScopedClause)
+
+  let cursor = decodeTimeCursor(input.cursor)
+  while (!input.signal.aborted) {
+    const cursorClause = timeCursorPredicate(auditLog.createdAt, auditLog.id, cursor)
+
+    const rows = await db
+      .select()
+      .from(auditLog)
+      .where(and(scopeClause, cursorClause))
+      .orderBy(...timeCursorOrderBy(auditLog.createdAt, auditLog.id))
+      .limit(input.chunkSize)
+
+    if (rows.length === 0) return
+    yield rows
+    const last = rows[rows.length - 1]
+    cursor = { ts: last.createdAt.toISOString(), id: last.id }
+    // A short page means the table is exhausted for this run.
+    if (rows.length < input.chunkSize) return
+  }
+}
+
+export const auditLogsSource: DrainSource<AuditLogRow> = {
+  type: 'audit_logs',
+  displayName: 'Audit logs',
+  pages,
+  serialize(row) {
+    // Flat JSON-safe projection; Date fields are emitted as ISO-8601 strings.
+    return {
+      id: row.id,
+      workspaceId: row.workspaceId,
+      actorId: row.actorId,
+      actorName: row.actorName,
+      actorEmail: row.actorEmail,
+      action: row.action,
+      resourceType: row.resourceType,
+      resourceId: row.resourceId,
+      resourceName: row.resourceName,
+      description: row.description,
+      metadata: row.metadata,
+      ipAddress: row.ipAddress,
+      userAgent: row.userAgent,
+      createdAt: row.createdAt.toISOString(),
+    }
+  },
+  cursorAfter(row): Cursor {
+    return encodeTimeCursor({ ts: row.createdAt.toISOString(), id: row.id })
+  },
+}
diff --git a/apps/sim/lib/data-drains/sources/copilot-chats.ts b/apps/sim/lib/data-drains/sources/copilot-chats.ts
new file mode 100644
index 00000000000..d1d25c2aaf2
--- /dev/null
+++ b/apps/sim/lib/data-drains/sources/copilot-chats.ts
@@ -0,0 +1,73 @@
+import { db } from '@sim/db'
+import { copilotChats } from '@sim/db/schema'
+import { and, inArray } from 'drizzle-orm'
+import {
+ decodeTimeCursor,
+ encodeTimeCursor,
+ timeCursorOrderBy,
+ timeCursorPredicate,
+} from '@/lib/data-drains/sources/cursor'
+import { getOrganizationWorkspaceIds } from '@/lib/data-drains/sources/helpers'
+import type { Cursor, DrainSource, SourcePageInput } from '@/lib/data-drains/types'
+
+type CopilotChatRow = typeof copilotChats.$inferSelect
+
+/**
+ * Cursor is `createdAt` (immutable) but rows themselves are mutable —
+ * `messages`, `title`, `lastSeenAt`, etc. are updated in-place over the chat's
+ * lifetime. This means a chat exported once will not be re-exported when its
+ * messages change. Consumers who need the latest state should periodically
+ * full-refresh from a separate snapshot job; drains are append-mostly by
+ * design and `data-drains` is not a CDC pipeline.
+ */
+async function* pages(input: SourcePageInput): AsyncIterable<CopilotChatRow[]> {
+  const workspaceIds = await getOrganizationWorkspaceIds(input.organizationId)
+  if (workspaceIds.length === 0) return
+
+  let cursor = decodeTimeCursor(input.cursor)
+  while (!input.signal.aborted) {
+    const cursorClause = timeCursorPredicate(copilotChats.createdAt, copilotChats.id, cursor)
+
+    const rows = await db
+      .select()
+      .from(copilotChats)
+      .where(and(inArray(copilotChats.workspaceId, workspaceIds), cursorClause))
+      .orderBy(...timeCursorOrderBy(copilotChats.createdAt, copilotChats.id))
+      .limit(input.chunkSize)
+
+    if (rows.length === 0) return
+    yield rows
+    const last = rows[rows.length - 1]
+    cursor = { ts: last.createdAt.toISOString(), id: last.id }
+    // A short page means the table is exhausted for this run.
+    if (rows.length < input.chunkSize) return
+  }
+}
+
+export const copilotChatsSource: DrainSource