+ );
+}
+```
+
## Getting Your API Key
@@ -578,7 +976,7 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
@@ -594,4 +992,4 @@ const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
## License
-Apache-2.0
\ No newline at end of file
+Apache-2.0
diff --git a/apps/docs/content/docs/en/triggers/api.mdx b/apps/docs/content/docs/en/triggers/api.mdx
index 98dad08694..bc16047016 100644
--- a/apps/docs/content/docs/en/triggers/api.mdx
+++ b/apps/docs/content/docs/en/triggers/api.mdx
@@ -38,6 +38,84 @@ curl -X POST \
Successful responses return the serialized execution result from the Executor. Errors surface validation, auth, or workflow failures.
+## Streaming Responses
+
+Enable real-time streaming to receive workflow output as it's generated, character by character. This is useful for displaying AI responses progressively to users.
+
+### Request Parameters
+
+Add these parameters to enable streaming:
+
+- `stream` - Set to `true` to enable Server-Sent Events (SSE) streaming
+- `selectedOutputs` - Array of block outputs to stream (e.g., `["agent1.content"]`)
+
+### Block Output Format
+
+Use the `blockName.attribute` format to specify which block outputs to stream:
+- Format: `"blockName.attribute"` (e.g., to stream the content of the Agent 1 block, use `"agent1.content"`)
+- Block names are case-insensitive and spaces are ignored
+
+### Example Request
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Count to five",
+ "stream": true,
+ "selectedOutputs": ["agent1.content"]
+ }'
+```
+
+### Response Format
+
+Streaming responses use Server-Sent Events (SSE) format:
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+Each event includes:
+- **Streaming chunks**: `{"blockId": "...", "chunk": "text"}` - Real-time text as it's generated
+- **Final event**: `{"event": "done", ...}` - Execution metadata and complete results
+- **Terminator**: `[DONE]` - Signals end of stream
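+
+As a minimal sketch, any HTTP client that supports response streaming can consume these events. The example below uses Python's `requests` library against the same endpoint and placeholders (`WORKFLOW_ID`, `YOUR_KEY`) as the curl request above:
+
+```python
+import json
+import requests
+
+response = requests.post(
+    "https://sim.ai/api/workflows/WORKFLOW_ID/execute",
+    headers={"Content-Type": "application/json", "X-API-Key": "YOUR_KEY"},
+    json={"message": "Count to five", "stream": True, "selectedOutputs": ["agent1.content"]},
+    stream=True,  # keep the connection open and read the body incrementally
+)
+
+for line in response.iter_lines(decode_unicode=True):
+    if not line or not line.startswith("data: "):
+        continue
+    data = line[len("data: "):]
+    if data == "[DONE]":
+        break  # end of stream
+    event = json.loads(data)
+    if "chunk" in event:
+        print(event["chunk"], end="", flush=True)  # text arrives as it is generated
+    elif event.get("event") == "done":
+        print("\nMetadata:", event.get("metadata"))
+```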
+
+### Multiple Block Streaming
+
+When `selectedOutputs` includes multiple blocks, each chunk indicates which block produced it:
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Process this request",
+ "stream": true,
+ "selectedOutputs": ["agent1.content", "agent2.content"]
+ }'
+```
+
+The `blockId` field in each chunk lets you route output to the correct UI element:
+
+```
+data: {"blockId":"agent1-uuid","chunk":"Processing..."}
+
+data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
+
+data: {"blockId":"agent1-uuid","chunk":" complete"}
+```
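+
+As a small illustrative extension of the parsing loop above, chunks can be accumulated per block by keying a buffer on `blockId`:
+
+```python
+from collections import defaultdict
+
+outputs = defaultdict(str)  # blockId -> accumulated text
+
+def handle_event(event: dict) -> None:
+    """Route a parsed SSE event to the buffer of the block that produced it."""
+    if "chunk" in event:
+        outputs[event["blockId"]] += event["chunk"]
+    elif event.get("event") == "done":
+        for block_id, text in outputs.items():
+            print(f"{block_id}: {text}")
+```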
+
## Output Reference
| Reference | Description |
diff --git a/apps/docs/content/docs/es/sdks/python.mdx b/apps/docs/content/docs/es/sdks/python.mdx
index 4203006117..2edb110394 100644
--- a/apps/docs/content/docs/es/sdks/python.mdx
+++ b/apps/docs/content/docs/es/sdks/python.mdx
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
El SDK oficial de Python para Sim te permite ejecutar flujos de trabajo programáticamente desde tus aplicaciones Python utilizando el SDK oficial de Python.
- El SDK de Python es compatible con Python 3.8+ y proporciona ejecución sincrónica de flujos de trabajo. Todas las ejecuciones de flujos de trabajo son actualmente sincrónicas.
+ El SDK de Python es compatible con Python 3.8+ con soporte para ejecución asíncrona, limitación automática de velocidad con retroceso exponencial y seguimiento de uso.
## Instalación
@@ -74,12 +74,17 @@ result = client.execute_workflow(
- `workflow_id` (str): El ID del flujo de trabajo a ejecutar
- `input_data` (dict, opcional): Datos de entrada para pasar al flujo de trabajo
- `timeout` (float, opcional): Tiempo de espera en segundos (predeterminado: 30.0)
+- `stream` (bool, opcional): Habilitar respuestas en streaming (predeterminado: False)
+- `selected_outputs` (list[str], opcional): Salidas de bloque para transmitir en formato `blockName.attribute` (p. ej., `["agent1.content"]`)
+- `async_execution` (bool, opcional): Ejecutar de forma asíncrona (predeterminado: False)
-**Devuelve:** `WorkflowExecutionResult`
+**Devuelve:** `WorkflowExecutionResult | AsyncExecutionResult`
+
+Cuando `async_execution=True`, devuelve inmediatamente un ID de tarea para sondeo. De lo contrario, espera a que se complete.
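+
+Por ejemplo, un boceto mínimo (asumiendo un `client` ya inicializado):
+
+```python
+# La ejecución asíncrona devuelve un AsyncExecutionResult de inmediato
+result = client.execute_workflow("workflow-id", async_execution=True)
+print(result.task_id)          # ID para sondear con get_job_status()
+print(result.links["status"])  # p. ej., "/api/jobs/{taskId}"
+```
+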
##### get_workflow_status()
-Obtiene el estado de un flujo de trabajo (estado de implementación, etc.).
+Obtener el estado de un flujo de trabajo (estado de implementación, etc.).
```python
status = client.get_workflow_status("workflow-id")
@@ -93,7 +98,7 @@ print("Is deployed:", status.is_deployed)
##### validate_workflow()
-Valida que un flujo de trabajo esté listo para su ejecución.
+Validar que un flujo de trabajo está listo para su ejecución.
```python
is_ready = client.validate_workflow("workflow-id")
@@ -107,28 +112,118 @@ if is_ready:
**Devuelve:** `bool`
-##### execute_workflow_sync()
+##### get_job_status()
-
- Actualmente, este método es idéntico a `execute_workflow()` ya que todas las ejecuciones son síncronas. Este método se proporciona para compatibilidad futura cuando se añada la ejecución asíncrona.
-
+Obtener el estado de una ejecución de trabajo asíncrono.
+
+```python
+status = client.get_job_status("task-id-from-async-execution")
+print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
+if status["status"] == "completed":
+ print("Output:", status["output"])
+```
+
+**Parámetros:**
+- `task_id` (str): El ID de tarea devuelto de la ejecución asíncrona
+
+**Devuelve:** `Dict[str, Any]`
-Ejecuta un flujo de trabajo (actualmente síncrono, igual que `execute_workflow()`).
+**Campos de respuesta:**
+- `success` (bool): Si la solicitud fue exitosa
+- `taskId` (str): El ID de la tarea
+- `status` (str): Uno de `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
+- `metadata` (dict): Contiene `startedAt`, `completedAt`, y `duration`
+- `output` (any, opcional): La salida del flujo de trabajo (cuando se completa)
+- `error` (any, opcional): Detalles del error (cuando falla)
+- `estimatedDuration` (int, opcional): Duración estimada en milisegundos (cuando está procesando/en cola)
+
+##### execute_with_retry()
+
+Ejecutar un flujo de trabajo con reintento automático en errores de límite de velocidad usando retroceso exponencial.
```python
-result = client.execute_workflow_sync(
+result = client.execute_with_retry(
"workflow-id",
- input_data={"data": "some input"},
- timeout=60.0
+ input_data={"message": "Hello"},
+ timeout=30.0,
+ max_retries=3, # Maximum number of retries
+ initial_delay=1.0, # Initial delay in seconds
+ max_delay=30.0, # Maximum delay in seconds
+ backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**Parámetros:**
- `workflow_id` (str): El ID del flujo de trabajo a ejecutar
- `input_data` (dict, opcional): Datos de entrada para pasar al flujo de trabajo
-- `timeout` (float): Tiempo de espera para la solicitud inicial en segundos
+- `timeout` (float, opcional): Tiempo de espera en segundos
+- `stream` (bool, opcional): Habilitar respuestas en streaming
+- `selected_outputs` (list, opcional): Salidas de bloque para transmitir
+- `async_execution` (bool, opcional): Ejecutar de forma asíncrona
+- `max_retries` (int, opcional): Número máximo de reintentos (predeterminado: 3)
+- `initial_delay` (float, opcional): Retraso inicial en segundos (predeterminado: 1.0)
+- `max_delay` (float, opcional): Retraso máximo en segundos (predeterminado: 30.0)
+- `backoff_multiplier` (float, opcional): Multiplicador de retroceso (predeterminado: 2.0)
+
+**Devuelve:** `WorkflowExecutionResult | AsyncExecutionResult`
+
+La lógica de reintento utiliza retroceso exponencial (1s → 2s → 4s → 8s...) con fluctuación de ±25% para evitar el efecto de manada. Si la API proporciona un encabezado `retry-after`, se utilizará en su lugar.
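+
+A modo ilustrativo (boceto, no el código interno del SDK), el calendario de esperas descrito se puede calcular así:
+
+```python
+import random
+
+def calcular_espera(intento: int, initial_delay: float = 1.0,
+                    max_delay: float = 30.0, backoff_multiplier: float = 2.0) -> float:
+    """Retroceso exponencial (1s, 2s, 4s, 8s...) acotado por max_delay, con ±25% de fluctuación."""
+    base = min(initial_delay * (backoff_multiplier ** intento), max_delay)
+    return base + base * random.uniform(-0.25, 0.25)
+
+# Esperas aproximadas para los primeros cuatro reintentos
+print([round(calcular_espera(i), 2) for i in range(4)])
+```
+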
-**Devuelve:** `WorkflowExecutionResult`
+##### get_rate_limit_info()
+
+Obtiene la información actual del límite de tasa de la última respuesta de la API.
+
+```python
+rate_limit_info = client.get_rate_limit_info()
+if rate_limit_info:
+ print("Limit:", rate_limit_info.limit)
+ print("Remaining:", rate_limit_info.remaining)
+ print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
+```
+
+**Devuelve:** `RateLimitInfo | None`
+
+##### get_usage_limits()
+
+Obtiene los límites de uso actuales y la información de cuota para tu cuenta.
+
+```python
+limits = client.get_usage_limits()
+print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
+print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
+print("Current period cost:", limits.usage["currentPeriodCost"])
+print("Plan:", limits.usage["plan"])
+```
+
+**Devuelve:** `UsageLimits`
+
+**Estructura de respuesta:**
+
+```python
+{
+ "success": bool,
+ "rateLimit": {
+ "sync": {
+ "isLimited": bool,
+ "limit": int,
+ "remaining": int,
+ "resetAt": str
+ },
+ "async": {
+ "isLimited": bool,
+ "limit": int,
+ "remaining": int,
+ "resetAt": str
+ },
+ "authType": str # 'api' or 'manual'
+ },
+ "usage": {
+ "currentPeriodCost": float,
+ "limit": float,
+ "plan": str # e.g., 'free', 'pro'
+ }
+}
+```
##### set_api_key()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
+### AsyncExecutionResult
+
+```python
+@dataclass
+class AsyncExecutionResult:
+ success: bool
+ task_id: str
+ status: str # 'queued'
+ created_at: str
+ links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
+```
+
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
+### RateLimitInfo
+
+```python
+@dataclass
+class RateLimitInfo:
+ limit: int
+ remaining: int
+ reset: int
+ retry_after: Optional[int] = None
+```
+
+### UsageLimits
+
+```python
+@dataclass
+class UsageLimits:
+ success: bool
+ rate_limit: Dict[str, Any]
+ usage: Dict[str, Any]
+```
+
### SimStudioError
```python
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
self.status = status
```
+**Códigos de error comunes:**
+- `UNAUTHORIZED`: Clave API inválida
+- `TIMEOUT`: Tiempo de espera agotado
+- `RATE_LIMIT_EXCEEDED`: Límite de tasa excedido
+- `USAGE_LIMIT_EXCEEDED`: Límite de uso excedido
+- `EXECUTION_ERROR`: Ejecución del flujo de trabajo fallida
+
## Ejemplos
### Ejecución básica de flujo de trabajo
@@ -205,8 +340,8 @@ class SimStudioError(Exception):
Ejecuta el flujo de trabajo con tus datos de entrada.
-
- Procesa el resultado de la ejecución y maneja cualquier error.
+
+ Procesa el resultado de la ejecución y gestiona cualquier error.
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +387,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -275,22 +410,22 @@ def execute_with_error_handling():
raise
```
-### Uso del administrador de contexto
+### Uso del gestor de contexto
-Usa el cliente como un administrador de contexto para manejar automáticamente la limpieza de recursos:
+Usa el cliente como un gestor de contexto para manejar automáticamente la limpieza de recursos:
```python
from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
```
-### Ejecución por lotes de flujos de trabajo
+### Ejecución de flujos de trabajo por lotes
Ejecuta múltiples flujos de trabajo de manera eficiente:
@@ -298,7 +433,7 @@ Ejecuta múltiples flujos de trabajo de manera eficiente:
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
+### Ejecución asíncrona de flujos de trabajo
+
+Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
+
+```python
+import os
+import time
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_async():
+ try:
+ # Start async execution
+ result = client.execute_workflow(
+ "workflow-id",
+ input_data={"data": "large dataset"},
+ async_execution=True # Execute asynchronously
+ )
+
+ # Check if result is an async execution
+ if hasattr(result, 'task_id'):
+ print(f"Task ID: {result.task_id}")
+ print(f"Status endpoint: {result.links['status']}")
+
+ # Poll for completion
+ status = client.get_job_status(result.task_id)
+
+ while status["status"] in ["queued", "processing"]:
+ print(f"Current status: {status['status']}")
+ time.sleep(2) # Wait 2 seconds
+ status = client.get_job_status(result.task_id)
+
+ if status["status"] == "completed":
+ print("Workflow completed!")
+ print(f"Output: {status['output']}")
+ print(f"Duration: {status['metadata']['duration']}")
+ else:
+ print(f"Workflow failed: {status['error']}")
+
+ except Exception as error:
+ print(f"Error: {error}")
+
+execute_async()
+```
+
+### Límite de tasa y reintentos
+
+Maneja los límites de tasa automáticamente con retroceso exponencial:
+
+```python
+import os
+from simstudio import SimStudioClient, SimStudioError
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_with_retry_handling():
+ try:
+ # Automatically retries on rate limit
+ result = client.execute_with_retry(
+ "workflow-id",
+ input_data={"message": "Process this"},
+ max_retries=5,
+ initial_delay=1.0,
+ max_delay=60.0,
+ backoff_multiplier=2.0
+ )
+
+ print(f"Success: {result}")
+ except SimStudioError as error:
+ if error.code == "RATE_LIMIT_EXCEEDED":
+ print("Rate limit exceeded after all retries")
+
+ # Check rate limit info
+ rate_limit_info = client.get_rate_limit_info()
+ if rate_limit_info:
+ from datetime import datetime
+ reset_time = datetime.fromtimestamp(rate_limit_info.reset)
+ print(f"Rate limit resets at: {reset_time}")
+
+execute_with_retry_handling()
+```
+
+### Monitoreo de uso
+
+Monitorea el uso de tu cuenta y sus límites:
+
+```python
+import os
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def check_usage():
+ try:
+ limits = client.get_usage_limits()
+
+ print("=== Rate Limits ===")
+ print("Sync requests:")
+ print(f" Limit: {limits.rate_limit['sync']['limit']}")
+ print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
+ print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
+ print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
+
+ print("\nAsync requests:")
+ print(f" Limit: {limits.rate_limit['async']['limit']}")
+ print(f" Remaining: {limits.rate_limit['async']['remaining']}")
+ print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
+ print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
+
+ print("\n=== Usage ===")
+ print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
+ print(f"Limit: ${limits.usage['limit']:.2f}")
+ print(f"Plan: {limits.usage['plan']}")
+
+ percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
+ print(f"Usage: {percent_used:.1f}%")
+
+ if percent_used > 80:
+ print("⚠️ Warning: You are approaching your usage limit!")
+
+ except Exception as error:
+ print(f"Error checking usage: {error}")
+
+check_usage()
+```
+
+### Ejecución de flujo de trabajo en streaming
+
+Ejecuta flujos de trabajo con respuestas en tiempo real:
+
+```python
+from simstudio import SimStudioClient
+import os
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_with_streaming():
+ """Execute workflow with streaming enabled."""
+ try:
+ # Enable streaming for specific block outputs
+ result = client.execute_workflow(
+ "workflow-id",
+ input_data={"message": "Count to five"},
+ stream=True,
+ selected_outputs=["agent1.content"] # Use blockName.attribute format
+ )
+
+ print("Workflow result:", result)
+ except Exception as error:
+ print("Error:", error)
+
+execute_with_streaming()
+```
+
+La respuesta en streaming sigue el formato de Server-Sent Events (SSE):
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+**Ejemplo de streaming con Flask:**
+
+```python
+from flask import Flask, Response, stream_with_context
+import requests
+import json
+import os
+
+app = Flask(__name__)
+
+@app.route('/stream-workflow')
+def stream_workflow():
+ """Stream workflow execution to the client."""
+
+ def generate():
+ response = requests.post(
+ 'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
+ headers={
+ 'Content-Type': 'application/json',
+ 'X-API-Key': os.getenv('SIM_API_KEY')
+ },
+ json={
+ 'message': 'Generate a story',
+ 'stream': True,
+ 'selectedOutputs': ['agent1.content']
+ },
+ stream=True
+ )
+
+ for line in response.iter_lines():
+ if line:
+ decoded_line = line.decode('utf-8')
+ if decoded_line.startswith('data: '):
+ data = decoded_line[6:] # Remove 'data: ' prefix
+
+ if data == '[DONE]':
+ break
+
+ try:
+ parsed = json.loads(data)
+ if 'chunk' in parsed:
+ yield f"data: {json.dumps(parsed)}\n\n"
+ elif parsed.get('event') == 'done':
+ yield f"data: {json.dumps(parsed)}\n\n"
+ print("Execution complete:", parsed.get('metadata'))
+ except json.JSONDecodeError:
+ pass
+
+ return Response(
+ stream_with_context(generate()),
+ mimetype='text/event-stream'
+ )
+
+if __name__ == '__main__':
+ app.run(debug=True)
+```
+
### Configuración del entorno
Configura el cliente usando variables de entorno:
@@ -352,8 +711,8 @@ Configura el cliente usando variables de entorno:
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +724,13 @@ Configura el cliente usando variables de entorno:
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
diff --git a/apps/docs/content/docs/es/sdks/typescript.mdx b/apps/docs/content/docs/es/sdks/typescript.mdx
index 410554d7b7..fca8a9805f 100644
--- a/apps/docs/content/docs/es/sdks/typescript.mdx
+++ b/apps/docs/content/docs/es/sdks/typescript.mdx
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
-El SDK oficial de TypeScript/JavaScript para Sim proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como con navegadores, lo que te permite ejecutar flujos de trabajo de forma programática desde tus aplicaciones Node.js, aplicaciones web y otros entornos JavaScript. Todas las ejecuciones de flujos de trabajo son actualmente síncronas.
+El SDK oficial de TypeScript/JavaScript para Sim proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como de navegador, lo que te permite ejecutar flujos de trabajo programáticamente desde tus aplicaciones Node.js, aplicaciones web y otros entornos JavaScript.
- El SDK de TypeScript proporciona seguridad de tipos completa y es compatible tanto con entornos Node.js como con navegadores. Todas las ejecuciones de flujos de trabajo son actualmente síncronas.
+ El SDK de TypeScript proporciona seguridad de tipos completa, soporte para ejecución asíncrona, limitación automática de velocidad con retroceso exponencial y seguimiento de uso.
## Instalación
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
- `options` (ExecutionOptions, opcional):
- `input` (any): Datos de entrada para pasar al flujo de trabajo
- `timeout` (number): Tiempo de espera en milisegundos (predeterminado: 30000)
+ - `stream` (boolean): Habilitar respuestas en streaming (predeterminado: false)
+ - `selectedOutputs` (string[]): Salidas de bloque para transmitir en formato `blockName.attribute` (por ejemplo, `["agent1.content"]`)
+ - `async` (boolean): Ejecutar de forma asíncrona (predeterminado: false)
-**Devuelve:** `Promise`
+**Devuelve:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
+
+Cuando `async: true`, devuelve inmediatamente un ID de tarea para sondeo. De lo contrario, espera a que se complete.
##### getWorkflowStatus()
@@ -128,32 +133,121 @@ if (isReady) {
**Devuelve:** `Promise<boolean>`
-##### executeWorkflowSync()
+##### getJobStatus()
-
- Actualmente, este método es idéntico a `executeWorkflow()` ya que todas las ejecuciones son síncronas. Este método se proporciona para compatibilidad futura cuando se añada la ejecución asíncrona.
-
+Obtener el estado de una ejecución de trabajo asíncrono.
+
+```typescript
+const status = await client.getJobStatus('task-id-from-async-execution');
+console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
+if (status.status === 'completed') {
+ console.log('Output:', status.output);
+}
+```
+
+**Parámetros:**
+- `taskId` (string): El ID de tarea devuelto de la ejecución asíncrona
+
+**Devuelve:** `Promise`
+
+**Campos de respuesta:**
+- `success` (boolean): Si la solicitud fue exitosa
+- `taskId` (string): El ID de la tarea
+- `status` (string): Uno de `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
+- `metadata` (object): Contiene `startedAt`, `completedAt`, y `duration`
+- `output` (any, opcional): La salida del flujo de trabajo (cuando se completa)
+- `error` (any, opcional): Detalles del error (cuando falla)
+- `estimatedDuration` (number, opcional): Duración estimada en milisegundos (cuando está procesando/en cola)
-Ejecutar un flujo de trabajo (actualmente síncrono, igual que `executeWorkflow()`).
+##### executeWithRetry()
+
+Ejecuta un flujo de trabajo con reintento automático en errores de límite de tasa utilizando retroceso exponencial.
```typescript
-const result = await client.executeWorkflowSync('workflow-id', {
- input: { data: 'some input' },
- timeout: 60000
+const result = await client.executeWithRetry('workflow-id', {
+ input: { message: 'Hello' },
+ timeout: 30000
+}, {
+ maxRetries: 3, // Maximum number of retries
+ initialDelay: 1000, // Initial delay in ms (1 second)
+ maxDelay: 30000, // Maximum delay in ms (30 seconds)
+ backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**Parámetros:**
- `workflowId` (string): El ID del flujo de trabajo a ejecutar
-- `options` (ExecutionOptions, opcional):
- - `input` (any): Datos de entrada para pasar al flujo de trabajo
- - `timeout` (number): Tiempo de espera para la solicitud inicial en milisegundos
+- `options` (ExecutionOptions, opcional): Igual que `executeWorkflow()`
+- `retryOptions` (RetryOptions, opcional):
+ - `maxRetries` (number): Número máximo de reintentos (predeterminado: 3)
+ - `initialDelay` (number): Retraso inicial en ms (predeterminado: 1000)
+ - `maxDelay` (number): Retraso máximo en ms (predeterminado: 30000)
+ - `backoffMultiplier` (number): Multiplicador de retroceso (predeterminado: 2)
+
+**Devuelve:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
+
+La lógica de reintento utiliza retroceso exponencial (1s → 2s → 4s → 8s...) con fluctuación de ±25% para evitar el efecto de manada. Si la API proporciona una cabecera `retry-after`, se utilizará en su lugar.
-**Devuelve:** `Promise`
+##### getRateLimitInfo()
+
+Obtiene la información actual del límite de tasa de la última respuesta de la API.
+
+```typescript
+const rateLimitInfo = client.getRateLimitInfo();
+if (rateLimitInfo) {
+ console.log('Limit:', rateLimitInfo.limit);
+ console.log('Remaining:', rateLimitInfo.remaining);
+ console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
+}
+```
+
+**Devuelve:** `RateLimitInfo | null`
+
+##### getUsageLimits()
+
+Obtiene los límites de uso actuales y la información de cuota para tu cuenta.
+
+```typescript
+const limits = await client.getUsageLimits();
+console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
+console.log('Async requests remaining:', limits.rateLimit.async.remaining);
+console.log('Current period cost:', limits.usage.currentPeriodCost);
+console.log('Plan:', limits.usage.plan);
+```
+
+**Devuelve:** `Promise<UsageLimits>`
+
+**Estructura de respuesta:**
+
+```typescript
+{
+ success: boolean
+ rateLimit: {
+ sync: {
+ isLimited: boolean
+ limit: number
+ remaining: number
+ resetAt: string
+ }
+ async: {
+ isLimited: boolean
+ limit: number
+ remaining: number
+ resetAt: string
+ }
+ authType: string // 'api' or 'manual'
+ }
+ usage: {
+ currentPeriodCost: number
+ limit: number
+ plan: string // e.g., 'free', 'pro'
+ }
+}
+```
##### setApiKey()
-Actualizar la clave API.
+Actualiza la clave API.
```typescript
client.setApiKey('new-api-key');
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
##### setBaseUrl()
-Actualizar la URL base.
+Actualiza la URL base.
```typescript
client.setBaseUrl('https://my-custom-domain.com');
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
+### AsyncExecutionResult
+
+```typescript
+interface AsyncExecutionResult {
+ success: boolean;
+ taskId: string;
+ status: 'queued';
+ createdAt: string;
+ links: {
+ status: string; // e.g., "/api/jobs/{taskId}"
+ };
+}
+```
+
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
+### RateLimitInfo
+
+```typescript
+interface RateLimitInfo {
+ limit: number;
+ remaining: number;
+ reset: number;
+ retryAfter?: number;
+}
+```
+
+### UsageLimits
+
+```typescript
+interface UsageLimits {
+ success: boolean;
+ rateLimit: {
+ sync: {
+ isLimited: boolean;
+ limit: number;
+ remaining: number;
+ resetAt: string;
+ };
+ async: {
+ isLimited: boolean;
+ limit: number;
+ remaining: number;
+ resetAt: string;
+ };
+ authType: string;
+ };
+ usage: {
+ currentPeriodCost: number;
+ limit: number;
+ plan: string;
+ };
+}
+```
+
### SimStudioError
```typescript
@@ -207,6 +354,13 @@ class SimStudioError extends Error {
}
```
+**Códigos de error comunes:**
+- `UNAUTHORIZED`: Clave API inválida
+- `TIMEOUT`: Tiempo de espera agotado
+- `RATE_LIMIT_EXCEEDED`: Límite de tasa excedido
+- `USAGE_LIMIT_EXCEEDED`: Límite de uso excedido
+- `EXECUTION_ERROR`: Ejecución del flujo de trabajo fallida
+
## Ejemplos
### Ejecución básica de flujo de trabajo
@@ -216,13 +370,13 @@ class SimStudioError extends Error {
Configura el SimStudioClient con tu clave API.
- Comprueba si el flujo de trabajo está implementado y listo para su ejecución.
+ Comprueba si el flujo de trabajo está desplegado y listo para su ejecución.
Ejecuta el flujo de trabajo con tus datos de entrada.
-
- Procesa el resultado de la ejecución y maneja cualquier error.
+
+ Procesa el resultado de la ejecución y gestiona cualquier error.
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ Configura el cliente usando variables de entorno:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +487,14 @@ Configura el cliente usando variables de entorno:
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -349,7 +503,7 @@ Configura el cliente usando variables de entorno:
### Integración con Express de Node.js
-Integración con un servidor Express.js:
+Integra con un servidor Express.js:
```typescript
import express from 'express';
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -430,7 +584,7 @@ export default async function handler(
### Uso del navegador
-Uso en el navegador (con la configuración CORS adecuada):
+Uso en el navegador (con configuración CORS adecuada):
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
### Ejemplo de hook de React
-Crea un hook personalizado de React para la ejecución del flujo de trabajo:
+Crea un hook personalizado de React para la ejecución de flujos de trabajo:
```typescript
import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
-
+
 {error && (
   <div>Error: {error.message}</div>
 )}
{result && (
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
-## Obtener tu clave API
+### Ejecución asíncrona de flujos de trabajo
+
+Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
+
+```typescript
+import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function executeAsync() {
+ try {
+ // Start async execution
+ const result = await client.executeWorkflow('workflow-id', {
+ input: { data: 'large dataset' },
+ async: true // Execute asynchronously
+ });
+
+ // Check if result is an async execution
+ if ('taskId' in result) {
+ console.log('Task ID:', result.taskId);
+ console.log('Status endpoint:', result.links.status);
+
+ // Poll for completion
+ let status = await client.getJobStatus(result.taskId);
+
+ while (status.status === 'queued' || status.status === 'processing') {
+ console.log('Current status:', status.status);
+ await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
+ status = await client.getJobStatus(result.taskId);
+ }
+
+ if (status.status === 'completed') {
+ console.log('Workflow completed!');
+ console.log('Output:', status.output);
+ console.log('Duration:', status.metadata.duration);
+ } else {
+ console.error('Workflow failed:', status.error);
+ }
+ }
+ } catch (error) {
+ console.error('Error:', error);
+ }
+}
+
+executeAsync();
+```
+
+### Límite de tasa y reintentos
+
+Maneja límites de tasa automáticamente con retroceso exponencial:
+
+```typescript
+import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function executeWithRetryHandling() {
+ try {
+ // Automatically retries on rate limit
+ const result = await client.executeWithRetry('workflow-id', {
+ input: { message: 'Process this' }
+ }, {
+ maxRetries: 5,
+ initialDelay: 1000,
+ maxDelay: 60000,
+ backoffMultiplier: 2
+ });
+
+ console.log('Success:', result);
+ } catch (error) {
+ if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
+ console.error('Rate limit exceeded after all retries');
+
+ // Check rate limit info
+ const rateLimitInfo = client.getRateLimitInfo();
+ if (rateLimitInfo) {
+ console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
+ }
+ }
+ }
+}
+```
+
+### Monitoreo de uso
+
+Monitorea el uso de tu cuenta y sus límites:
+
+```typescript
+import { SimStudioClient } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function checkUsage() {
+ try {
+ const limits = await client.getUsageLimits();
+
+ console.log('=== Rate Limits ===');
+ console.log('Sync requests:');
+ console.log(' Limit:', limits.rateLimit.sync.limit);
+ console.log(' Remaining:', limits.rateLimit.sync.remaining);
+ console.log(' Resets at:', limits.rateLimit.sync.resetAt);
+ console.log(' Is limited:', limits.rateLimit.sync.isLimited);
+
+ console.log('\nAsync requests:');
+ console.log(' Limit:', limits.rateLimit.async.limit);
+ console.log(' Remaining:', limits.rateLimit.async.remaining);
+ console.log(' Resets at:', limits.rateLimit.async.resetAt);
+ console.log(' Is limited:', limits.rateLimit.async.isLimited);
+
+ console.log('\n=== Usage ===');
+ console.log('Current period cost:', limits.usage.currentPeriodCost);
+ console.log('Limit:', limits.usage.limit);
+ console.log('Plan:', limits.usage.plan);
+
+ const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
+ console.log('Usage: ' + percentUsed.toFixed(1) + '%');
+
+ if (percentUsed > 80) {
+ console.log('⚠️ Warning: You are approaching your usage limit!');
+ }
+ } catch (error) {
+ console.error('Error checking usage:', error);
+ }
+}
+
+checkUsage();
+```
+
+### Streaming Workflow Execution
+
+Execute workflows with real-time streaming responses:
+
+```typescript
+import { SimStudioClient } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function executeWithStreaming() {
+ try {
+ // Habilita streaming para salidas de bloques específicos
+ const result = await client.executeWorkflow('workflow-id', {
+ input: { message: 'Count to five' },
+ stream: true,
+ selectedOutputs: ['agent1.content'] // Usa el formato blockName.attribute
+ });
+
+ console.log('Resultado del flujo de trabajo:', result);
+ } catch (error) {
+ console.error('Error:', error);
+ }
+}
+```
+
+The streaming response follows the Server-Sent Events (SSE) format:
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", dos"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+**React Streaming Example:**
+
+```typescript
+import { useState, useEffect } from 'react';
+
+function StreamingWorkflow() {
+ const [output, setOutput] = useState('');
+ const [loading, setLoading] = useState(false);
+
+ const executeStreaming = async () => {
+ setLoading(true);
+ setOutput('');
+
+ // IMPORTANT: Make this API call from your backend server, not the browser
+ // Never expose your API key in client-side code
+ const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
+ },
+ body: JSON.stringify({
+ message: 'Generate a story',
+ stream: true,
+ selectedOutputs: ['agent1.content']
+ })
+ });
+
+ const reader = response.body?.getReader();
+ const decoder = new TextDecoder();
+
+ while (reader) {
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ const chunk = decoder.decode(value);
+ const lines = chunk.split('\n\n');
+
+ for (const line of lines) {
+ if (line.startsWith('data: ')) {
+ const data = line.slice(6);
+ if (data === '[DONE]') {
+ setLoading(false);
+ break;
+ }
+
+ try {
+ const parsed = JSON.parse(data);
+ if (parsed.chunk) {
+ setOutput(prev => prev + parsed.chunk);
+ } else if (parsed.event === 'done') {
+ console.log('Execution complete:', parsed.metadata);
+ }
+ } catch (e) {
+ // Skip invalid JSON
+ }
+ }
+ }
+ }
+ };
+
+ return (
+ <div>
+   <button onClick={executeStreaming} disabled={loading}>
+     {loading ? 'Streaming...' : 'Execute Workflow'}
+   </button>
+   <pre>{output}</pre>
+ </div>
+ );
+}
+```
+
+## Getting Your API Key
-
- Navega a [Sim](https://sim.ai) e inicia sesión en tu cuenta.
+
+ Navigate to [Sim](https://sim.ai) and log in to your account.
-
- Navega al flujo de trabajo que quieres ejecutar programáticamente.
+
+ Navigate to the workflow you want to execute programmatically.
-
- Haz clic en "Deploy" para desplegar tu flujo de trabajo si aún no ha sido desplegado.
+
+ Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
-
- Durante el proceso de despliegue, selecciona o crea una clave API.
+
+ During the deployment process, select or create an API key.
-
- Copia la clave API para usarla en tu aplicación TypeScript/JavaScript.
+
+ Copy the API key to use in your TypeScript/JavaScript application.
- Mantén tu clave API segura y nunca la incluyas en el control de versiones. Usa variables de entorno o gestión de configuración segura.
+ Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
-## Requisitos
+## Requirements
- Node.js 16+
-- TypeScript 5.0+ (para proyectos TypeScript)
+- TypeScript 5.0+ (for TypeScript projects)
-## Soporte para TypeScript
+## TypeScript Support
-El SDK está escrito en TypeScript y proporciona seguridad de tipos completa:
+The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -588,13 +971,13 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
- message: 'Hello, TypeScript!'
+ message: '¡Hola, TypeScript!'
}
});
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
-## Licencia
+## License
+
-Apache-2.0
\ No newline at end of file
+Apache-2.0
diff --git a/apps/docs/content/docs/es/triggers/api.mdx b/apps/docs/content/docs/es/triggers/api.mdx
index c05facd2a4..09ecda8971 100644
--- a/apps/docs/content/docs/es/triggers/api.mdx
+++ b/apps/docs/content/docs/es/triggers/api.mdx
@@ -38,15 +38,93 @@ curl -X POST \
Las respuestas exitosas devuelven el resultado de ejecución serializado del Ejecutor. Los errores muestran fallos de validación, autenticación o flujo de trabajo.
+## Respuestas en streaming
+
+Habilita el streaming en tiempo real para recibir la salida del flujo de trabajo a medida que se genera, carácter por carácter. Esto es útil para mostrar las respuestas de IA progresivamente a los usuarios.
+
+### Parámetros de solicitud
+
+Añade estos parámetros para habilitar el streaming:
+
+- `stream` - Establece a `true` para habilitar el streaming de eventos enviados por el servidor (SSE)
+- `selectedOutputs` - Array de salidas de bloques para transmitir (p. ej., `["agent1.content"]`)
+
+### Formato de salida de bloque
+
+Usa el formato `blockName.attribute` para especificar qué salidas de bloques transmitir:
+- Formato: `"blockName.attribute"` (p. ej., si quieres transmitir el contenido del bloque Agente 1, usarías `"agent1.content"`)
+- Los nombres de los bloques no distinguen entre mayúsculas y minúsculas y se ignoran los espacios
+
+### Ejemplo de solicitud
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Count to five",
+ "stream": true,
+ "selectedOutputs": ["agent1.content"]
+ }'
+```
+
+### Formato de respuesta
+
+Las respuestas en streaming utilizan el formato de eventos enviados por el servidor (SSE):
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+Cada evento incluye:
+- **Fragmentos de streaming**: `{"blockId": "...", "chunk": "text"}` - Texto en tiempo real a medida que se genera
+- **Evento final**: `{"event": "done", ...}` - Metadatos de ejecución y resultados completos
+- **Terminador**: `[DONE]` - Señala el fin del stream
+
+### Streaming de múltiples bloques
+
+Cuando `selectedOutputs` incluye múltiples bloques, cada fragmento indica qué bloque lo produjo:
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Process this request",
+ "stream": true,
+ "selectedOutputs": ["agent1.content", "agent2.content"]
+ }'
+```
+
+El campo `blockId` en cada fragmento te permite dirigir la salida al elemento de UI correcto:
+
+```
+data: {"blockId":"agent1-uuid","chunk":"Processing..."}
+
+data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
+
+data: {"blockId":"agent1-uuid","chunk":" complete"}
+```
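+
+Como boceto mínimo (asumiendo la misma petición del ejemplo anterior), un cliente en Python puede leer el stream y agrupar los fragmentos por `blockId`:
+
+```python
+import json
+from collections import defaultdict
+
+import requests
+
+response = requests.post(
+    "https://sim.ai/api/workflows/WORKFLOW_ID/execute",
+    headers={"Content-Type": "application/json", "X-API-Key": "YOUR_KEY"},
+    json={"message": "Process this request", "stream": True,
+          "selectedOutputs": ["agent1.content", "agent2.content"]},
+    stream=True,
+)
+
+salidas = defaultdict(str)  # blockId -> texto acumulado
+for line in response.iter_lines(decode_unicode=True):
+    if not line or not line.startswith("data: "):
+        continue
+    data = line[len("data: "):]
+    if data == "[DONE]":
+        break
+    event = json.loads(data)
+    if "chunk" in event:
+        salidas[event["blockId"]] += event["chunk"]
+```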
+
## Referencia de salida
| Referencia | Descripción |
|-----------|-------------|
-| `` | Campo definido en el Formato de Entrada |
-| `` | Cuerpo completo estructurado de la solicitud |
+| `` | Campo definido en el formato de entrada |
+| `` | Cuerpo de solicitud estructurado completo |
-Si no se define un Formato de Entrada, el ejecutor expone el JSON sin procesar solo en ``.
+Si no se define un formato de entrada, el ejecutor expone el JSON sin procesar solo en ``.
-Un flujo de trabajo puede contener solo un Disparador de API. Publica una nueva implementación después de realizar cambios para que el punto de conexión se mantenga actualizado.
+Un flujo de trabajo puede contener solo un disparador de API. Publica una nueva implementación después de los cambios para que el endpoint se mantenga actualizado.
diff --git a/apps/docs/content/docs/fr/sdks/python.mdx b/apps/docs/content/docs/fr/sdks/python.mdx
index 61b0fb5c67..faf5f4b203 100644
--- a/apps/docs/content/docs/fr/sdks/python.mdx
+++ b/apps/docs/content/docs/fr/sdks/python.mdx
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Le SDK Python officiel pour Sim vous permet d'exécuter des workflows de manière programmatique à partir de vos applications Python en utilisant le SDK Python officiel.
- Le SDK Python prend en charge Python 3.8+ et fournit une exécution synchrone des workflows. Toutes les exécutions de workflow sont actuellement synchrones.
+ Le SDK Python prend en charge Python 3.8+ avec support d'exécution asynchrone, limitation automatique du débit avec backoff exponentiel, et suivi d'utilisation.
## Installation
@@ -71,11 +71,16 @@ result = client.execute_workflow(
```
**Paramètres :**
-- `workflow_id` (str) : L'ID du workflow à exécuter
+- `workflow_id` (str) : L'identifiant du workflow à exécuter
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
-- `timeout` (float, facultatif) : Délai d'attente en secondes (par défaut : 30.0)
+- `timeout` (float, facultatif) : Délai d'expiration en secondes (par défaut : 30.0)
+- `stream` (bool, facultatif) : Activer les réponses en streaming (par défaut : False)
+- `selected_outputs` (list[str], facultatif) : Sorties de blocs à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
+- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone (par défaut : False)
+
+**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
-**Retourne :** `WorkflowExecutionResult`
+Lorsque `async_execution=True`, retourne immédiatement un identifiant de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
##### get_workflow_status()
@@ -87,13 +92,13 @@ print("Is deployed:", status.is_deployed)
```
**Paramètres :**
-- `workflow_id` (str) : L'ID du workflow
+- `workflow_id` (str) : L'identifiant du workflow
**Retourne :** `WorkflowStatus`
##### validate_workflow()
-Valide qu'un workflow est prêt pour l'exécution.
+Valider qu'un workflow est prêt pour l'exécution.
```python
is_ready = client.validate_workflow("workflow-id")
@@ -107,32 +112,122 @@ if is_ready:
**Retourne :** `bool`
-##### execute_workflow_sync()
+##### get_job_status()
-
- Actuellement, cette méthode est identique à `execute_workflow()` puisque toutes les exécutions sont synchrones. Cette méthode est fournie pour une compatibilité future lorsque l'exécution asynchrone sera ajoutée.
-
+Obtenir le statut d'une exécution de tâche asynchrone.
-Exécute un workflow (actuellement synchrone, identique à `execute_workflow()`).
+```python
+status = client.get_job_status("task-id-from-async-execution")
+print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
+if status["status"] == "completed":
+ print("Output:", status["output"])
+```
+
+**Paramètres :**
+- `task_id` (str) : L'identifiant de tâche retourné par l'exécution asynchrone
+
+**Retourne :** `Dict[str, Any]`
+
+**Champs de réponse :**
+- `success` (bool) : Si la requête a réussi
+- `taskId` (str) : L'identifiant de la tâche
+- `status` (str) : L'un des états suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
+- `metadata` (dict) : Contient `startedAt`, `completedAt`, et `duration`
+- `output` (any, facultatif) : La sortie du workflow (une fois terminé)
+- `error` (any, facultatif) : Détails de l'erreur (en cas d'échec)
+- `estimatedDuration` (int, facultatif) : Durée estimée en millisecondes (lors du traitement/mise en file d'attente)
+
+##### execute_with_retry()
+
+Exécuter un workflow avec réessai automatique en cas d'erreurs de limitation de débit, en utilisant un backoff exponentiel.
```python
-result = client.execute_workflow_sync(
+result = client.execute_with_retry(
"workflow-id",
- input_data={"data": "some input"},
- timeout=60.0
+ input_data={"message": "Hello"},
+ timeout=30.0,
+ max_retries=3, # Maximum number of retries
+ initial_delay=1.0, # Initial delay in seconds
+ max_delay=30.0, # Maximum delay in seconds
+ backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**Paramètres :**
- `workflow_id` (str) : L'identifiant du workflow à exécuter
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
-- `timeout` (float) : Délai d'attente pour la requête initiale en secondes
+- `timeout` (float, facultatif) : Délai d'expiration en secondes
+- `stream` (bool, facultatif) : Activer les réponses en streaming
+- `selected_outputs` (list, facultatif) : Sorties de blocs à diffuser
+- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone
+- `max_retries` (int, facultatif) : Nombre maximum de tentatives (par défaut : 3)
+- `initial_delay` (float, facultatif) : Délai initial en secondes (par défaut : 1.0)
+- `max_delay` (float, facultatif) : Délai maximum en secondes (par défaut : 30.0)
+- `backoff_multiplier` (float, facultatif) : Multiplicateur de backoff (par défaut : 2.0)
+
+**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
+
+La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25% pour éviter l'effet de horde. Si l'API fournit un en-tête `retry-after`, celui-ci sera utilisé à la place.
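+
+À titre d'illustration (esquisse, pas le code interne du SDK), le délai avant une nouvelle tentative peut être choisi ainsi, en privilégiant l'en-tête `retry-after` lorsqu'il est présent :
+
+```python
+import random
+from typing import Optional
+
+def prochain_delai(tentative: int, retry_after: Optional[float] = None,
+                   initial_delay: float = 1.0, max_delay: float = 30.0,
+                   backoff_multiplier: float = 2.0) -> float:
+    """Backoff exponentiel (1s, 2s, 4s, 8s...) avec ±25% de variation, borné par max_delay."""
+    if retry_after is not None:
+        return retry_after  # l'API indique explicitement combien de temps attendre
+    base = min(initial_delay * (backoff_multiplier ** tentative), max_delay)
+    return base + base * random.uniform(-0.25, 0.25)
+```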
+
+##### get_rate_limit_info()
+
+Obtenir les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
-**Retourne :** `WorkflowExecutionResult`
+```python
+rate_limit_info = client.get_rate_limit_info()
+if rate_limit_info:
+ print("Limit:", rate_limit_info.limit)
+ print("Remaining:", rate_limit_info.remaining)
+ print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
+```
+
+**Retourne :** `RateLimitInfo | None`
+
+##### get_usage_limits()
+
+Obtenir les limites d'utilisation actuelles et les informations de quota pour votre compte.
+
+```python
+limits = client.get_usage_limits()
+print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
+print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
+print("Current period cost:", limits.usage["currentPeriodCost"])
+print("Plan:", limits.usage["plan"])
+```
+
+**Retourne :** `UsageLimits`
+
+**Structure de la réponse :**
+
+```python
+{
+ "success": bool,
+ "rateLimit": {
+ "sync": {
+ "isLimited": bool,
+ "limit": int,
+ "remaining": int,
+ "resetAt": str
+ },
+ "async": {
+ "isLimited": bool,
+ "limit": int,
+ "remaining": int,
+ "resetAt": str
+ },
+ "authType": str # 'api' or 'manual'
+ },
+ "usage": {
+ "currentPeriodCost": float,
+ "limit": float,
+ "plan": str # e.g., 'free', 'pro'
+ }
+}
+```
##### set_api_key()
-Met à jour la clé API.
+Mettre à jour la clé API.
```python
client.set_api_key("new-api-key")
@@ -140,7 +235,7 @@ client.set_api_key("new-api-key")
##### set_base_url()
-Met à jour l'URL de base.
+Mettre à jour l'URL de base.
```python
client.set_base_url("https://my-custom-domain.com")
@@ -148,7 +243,7 @@ client.set_base_url("https://my-custom-domain.com")
##### close()
-Ferme la session HTTP sous-jacente.
+Fermer la session HTTP sous-jacente.
```python
client.close()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
+### AsyncExecutionResult
+
+```python
+@dataclass
+class AsyncExecutionResult:
+ success: bool
+ task_id: str
+ status: str # 'queued'
+ created_at: str
+ links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
+```
+
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
+### RateLimitInfo
+
+```python
+@dataclass
+class RateLimitInfo:
+ limit: int
+ remaining: int
+ reset: int
+ retry_after: Optional[int] = None
+```
+
+### UsageLimits
+
+```python
+@dataclass
+class UsageLimits:
+ success: bool
+ rate_limit: Dict[str, Any]
+ usage: Dict[str, Any]
+```
+
### SimStudioError
```python
@@ -191,19 +319,26 @@ class SimStudioError(Exception):
self.status = status
```
+**Codes d'erreur courants :**
+- `UNAUTHORIZED` : Clé API invalide
+- `TIMEOUT` : Délai d'attente de la requête dépassé
+- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
+- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
+- `EXECUTION_ERROR` : Échec de l'exécution du workflow
+
## Exemples
-### Exécution de flux de travail basique
+### Exécution basique d'un workflow
Configurez le SimStudioClient avec votre clé API.
-
- Vérifiez si le flux de travail est déployé et prêt pour l'exécution.
+
+ Vérifiez si le workflow est déployé et prêt pour l'exécution.
-
- Lancez le flux de travail avec vos données d'entrée.
+
+ Lancez le workflow avec vos données d'entrée.
Traitez le résultat de l'exécution et gérez les éventuelles erreurs.
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -246,13 +381,13 @@ run_workflow()
### Gestion des erreurs
-Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du flux de travail :
+Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du workflow :
```python
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -284,21 +419,21 @@ from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
```
-### Exécution de flux de travail par lots
+### Exécution de workflows par lots
-Exécutez plusieurs flux de travail efficacement :
+Exécutez plusieurs workflows efficacement :
```python
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
+### Exécution asynchrone de workflow
+
+Exécutez des workflows de manière asynchrone pour les tâches de longue durée :
+
+```python
+import os
+import time
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_async():
+ try:
+ # Start async execution
+ result = client.execute_workflow(
+ "workflow-id",
+ input_data={"data": "large dataset"},
+ async_execution=True # Execute asynchronously
+ )
+
+ # Check if result is an async execution
+ if hasattr(result, 'task_id'):
+ print(f"Task ID: {result.task_id}")
+ print(f"Status endpoint: {result.links['status']}")
+
+ # Poll for completion
+ status = client.get_job_status(result.task_id)
+
+ while status["status"] in ["queued", "processing"]:
+ print(f"Current status: {status['status']}")
+ time.sleep(2) # Wait 2 seconds
+ status = client.get_job_status(result.task_id)
+
+ if status["status"] == "completed":
+ print("Workflow completed!")
+ print(f"Output: {status['output']}")
+ print(f"Duration: {status['metadata']['duration']}")
+ else:
+ print(f"Workflow failed: {status['error']}")
+
+ except Exception as error:
+ print(f"Error: {error}")
+
+execute_async()
+```
+
+### Limitation de débit et nouvelle tentative
+
+Gérez les limites de débit automatiquement avec un backoff exponentiel :
+
+```python
+import os
+from simstudio import SimStudioClient, SimStudioError
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_with_retry_handling():
+ try:
+ # Automatically retries on rate limit
+ result = client.execute_with_retry(
+ "workflow-id",
+ input_data={"message": "Process this"},
+ max_retries=5,
+ initial_delay=1.0,
+ max_delay=60.0,
+ backoff_multiplier=2.0
+ )
+
+ print(f"Success: {result}")
+ except SimStudioError as error:
+ if error.code == "RATE_LIMIT_EXCEEDED":
+ print("Rate limit exceeded after all retries")
+
+ # Check rate limit info
+ rate_limit_info = client.get_rate_limit_info()
+ if rate_limit_info:
+ from datetime import datetime
+ reset_time = datetime.fromtimestamp(rate_limit_info.reset)
+ print(f"Rate limit resets at: {reset_time}")
+
+execute_with_retry_handling()
+```
+
+### Surveillance de l'utilisation
+
+Surveillez l'utilisation et les limites de votre compte :
+
+```python
+import os
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def check_usage():
+ try:
+ limits = client.get_usage_limits()
+
+ print("=== Rate Limits ===")
+ print("Sync requests:")
+ print(f" Limit: {limits.rate_limit['sync']['limit']}")
+ print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
+ print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
+ print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
+
+ print("\nAsync requests:")
+ print(f" Limit: {limits.rate_limit['async']['limit']}")
+ print(f" Remaining: {limits.rate_limit['async']['remaining']}")
+ print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
+ print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
+
+ print("\n=== Usage ===")
+ print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
+ print(f"Limit: ${limits.usage['limit']:.2f}")
+ print(f"Plan: {limits.usage['plan']}")
+
+ percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
+ print(f"Usage: {percent_used:.1f}%")
+
+ if percent_used > 80:
+ print("⚠️ Warning: You are approaching your usage limit!")
+
+ except Exception as error:
+ print(f"Error checking usage: {error}")
+
+check_usage()
+```
+
+### Exécution de workflow en streaming
+
+Exécutez des workflows avec des réponses en streaming en temps réel :
+
+```python
+from simstudio import SimStudioClient
+import os
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_with_streaming():
+ """Execute workflow with streaming enabled."""
+ try:
+ # Enable streaming for specific block outputs
+ result = client.execute_workflow(
+ "workflow-id",
+ input_data={"message": "Count to five"},
+ stream=True,
+ selected_outputs=["agent1.content"] # Use blockName.attribute format
+ )
+
+ print("Workflow result:", result)
+ except Exception as error:
+ print("Error:", error)
+
+execute_with_streaming()
+```
+
+La réponse en streaming suit le format Server-Sent Events (SSE) :
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+**Exemple de streaming avec Flask :**
+
+```python
+from flask import Flask, Response, stream_with_context
+import requests
+import json
+import os
+
+app = Flask(__name__)
+
+@app.route('/stream-workflow')
+def stream_workflow():
+ """Stream workflow execution to the client."""
+
+ def generate():
+ response = requests.post(
+ 'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
+ headers={
+ 'Content-Type': 'application/json',
+ 'X-API-Key': os.getenv('SIM_API_KEY')
+ },
+ json={
+ 'message': 'Generate a story',
+ 'stream': True,
+ 'selectedOutputs': ['agent1.content']
+ },
+ stream=True
+ )
+
+ for line in response.iter_lines():
+ if line:
+ decoded_line = line.decode('utf-8')
+ if decoded_line.startswith('data: '):
+ data = decoded_line[6:] # Remove 'data: ' prefix
+
+ if data == '[DONE]':
+ break
+
+ try:
+ parsed = json.loads(data)
+ if 'chunk' in parsed:
+ yield f"data: {json.dumps(parsed)}\n\n"
+ elif parsed.get('event') == 'done':
+ yield f"data: {json.dumps(parsed)}\n\n"
+ print("Execution complete:", parsed.get('metadata'))
+ except json.JSONDecodeError:
+ pass
+
+ return Response(
+ stream_with_context(generate()),
+ mimetype='text/event-stream'
+ )
+
+if __name__ == '__main__':
+ app.run(debug=True)
+```
+
### Configuration de l'environnement
Configurez le client en utilisant des variables d'environnement :
@@ -352,8 +711,8 @@ Configurez le client en utilisant des variables d'environnement :
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+    api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,30 +724,30 @@ Configurez le client en utilisant des variables d'environnement :
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
-## Obtenir votre clé API
+## Obtention de votre clé API
Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
-
- Naviguez vers le flux de travail que vous souhaitez exécuter par programmation.
+
+ Accédez au workflow que vous souhaitez exécuter par programmation.
-
- Cliquez sur "Déployer" pour déployer votre flux de travail s'il n'a pas encore été déployé.
+
+ Cliquez sur "Déployer" pour déployer votre workflow s'il n'a pas encore été déployé.
Pendant le processus de déploiement, sélectionnez ou créez une clé API.
diff --git a/apps/docs/content/docs/fr/sdks/typescript.mdx b/apps/docs/content/docs/fr/sdks/typescript.mdx
index b57bbd0803..ca572c346e 100644
--- a/apps/docs/content/docs/fr/sdks/typescript.mdx
+++ b/apps/docs/content/docs/fr/sdks/typescript.mdx
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
-Le SDK officiel TypeScript/JavaScript pour Sim offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur, vous permettant d'exécuter des workflows de manière programmatique depuis vos applications Node.js, applications web et autres environnements JavaScript. Toutes les exécutions de workflow sont actuellement synchrones.
+Le SDK officiel TypeScript/JavaScript pour Sim offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur, vous permettant d'exécuter des workflows par programmation depuis vos applications Node.js, applications web et autres environnements JavaScript.
- Le SDK TypeScript offre une sécurité de type complète et prend en charge les environnements Node.js et navigateur. Toutes les exécutions de workflow sont actuellement synchrones.
+ Le SDK TypeScript offre une sécurité de type complète, la prise en charge de l'exécution asynchrone, une limitation automatique du débit avec backoff exponentiel et le suivi d'utilisation.
## Installation
@@ -91,12 +91,17 @@ const result = await client.executeWorkflow('workflow-id', {
```
**Paramètres :**
-- `workflowId` (string) : L'identifiant du workflow à exécuter
-- `options` (ExecutionOptions, facultatif) :
+- `workflowId` (string) : L'ID du workflow à exécuter
+- `options` (ExecutionOptions, optionnel) :
- `input` (any) : Données d'entrée à transmettre au workflow
- `timeout` (number) : Délai d'expiration en millisecondes (par défaut : 30000)
+ - `stream` (boolean) : Activer les réponses en streaming (par défaut : false)
+  - `selectedOutputs` (string[]) : Sorties de blocs à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
+ - `async` (boolean) : Exécuter de manière asynchrone (par défaut : false)
+
+**Retourne :** `Promise`
-**Retourne :** `Promise`
+Lorsque `async: true`, retourne immédiatement avec un ID de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
##### getWorkflowStatus()
@@ -108,7 +113,7 @@ console.log('Is deployed:', status.isDeployed);
```
**Paramètres :**
-- `workflowId` (string) : L'identifiant du workflow
+- `workflowId` (string) : L'ID du workflow
**Retourne :** `Promise`
@@ -124,36 +129,125 @@ if (isReady) {
```
**Paramètres :**
-- `workflowId` (string) : L'identifiant du workflow
+- `workflowId` (string) : L'ID du workflow
**Retourne :** `Promise`
-##### executeWorkflowSync()
+##### getJobStatus()
-
- Actuellement, cette méthode est identique à `executeWorkflow()` puisque toutes les exécutions sont synchrones. Cette méthode est fournie pour une compatibilité future lorsque l'exécution asynchrone sera ajoutée.
-
+Obtenir le statut d'une exécution de tâche asynchrone.
+
+```typescript
+const status = await client.getJobStatus('task-id-from-async-execution');
+console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
+if (status.status === 'completed') {
+ console.log('Output:', status.output);
+}
+```
+
+**Paramètres :**
+- `taskId` (string) : L'ID de tâche retourné par l'exécution asynchrone
-Exécuter un workflow (actuellement synchrone, identique à `executeWorkflow()`).
+**Retourne :** `Promise`
+
+**Champs de réponse :**
+- `success` (boolean) : Indique si la requête a réussi
+- `taskId` (string) : L'ID de la tâche
+- `status` (string) : L'un des statuts suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
+- `metadata` (object) : Contient `startedAt`, `completedAt`, et `duration`
+- `output` (any, optionnel) : La sortie du workflow (une fois terminé)
+- `error` (any, optionnel) : Détails de l'erreur (en cas d'échec)
+- `estimatedDuration` (number, optionnel) : Durée estimée en millisecondes (lorsqu'en traitement/en file d'attente)
+
+##### executeWithRetry()
+
+Exécute un workflow avec une nouvelle tentative automatique en cas d'erreurs de limite de débit en utilisant un backoff exponentiel.
```typescript
-const result = await client.executeWorkflowSync('workflow-id', {
- input: { data: 'some input' },
- timeout: 60000
+const result = await client.executeWithRetry('workflow-id', {
+ input: { message: 'Hello' },
+ timeout: 30000
+}, {
+ maxRetries: 3, // Maximum number of retries
+ initialDelay: 1000, // Initial delay in ms (1 second)
+ maxDelay: 30000, // Maximum delay in ms (30 seconds)
+ backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**Paramètres :**
- `workflowId` (string) : L'identifiant du workflow à exécuter
-- `options` (ExecutionOptions, facultatif) :
- - `input` (any) : Données d'entrée à transmettre au workflow
- - `timeout` (number) : Délai d'expiration pour la requête initiale en millisecondes
+- `options` (ExecutionOptions, facultatif) : Identique à `executeWorkflow()`
+- `retryOptions` (RetryOptions, facultatif) :
+ - `maxRetries` (number) : Nombre maximum de tentatives (par défaut : 3)
+ - `initialDelay` (number) : Délai initial en ms (par défaut : 1000)
+ - `maxDelay` (number) : Délai maximum en ms (par défaut : 30000)
+ - `backoffMultiplier` (number) : Multiplicateur de backoff (par défaut : 2)
+
+**Retourne :** `Promise`
+
+La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25 % pour éviter l'effet de rafale. Si l'API fournit un en-tête `retry-after`, celui-ci sera utilisé à la place.
+
+##### getRateLimitInfo()
+
+Obtient les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
+
+```typescript
+const rateLimitInfo = client.getRateLimitInfo();
+if (rateLimitInfo) {
+ console.log('Limit:', rateLimitInfo.limit);
+ console.log('Remaining:', rateLimitInfo.remaining);
+ console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
+}
+```
+
+**Retourne :** `RateLimitInfo | null`
+
+##### getUsageLimits()
+
+Obtient les limites d'utilisation actuelles et les informations de quota pour votre compte.
+
+```typescript
+const limits = await client.getUsageLimits();
+console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
+console.log('Async requests remaining:', limits.rateLimit.async.remaining);
+console.log('Current period cost:', limits.usage.currentPeriodCost);
+console.log('Plan:', limits.usage.plan);
+```
+
+**Retourne :** `Promise`
+
+**Structure de la réponse :**
-**Retourne :** `Promise`
+```typescript
+{
+ success: boolean
+ rateLimit: {
+ sync: {
+ isLimited: boolean
+ limit: number
+ remaining: number
+ resetAt: string
+ }
+ async: {
+ isLimited: boolean
+ limit: number
+ remaining: number
+ resetAt: string
+ }
+ authType: string // 'api' or 'manual'
+ }
+ usage: {
+ currentPeriodCost: number
+ limit: number
+ plan: string // e.g., 'free', 'pro'
+ }
+}
+```
##### setApiKey()
-Mettre à jour la clé API.
+Met à jour la clé API.
```typescript
client.setApiKey('new-api-key');
@@ -161,7 +255,7 @@ client.setApiKey('new-api-key');
##### setBaseUrl()
-Mettre à jour l'URL de base.
+Met à jour l'URL de base.
```typescript
client.setBaseUrl('https://my-custom-domain.com');
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
+### AsyncExecutionResult
+
+```typescript
+interface AsyncExecutionResult {
+ success: boolean;
+ taskId: string;
+ status: 'queued';
+ createdAt: string;
+ links: {
+ status: string; // e.g., "/api/jobs/{taskId}"
+ };
+}
+```
+
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
+### RateLimitInfo
+
+```typescript
+interface RateLimitInfo {
+ limit: number;
+ remaining: number;
+ reset: number;
+ retryAfter?: number;
+}
+```
+
+### UsageLimits
+
+```typescript
+interface UsageLimits {
+ success: boolean;
+ rateLimit: {
+ sync: {
+ isLimited: boolean;
+ limit: number;
+ remaining: number;
+ resetAt: string;
+ };
+ async: {
+ isLimited: boolean;
+ limit: number;
+ remaining: number;
+ resetAt: string;
+ };
+ authType: string;
+ };
+ usage: {
+ currentPeriodCost: number;
+ limit: number;
+ plan: string;
+ };
+}
+```
+
### SimStudioError
```typescript
@@ -207,9 +354,16 @@ class SimStudioError extends Error {
}
```
+**Codes d'erreur courants :**
+- `UNAUTHORIZED` : Clé API invalide
+- `TIMEOUT` : Délai d'attente de la requête dépassé
+- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
+- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
+- `EXECUTION_ERROR` : Échec de l'exécution du workflow
+
## Exemples
-### Exécution de workflow basique
+### Exécution basique d'un workflow
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ Configurez le client en utilisant des variables d'environnement :
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,14 +487,14 @@ Configurez le client en utilisant des variables d'environnement :
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -469,14 +623,14 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
### Exemple de hook React
-Créez un hook React personnalisé pour l'exécution du workflow :
+Créer un hook React personnalisé pour l'exécution de workflow :
```typescript
import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
-
+
{error &&
Error: {error.message}
}
{result && (
@@ -545,38 +699,267 @@ function WorkflowComponent() {
}
```
-## Obtenir votre clé API
+### Exécution asynchrone de workflow
+
+Exécuter des workflows de manière asynchrone pour les tâches de longue durée :
+
+```typescript
+import { SimStudioClient, AsyncExecutionResult } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function executeAsync() {
+ try {
+ // Start async execution
+ const result = await client.executeWorkflow('workflow-id', {
+ input: { data: 'large dataset' },
+ async: true // Execute asynchronously
+ });
+
+ // Check if result is an async execution
+ if ('taskId' in result) {
+ console.log('Task ID:', result.taskId);
+ console.log('Status endpoint:', result.links.status);
+
+ // Poll for completion
+ let status = await client.getJobStatus(result.taskId);
+
+ while (status.status === 'queued' || status.status === 'processing') {
+ console.log('Current status:', status.status);
+ await new Promise(resolve => setTimeout(resolve, 2000)); // Wait 2 seconds
+ status = await client.getJobStatus(result.taskId);
+ }
+
+ if (status.status === 'completed') {
+ console.log('Workflow completed!');
+ console.log('Output:', status.output);
+ console.log('Duration:', status.metadata.duration);
+ } else {
+ console.error('Workflow failed:', status.error);
+ }
+ }
+ } catch (error) {
+ console.error('Error:', error);
+ }
+}
+
+executeAsync();
+```
+
+### Limitation de débit et nouvelle tentative
+
+Gérer automatiquement les limites de débit avec backoff exponentiel :
+
+```typescript
+import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function executeWithRetryHandling() {
+ try {
+ // Automatically retries on rate limit
+ const result = await client.executeWithRetry('workflow-id', {
+ input: { message: 'Process this' }
+ }, {
+ maxRetries: 5,
+ initialDelay: 1000,
+ maxDelay: 60000,
+ backoffMultiplier: 2
+ });
+
+ console.log('Success:', result);
+ } catch (error) {
+ if (error instanceof SimStudioError && error.code === 'RATE_LIMIT_EXCEEDED') {
+ console.error('Rate limit exceeded after all retries');
+
+ // Check rate limit info
+ const rateLimitInfo = client.getRateLimitInfo();
+ if (rateLimitInfo) {
+ console.log('Rate limit resets at:', new Date(rateLimitInfo.reset * 1000));
+ }
+ }
+ }
+}
+```
+
+### Surveillance d'utilisation
+
+Surveiller l'utilisation et les limites de votre compte :
+
+```typescript
+import { SimStudioClient } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function checkUsage() {
+ try {
+ const limits = await client.getUsageLimits();
+
+ console.log('=== Rate Limits ===');
+ console.log('Sync requests:');
+ console.log(' Limit:', limits.rateLimit.sync.limit);
+ console.log(' Remaining:', limits.rateLimit.sync.remaining);
+ console.log(' Resets at:', limits.rateLimit.sync.resetAt);
+ console.log(' Is limited:', limits.rateLimit.sync.isLimited);
+
+ console.log('\nAsync requests:');
+ console.log(' Limit:', limits.rateLimit.async.limit);
+ console.log(' Remaining:', limits.rateLimit.async.remaining);
+ console.log(' Resets at:', limits.rateLimit.async.resetAt);
+ console.log(' Is limited:', limits.rateLimit.async.isLimited);
+
+ console.log('\n=== Usage ===');
+    console.log('Current period cost:', limits.usage.currentPeriodCost);
+    console.log('Limit:', limits.usage.limit);
+    console.log('Plan:', limits.usage.plan);
+
+    const percentUsed = (limits.usage.currentPeriodCost / limits.usage.limit) * 100;
+    console.log(`Usage: ${percentUsed.toFixed(1)}%`);
+
+    if (percentUsed > 80) {
+      console.warn('⚠️ Warning: You are approaching your usage limit!');
+    }
+  } catch (error) {
+    console.error('Error checking usage:', error);
+  }
+}
+
+checkUsage();
+```
+
+### Streaming Workflow Execution
+
+Execute workflows with real-time streaming responses:
+
+```typescript
+import { SimStudioClient } from 'simstudio-ts-sdk';
+
+const client = new SimStudioClient({
+ apiKey: process.env.SIM_API_KEY!
+});
+
+async function executeWithStreaming() {
+ try {
+ // Activer le streaming pour des sorties de blocs spécifiques
+ const result = await client.executeWorkflow('workflow-id', {
+      input: { message: "Compter jusqu'à cinq" },
+ stream: true,
+ selectedOutputs: ['agent1.content'] // Utiliser le format blockName.attribute
+ });
+
+ console.log('Résultat du workflow :', result);
+ } catch (error) {
+ console.error('Erreur :', error);
+ }
+}
+```
+
+The streaming response follows the Server-Sent Events (SSE) format:
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", deux"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+**React Streaming Example:**
+
+```typescript
+import { useState, useEffect } from 'react';
+
+function StreamingWorkflow() {
+ const [output, setOutput] = useState('');
+ const [loading, setLoading] = useState(false);
+
+ const executeStreaming = async () => {
+ setLoading(true);
+ setOutput('');
+
+ // IMPORTANT: Make this API call from your backend server, not the browser
+ // Never expose your API key in client-side code
+ const response = await fetch('https://sim.ai/api/workflows/WORKFLOW_ID/execute', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'X-API-Key': process.env.SIM_API_KEY! // Server-side environment variable only
+ },
+ body: JSON.stringify({
+ message: 'Generate a story',
+ stream: true,
+ selectedOutputs: ['agent1.content']
+ })
+ });
+
+ const reader = response.body?.getReader();
+ const decoder = new TextDecoder();
+
+ while (reader) {
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ const chunk = decoder.decode(value);
+ const lines = chunk.split('\n\n');
+
+ for (const line of lines) {
+ if (line.startsWith('data: ')) {
+ const data = line.slice(6);
+ if (data === '[DONE]') {
+ setLoading(false);
+ break;
+ }
+
+ try {
+ const parsed = JSON.parse(data);
+ if (parsed.chunk) {
+ setOutput(prev => prev + parsed.chunk);
+ } else if (parsed.event === 'done') {
+ console.log('Execution complete:', parsed.metadata);
+ }
+ } catch (e) {
+ // Skip invalid JSON
+ }
+ }
+ }
+ }
+ };
+
+ return (
+
+
+
{output}
+
+ );
+}
+```
+
+## Getting Your API Key
-
- Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
+
+ Navigate to [Sim](https://sim.ai) and log in to your account.
-
- Accédez au workflow que vous souhaitez exécuter par programmation.
+
+ Navigate to the workflow you want to execute programmatically.
-
- Cliquez sur « Déployer » pour déployer votre workflow s'il n'a pas encore été déployé.
+
+ Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
-
- Pendant le processus de déploiement, sélectionnez ou créez une clé API.
+
+ During the deployment process, select or create an API key.
-
- Copiez la clé API à utiliser dans votre application TypeScript/JavaScript.
+
+ Copy the API key to use in your TypeScript/JavaScript application.
- Gardez votre clé API en sécurité et ne la soumettez jamais au contrôle de version. Utilisez des variables d'environnement ou une gestion de configuration sécurisée.
+ Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
-## Prérequis
+## Requirements
- Node.js 16+
-- TypeScript 5.0+ (pour les projets TypeScript)
+- TypeScript 5.0+ (for TypeScript projects)
-## Support TypeScript
+## TypeScript Support
-Le SDK est écrit en TypeScript et offre une sécurité de type complète :
+The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -588,13 +971,13 @@ import {
// Type-safe client initialization
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
// Type-safe workflow execution
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
- message: 'Hello, TypeScript!'
+ message: 'Bonjour, TypeScript !'
}
});
@@ -602,6 +985,7 @@ const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-i
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
-## Licence
+## License
+
-Apache-2.0
\ No newline at end of file
+Apache-2.0
diff --git a/apps/docs/content/docs/fr/triggers/api.mdx b/apps/docs/content/docs/fr/triggers/api.mdx
index bac2d9ab95..978ec55610 100644
--- a/apps/docs/content/docs/fr/triggers/api.mdx
+++ b/apps/docs/content/docs/fr/triggers/api.mdx
@@ -38,6 +38,84 @@ curl -X POST \
Les réponses réussies renvoient le résultat d'exécution sérialisé de l'exécuteur. Les erreurs révèlent des problèmes de validation, d'authentification ou d'échec du workflow.
+## Réponses en streaming
+
+Activez le streaming en temps réel pour recevoir les résultats du workflow au fur et à mesure qu'ils sont générés, caractère par caractère. Cela est utile pour afficher progressivement les réponses de l'IA aux utilisateurs.
+
+### Paramètres de requête
+
+Ajoutez ces paramètres pour activer le streaming :
+
+- `stream` - Définissez à `true` pour activer le streaming Server-Sent Events (SSE)
+- `selectedOutputs` - Tableau des sorties de blocs à diffuser en streaming (par exemple, `["agent1.content"]`)
+
+### Format de sortie de bloc
+
+Utilisez le format `blockName.attribute` pour spécifier quelles sorties de blocs diffuser en streaming :
+- Format : `"blockName.attribute"` (par exemple, si vous souhaitez diffuser en streaming le contenu du bloc Agent 1, vous utiliseriez `"agent1.content"`)
+- Les noms de blocs ne sont pas sensibles à la casse et les espaces sont ignorés (voir l'exemple après cette liste)
+
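+A minimal sketch of the matching rule above (illustrative only, not the service's actual implementation): lower-casing the name and stripping spaces maps a display name such as `Agent 1` onto the `agent1` reference.
+
+```python
+def normalize_block_name(name: str) -> str:
+    """Illustrative only: 'Agent 1' and 'agent1' refer to the same block."""
+    return name.lower().replace(" ", "")
+
+assert normalize_block_name("Agent 1") == "agent1"
+selected_outputs = [normalize_block_name("Agent 1") + ".content"]  # ["agent1.content"]
+```
+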
+### Exemple de requête
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Count to five",
+ "stream": true,
+ "selectedOutputs": ["agent1.content"]
+ }'
+```
+
+### Format de réponse
+
+Les réponses en streaming utilisent le format Server-Sent Events (SSE) :
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+Chaque événement comprend :
+- **Fragments en streaming** : `{"blockId": "...", "chunk": "text"}` - Texte en temps réel au fur et à mesure qu'il est généré
+- **Événement final** : `{"event": "done", ...}` - Métadonnées d'exécution et résultats complets
+- **Terminateur** : `[DONE]` - Signale la fin du flux
+
+### Streaming de plusieurs blocs
+
+Lorsque `selectedOutputs` inclut plusieurs blocs, chaque fragment indique quel bloc l'a produit :
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Process this request",
+ "stream": true,
+ "selectedOutputs": ["agent1.content", "agent2.content"]
+ }'
+```
+
+Le champ `blockId` dans chaque fragment vous permet d'acheminer la sortie vers l'élément d'interface utilisateur approprié :
+
+```
+data: {"blockId":"agent1-uuid","chunk":"Processing..."}
+
+data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
+
+data: {"blockId":"agent1-uuid","chunk":" complete"}
+```
+
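+A minimal consumer sketch (using the `requests` library; the workflow ID and the handler functions are placeholders) that routes each chunk to a per-block handler based on `blockId`:
+
+```python
+import json
+import os
+import requests
+
+handlers = {
+    "agent1-uuid": lambda text: print("[agent 1]", text),  # placeholder block IDs
+    "agent2-uuid": lambda text: print("[agent 2]", text),
+}
+
+response = requests.post(
+    "https://sim.ai/api/workflows/WORKFLOW_ID/execute",
+    headers={"Content-Type": "application/json", "X-API-Key": os.getenv("SIM_API_KEY")},
+    json={
+        "message": "Process this request",
+        "stream": True,
+        "selectedOutputs": ["agent1.content", "agent2.content"],
+    },
+    stream=True,
+)
+
+for line in response.iter_lines():
+    if not line or not line.startswith(b"data: "):
+        continue
+    data = line[len(b"data: "):].decode("utf-8")
+    if data == "[DONE]":
+        break
+    event = json.loads(data)
+    if "chunk" in event:
+        # Dispatch the chunk to whichever block produced it
+        handlers.get(event["blockId"], print)(event["chunk"])
+    elif event.get("event") == "done":
+        print("metadata:", event.get("metadata"))
+```
+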
## Référence des sorties
| Référence | Description |
diff --git a/apps/docs/content/docs/ja/sdks/python.mdx b/apps/docs/content/docs/ja/sdks/python.mdx
index 173246235b..b667a9f3d8 100644
--- a/apps/docs/content/docs/ja/sdks/python.mdx
+++ b/apps/docs/content/docs/ja/sdks/python.mdx
@@ -10,7 +10,7 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
Simの公式Python SDKを使用すると、公式Python SDKを使用してPythonアプリケーションからプログラムでワークフローを実行できます。
- Python SDKはPython 3.8以上をサポートし、同期的なワークフロー実行を提供します。現在、すべてのワークフロー実行は同期的です。
+ Python SDKはPython 3.8以上をサポートし、非同期実行、指数バックオフによる自動レート制限、使用状況追跡機能を提供します。
## インストール
@@ -70,12 +70,17 @@ result = client.execute_workflow(
)
```
-**パラメータ:**
+**パラメータ:**
- `workflow_id` (str): 実行するワークフローのID
- `input_data` (dict, オプション): ワークフローに渡す入力データ
-- `timeout` (float, オプション): タイムアウト(秒)(デフォルト:30.0)
+- `timeout` (float, オプション): タイムアウト(秒)(デフォルト: 30.0)
+- `stream` (bool, オプション): ストリーミングレスポンスを有効にする(デフォルト: False)
+- `selected_outputs` (list[str], オプション): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`)
+- `async_execution` (bool, オプション): 非同期実行(デフォルト: False)
+
+**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
-**戻り値:** `WorkflowExecutionResult`
+`async_execution=True`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
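+
+As a brief, hedged sketch (not part of the official examples): the same call can return either result type, and checking for `task_id`, as the async example later on this page does, is enough to tell them apart. The workflow ID and input below are placeholders.
+
+```python
+import os
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+result = client.execute_workflow(
+    "workflow-id",
+    input_data={"data": "example"},
+    async_execution=True,
+)
+
+if hasattr(result, "task_id"):
+    # AsyncExecutionResult: queued; poll get_job_status(result.task_id) until it finishes
+    print("Queued task:", result.task_id)
+else:
+    # WorkflowExecutionResult: the run already completed synchronously
+    print("Finished:", result.success)
+```
+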
##### get_workflow_status()
@@ -86,7 +91,7 @@ status = client.get_workflow_status("workflow-id")
print("Is deployed:", status.is_deployed)
```
-**パラメータ:**
+**パラメータ:**
- `workflow_id` (str): ワークフローのID
**戻り値:** `WorkflowStatus`
@@ -107,28 +112,118 @@ if is_ready:
**戻り値:** `bool`
-##### execute_workflow_sync()
+##### get_job_status()
-
- 現在、このメソッドは `execute_workflow()` と同一です。すべての実行は同期的に行われるためです。このメソッドは、将来的に非同期実行が追加された際の互換性のために提供されています。
-
+非同期ジョブ実行のステータスを取得します。
-ワークフローを実行します(現在は同期的、`execute_workflow()` と同じ)。
+```python
+status = client.get_job_status("task-id-from-async-execution")
+print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
+if status["status"] == "completed":
+ print("Output:", status["output"])
+```
+
+**パラメータ:**
+- `task_id` (str): 非同期実行から返されたタスクID
+
+**戻り値:** `Dict[str, Any]`
+
+**レスポンスフィールド:**
+- `success` (bool): リクエストが成功したかどうか
+- `taskId` (str): タスクID
+- `status` (str): 次のいずれか: `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
+- `metadata` (dict): `startedAt`, `completedAt`, `duration`を含む
+- `output` (any, オプション): ワークフロー出力(完了時)
+- `error` (any, オプション): エラー詳細(失敗時)
+- `estimatedDuration` (int, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
+
+##### execute_with_retry()
+
+指数バックオフを使用してレート制限エラーで自動的に再試行するワークフロー実行。
```python
-result = client.execute_workflow_sync(
+result = client.execute_with_retry(
"workflow-id",
- input_data={"data": "some input"},
- timeout=60.0
+ input_data={"message": "Hello"},
+ timeout=30.0,
+ max_retries=3, # Maximum number of retries
+ initial_delay=1.0, # Initial delay in seconds
+ max_delay=30.0, # Maximum delay in seconds
+ backoff_multiplier=2.0 # Exponential backoff multiplier
)
```
**パラメータ:**
- `workflow_id` (str): 実行するワークフローのID
-- `input_data` (dict, optional): ワークフローに渡す入力データ
-- `timeout` (float): 初期リクエストのタイムアウト(秒)
+- `input_data` (dict, オプション): ワークフローに渡す入力データ
+- `timeout` (float, オプション): タイムアウト(秒)
+- `stream` (bool, オプション): ストリーミングレスポンスを有効にする
+- `selected_outputs` (list, オプション): ストリーミングするブロック出力
+- `async_execution` (bool, オプション): 非同期実行
+- `max_retries` (int, オプション): 最大再試行回数(デフォルト: 3)
+- `initial_delay` (float, オプション): 初期遅延(秒)(デフォルト: 1.0)
+- `max_delay` (float, オプション): 最大遅延(秒)(デフォルト: 30.0)
+- `backoff_multiplier` (float, オプション): バックオフ乗数(デフォルト: 2.0)
+
+**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
-**戻り値:** `WorkflowExecutionResult`
+リトライロジックは、サンダリングハード問題を防ぐために±25%のジッターを伴う指数バックオフ(1秒→2秒→4秒→8秒...)を使用します。APIが `retry-after` ヘッダーを提供する場合、代わりにそれが使用されます。
+
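+A rough sketch of the delay schedule described above. The helper below is illustrative only: parameter names mirror `execute_with_retry()`, but the exact jitter and capping logic inside the SDK may differ, and when the API returns a `retry-after` header the SDK uses that value instead of this schedule.
+
+```python
+import random
+
+def backoff_delays(max_retries=3, initial_delay=1.0, max_delay=30.0, backoff_multiplier=2.0):
+    """Yield the wait before each retry: 1s -> 2s -> 4s -> ..., capped, with +/-25% jitter."""
+    delay = initial_delay
+    for _ in range(max_retries):
+        jitter = random.uniform(0.75, 1.25)  # spread out simultaneous retries
+        yield min(delay * jitter, max_delay)
+        delay *= backoff_multiplier
+
+print([round(d, 2) for d in backoff_delays(max_retries=5)])  # e.g. [0.91, 2.13, 3.77, 8.6, 16.4]
+```
+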
+##### get_rate_limit_info()
+
+最後のAPIレスポンスから現在のレート制限情報を取得します。
+
+```python
+rate_limit_info = client.get_rate_limit_info()
+if rate_limit_info:
+ print("Limit:", rate_limit_info.limit)
+ print("Remaining:", rate_limit_info.remaining)
+ print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
+```
+
+**戻り値:** `RateLimitInfo | None`
+
+##### get_usage_limits()
+
+アカウントの現在の使用制限とクォータ情報を取得します。
+
+```python
+limits = client.get_usage_limits()
+print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
+print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
+print("Current period cost:", limits.usage["currentPeriodCost"])
+print("Plan:", limits.usage["plan"])
+```
+
+**戻り値:** `UsageLimits`
+
+**レスポンス構造:**
+
+```python
+{
+ "success": bool,
+ "rateLimit": {
+ "sync": {
+ "isLimited": bool,
+ "limit": int,
+ "remaining": int,
+ "resetAt": str
+ },
+ "async": {
+ "isLimited": bool,
+ "limit": int,
+ "remaining": int,
+ "resetAt": str
+ },
+ "authType": str # 'api' or 'manual'
+ },
+ "usage": {
+ "currentPeriodCost": float,
+ "limit": float,
+ "plan": str # e.g., 'free', 'pro'
+ }
+}
+```
##### set_api_key()
@@ -170,6 +265,18 @@ class WorkflowExecutionResult:
total_duration: Optional[float] = None
```
+### AsyncExecutionResult
+
+```python
+@dataclass
+class AsyncExecutionResult:
+ success: bool
+ task_id: str
+ status: str # 'queued'
+ created_at: str
+ links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
+```
+
### WorkflowStatus
```python
@@ -181,6 +288,27 @@ class WorkflowStatus:
needs_redeployment: bool = False
```
+### RateLimitInfo
+
+```python
+@dataclass
+class RateLimitInfo:
+ limit: int
+ remaining: int
+ reset: int
+ retry_after: Optional[int] = None
+```
+
+### UsageLimits
+
+```python
+@dataclass
+class UsageLimits:
+ success: bool
+ rate_limit: Dict[str, Any]
+ usage: Dict[str, Any]
+```
+
### SimStudioError
```python
@@ -191,6 +319,13 @@ class SimStudioError(Exception):
self.status = status
```
+**一般的なエラーコード:**
+- `UNAUTHORIZED`: 無効なAPIキー
+- `TIMEOUT`: リクエストがタイムアウトしました
+- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
+- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
+- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
+
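+A small dispatch sketch for the codes above (the handling shown is illustrative, not prescribed by the SDK; the workflow ID and input are placeholders):
+
+```python
+import os
+from simstudio import SimStudioClient, SimStudioError
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+try:
+    result = client.execute_workflow("workflow-id", input_data={"message": "hi"})
+except SimStudioError as error:
+    if error.code == "UNAUTHORIZED":
+        print("Check that SIM_API_KEY is set to a valid key")
+    elif error.code == "RATE_LIMIT_EXCEEDED":
+        print("Back off and retry, or use execute_with_retry()")
+    elif error.code == "USAGE_LIMIT_EXCEEDED":
+        print("Plan limit reached; see get_usage_limits()")
+    else:
+        print("Workflow execution failed:", error)
+```
+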
## 例
### 基本的なワークフロー実行
@@ -214,7 +349,7 @@ class SimStudioError(Exception):
import os
from simstudio import SimStudioClient
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def run_workflow():
try:
@@ -252,7 +387,7 @@ run_workflow()
from simstudio import SimStudioClient, SimStudioError
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_with_error_handling():
try:
@@ -277,14 +412,14 @@ def execute_with_error_handling():
### コンテキストマネージャーの使用
-リソースのクリーンアップを自動的に処理するためにコンテキストマネージャーとしてクライアントを使用します:
+リソースのクリーンアップを自動的に処理するためにクライアントをコンテキストマネージャーとして使用します:
```python
from simstudio import SimStudioClient
import os
# Using context manager to automatically close the session
-with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
+with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
result = client.execute_workflow("workflow-id")
print("Result:", result)
# Session is automatically closed here
@@ -298,7 +433,7 @@ with SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY")) as client:
from simstudio import SimStudioClient
import os
-client = SimStudioClient(api_key=os.getenv("SIMSTUDIO_API_KEY"))
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
def execute_workflows_batch(workflow_data_pairs):
"""Execute multiple workflows with different input data."""
@@ -339,6 +474,230 @@ for result in results:
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
```
+### 非同期ワークフロー実行
+
+長時間実行されるタスクのためにワークフローを非同期で実行します:
+
+```python
+import os
+import time
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_async():
+ try:
+ # Start async execution
+ result = client.execute_workflow(
+ "workflow-id",
+ input_data={"data": "large dataset"},
+ async_execution=True # Execute asynchronously
+ )
+
+ # Check if result is an async execution
+ if hasattr(result, 'task_id'):
+ print(f"Task ID: {result.task_id}")
+ print(f"Status endpoint: {result.links['status']}")
+
+ # Poll for completion
+ status = client.get_job_status(result.task_id)
+
+ while status["status"] in ["queued", "processing"]:
+ print(f"Current status: {status['status']}")
+ time.sleep(2) # Wait 2 seconds
+ status = client.get_job_status(result.task_id)
+
+ if status["status"] == "completed":
+ print("Workflow completed!")
+ print(f"Output: {status['output']}")
+ print(f"Duration: {status['metadata']['duration']}")
+ else:
+ print(f"Workflow failed: {status['error']}")
+
+ except Exception as error:
+ print(f"Error: {error}")
+
+execute_async()
+```
+
+### レート制限とリトライ
+
+指数バックオフを使用して自動的にレート制限を処理します:
+
+```python
+import os
+from simstudio import SimStudioClient, SimStudioError
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_with_retry_handling():
+ try:
+ # Automatically retries on rate limit
+ result = client.execute_with_retry(
+ "workflow-id",
+ input_data={"message": "Process this"},
+ max_retries=5,
+ initial_delay=1.0,
+ max_delay=60.0,
+ backoff_multiplier=2.0
+ )
+
+ print(f"Success: {result}")
+ except SimStudioError as error:
+ if error.code == "RATE_LIMIT_EXCEEDED":
+ print("Rate limit exceeded after all retries")
+
+ # Check rate limit info
+ rate_limit_info = client.get_rate_limit_info()
+ if rate_limit_info:
+ from datetime import datetime
+ reset_time = datetime.fromtimestamp(rate_limit_info.reset)
+ print(f"Rate limit resets at: {reset_time}")
+
+execute_with_retry_handling()
+```
+
+### 使用状況モニタリング
+
+アカウントの使用状況と制限をモニタリングします:
+
+```python
+import os
+from simstudio import SimStudioClient
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def check_usage():
+ try:
+ limits = client.get_usage_limits()
+
+ print("=== Rate Limits ===")
+ print("Sync requests:")
+ print(f" Limit: {limits.rate_limit['sync']['limit']}")
+ print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
+ print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
+ print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
+
+ print("\nAsync requests:")
+ print(f" Limit: {limits.rate_limit['async']['limit']}")
+ print(f" Remaining: {limits.rate_limit['async']['remaining']}")
+ print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
+ print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
+
+ print("\n=== Usage ===")
+ print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
+ print(f"Limit: ${limits.usage['limit']:.2f}")
+ print(f"Plan: {limits.usage['plan']}")
+
+ percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
+ print(f"Usage: {percent_used:.1f}%")
+
+ if percent_used > 80:
+ print("⚠️ Warning: You are approaching your usage limit!")
+
+ except Exception as error:
+ print(f"Error checking usage: {error}")
+
+check_usage()
+```
+
+### ワークフローの実行ストリーミング
+
+リアルタイムのストリーミングレスポンスでワークフローを実行します:
+
+```python
+from simstudio import SimStudioClient
+import os
+
+client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
+
+def execute_with_streaming():
+ """Execute workflow with streaming enabled."""
+ try:
+ # Enable streaming for specific block outputs
+ result = client.execute_workflow(
+ "workflow-id",
+ input_data={"message": "Count to five"},
+ stream=True,
+ selected_outputs=["agent1.content"] # Use blockName.attribute format
+ )
+
+ print("Workflow result:", result)
+ except Exception as error:
+ print("Error:", error)
+
+execute_with_streaming()
+```
+
+ストリーミングレスポンスはServer-Sent Events(SSE)形式に従います:
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+**Flaskストリーミングの例:**
+
+```python
+from flask import Flask, Response, stream_with_context
+import requests
+import json
+import os
+
+app = Flask(__name__)
+
+@app.route('/stream-workflow')
+def stream_workflow():
+ """Stream workflow execution to the client."""
+
+ def generate():
+ response = requests.post(
+ 'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
+ headers={
+ 'Content-Type': 'application/json',
+ 'X-API-Key': os.getenv('SIM_API_KEY')
+ },
+ json={
+ 'message': 'Generate a story',
+ 'stream': True,
+ 'selectedOutputs': ['agent1.content']
+ },
+ stream=True
+ )
+
+ for line in response.iter_lines():
+ if line:
+ decoded_line = line.decode('utf-8')
+ if decoded_line.startswith('data: '):
+ data = decoded_line[6:] # Remove 'data: ' prefix
+
+ if data == '[DONE]':
+ break
+
+ try:
+ parsed = json.loads(data)
+ if 'chunk' in parsed:
+ yield f"data: {json.dumps(parsed)}\n\n"
+ elif parsed.get('event') == 'done':
+ yield f"data: {json.dumps(parsed)}\n\n"
+ print("Execution complete:", parsed.get('metadata'))
+ except json.JSONDecodeError:
+ pass
+
+ return Response(
+ stream_with_context(generate()),
+ mimetype='text/event-stream'
+ )
+
+if __name__ == '__main__':
+ app.run(debug=True)
+```
+
### 環境設定
環境変数を使用してクライアントを設定します:
@@ -352,8 +711,8 @@ for result in results:
# Development configuration
client = SimStudioClient(
- api_key=os.getenv("SIMSTUDIO_API_KEY"),
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+    api_key=os.getenv("SIM_API_KEY"),
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
@@ -365,13 +724,13 @@ for result in results:
from simstudio import SimStudioClient
# Production configuration with error handling
- api_key = os.getenv("SIMSTUDIO_API_KEY")
+ api_key = os.getenv("SIM_API_KEY")
if not api_key:
- raise ValueError("SIMSTUDIO_API_KEY environment variable is required")
+ raise ValueError("SIM_API_KEY environment variable is required")
client = SimStudioClient(
api_key=api_key,
- base_url=os.getenv("SIMSTUDIO_BASE_URL", "https://sim.ai")
+ base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
)
```
diff --git a/apps/docs/content/docs/ja/sdks/typescript.mdx b/apps/docs/content/docs/ja/sdks/typescript.mdx
index 1d2a6e8196..785f60b29d 100644
--- a/apps/docs/content/docs/ja/sdks/typescript.mdx
+++ b/apps/docs/content/docs/ja/sdks/typescript.mdx
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
import { Step, Steps } from 'fumadocs-ui/components/steps'
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
-公式TypeScript/JavaScript SDKはSimのために完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。これにより、Node.jsアプリケーション、Webアプリケーション、その他のJavaScript環境からプログラムでワークフローを実行することができます。現在、すべてのワークフロー実行は同期的に行われます。
+Sim用の公式TypeScript/JavaScript SDKは、完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。これにより、Node.jsアプリケーション、Webアプリケーション、その他のJavaScript環境からプログラムによってワークフローを実行することができます。
- TypeScript SDKは完全な型安全性を提供し、Node.jsとブラウザ環境の両方をサポートしています。現在、すべてのワークフロー実行は同期的に行われます。
+ TypeScript SDKは、完全な型安全性、非同期実行サポート、指数バックオフによる自動レート制限、使用状況追跡を提供します。
## インストール
@@ -95,8 +95,13 @@ const result = await client.executeWorkflow('workflow-id', {
- `options` (ExecutionOptions, オプション):
- `input` (any): ワークフローに渡す入力データ
- `timeout` (number): タイムアウト(ミリ秒)(デフォルト: 30000)
+ - `stream` (boolean): ストリーミングレスポンスを有効にする(デフォルト: false)
+ - `selectedOutputs` (string[]): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`)
+ - `async` (boolean): 非同期実行(デフォルト: false)
-**戻り値:** `Promise`
+**戻り値:** `Promise`
+
+`async: true`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
##### getWorkflowStatus()
@@ -114,7 +119,7 @@ console.log('Is deployed:', status.isDeployed);
##### validateWorkflow()
-ワークフローが実行準備ができているか検証します。
+ワークフローが実行準備ができているかを検証します。
```typescript
const isReady = await client.validateWorkflow('workflow-id');
@@ -128,28 +133,117 @@ if (isReady) {
**戻り値:** `Promise`
-##### executeWorkflowSync()
+##### getJobStatus()
-
- 現在、このメソッドは `executeWorkflow()` と同一です。すべての実行は同期的に行われるためです。このメソッドは、将来的に非同期実行が追加された際の互換性のために提供されています。
-
+非同期ジョブ実行のステータスを取得します。
+
+```typescript
+const status = await client.getJobStatus('task-id-from-async-execution');
+console.log('Status:', status.status); // 'queued', 'processing', 'completed', 'failed'
+if (status.status === 'completed') {
+ console.log('Output:', status.output);
+}
+```
+
+**パラメータ:**
+- `taskId` (string): 非同期実行から返されたタスクID
+
+**戻り値:** `Promise`
-ワークフローを実行します(現在は同期的、`executeWorkflow()` と同じ)。
+**レスポンスフィールド:**
+- `success` (boolean): リクエストが成功したかどうか
+- `taskId` (string): タスクID
+- `status` (string): 次のいずれか `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
+- `metadata` (object): `startedAt`, `completedAt`, および `duration` を含む
+- `output` (any, オプション): ワークフロー出力(完了時)
+- `error` (any, オプション): エラー詳細(失敗時)
+- `estimatedDuration` (number, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
+
+##### executeWithRetry()
+
+レート制限エラー時に指数バックオフを使用して自動的に再試行するワークフロー実行。
```typescript
-const result = await client.executeWorkflowSync('workflow-id', {
- input: { data: 'some input' },
- timeout: 60000
+const result = await client.executeWithRetry('workflow-id', {
+ input: { message: 'Hello' },
+ timeout: 30000
+}, {
+ maxRetries: 3, // Maximum number of retries
+ initialDelay: 1000, // Initial delay in ms (1 second)
+ maxDelay: 30000, // Maximum delay in ms (30 seconds)
+ backoffMultiplier: 2 // Exponential backoff multiplier
});
```
**パラメータ:**
- `workflowId` (string): 実行するワークフローのID
-- `options` (ExecutionOptions, オプション):
- - `input` (any): ワークフローに渡す入力データ
- - `timeout` (number): 初期リクエストのタイムアウト(ミリ秒)
+- `options` (ExecutionOptions, オプション): `executeWorkflow()`と同じ
+- `retryOptions` (RetryOptions, オプション):
+ - `maxRetries` (number): 最大再試行回数(デフォルト: 3)
+ - `initialDelay` (number): 初期遅延(ミリ秒)(デフォルト: 1000)
+ - `maxDelay` (number): 最大遅延(ミリ秒)(デフォルト: 30000)
+ - `backoffMultiplier` (number): バックオフ乗数(デフォルト: 2)
+
+**戻り値:** `Promise`
+
+再試行ロジックは、サンダリングハード問題を防ぐために±25%のジッターを含む指数バックオフ(1秒→2秒→4秒→8秒...)を使用します。APIが`retry-after`ヘッダーを提供する場合、代わりにそれが使用されます。
+
+##### getRateLimitInfo()
+
+最後のAPIレスポンスから現在のレート制限情報を取得します。
+
+```typescript
+const rateLimitInfo = client.getRateLimitInfo();
+if (rateLimitInfo) {
+ console.log('Limit:', rateLimitInfo.limit);
+ console.log('Remaining:', rateLimitInfo.remaining);
+ console.log('Reset:', new Date(rateLimitInfo.reset * 1000));
+}
+```
+
+**戻り値:** `RateLimitInfo | null`
+
+##### getUsageLimits()
+
+アカウントの現在の使用制限とクォータ情報を取得します。
+
+```typescript
+const limits = await client.getUsageLimits();
+console.log('Sync requests remaining:', limits.rateLimit.sync.remaining);
+console.log('Async requests remaining:', limits.rateLimit.async.remaining);
+console.log('Current period cost:', limits.usage.currentPeriodCost);
+console.log('Plan:', limits.usage.plan);
+```
+
+**戻り値:** `Promise`
+
+**レスポンス構造:**
-**戻り値:** `Promise`
+```typescript
+{
+ success: boolean
+ rateLimit: {
+ sync: {
+ isLimited: boolean
+ limit: number
+ remaining: number
+ resetAt: string
+ }
+ async: {
+ isLimited: boolean
+ limit: number
+ remaining: number
+ resetAt: string
+ }
+ authType: string // 'api' or 'manual'
+ }
+ usage: {
+ currentPeriodCost: number
+ limit: number
+ plan: string // e.g., 'free', 'pro'
+ }
+}
+```
##### setApiKey()
@@ -167,7 +261,7 @@ client.setApiKey('new-api-key');
client.setBaseUrl('https://my-custom-domain.com');
```
-## 型
+## 型定義
### WorkflowExecutionResult
@@ -187,6 +281,20 @@ interface WorkflowExecutionResult {
}
```
+### AsyncExecutionResult
+
+```typescript
+interface AsyncExecutionResult {
+ success: boolean;
+ taskId: string;
+ status: 'queued';
+ createdAt: string;
+ links: {
+ status: string; // e.g., "/api/jobs/{taskId}"
+ };
+}
+```
+
### WorkflowStatus
```typescript
@@ -198,6 +306,45 @@ interface WorkflowStatus {
}
```
+### RateLimitInfo
+
+```typescript
+interface RateLimitInfo {
+ limit: number;
+ remaining: number;
+ reset: number;
+ retryAfter?: number;
+}
+```
+
+### UsageLimits
+
+```typescript
+interface UsageLimits {
+ success: boolean;
+ rateLimit: {
+ sync: {
+ isLimited: boolean;
+ limit: number;
+ remaining: number;
+ resetAt: string;
+ };
+ async: {
+ isLimited: boolean;
+ limit: number;
+ remaining: number;
+ resetAt: string;
+ };
+ authType: string;
+ };
+ usage: {
+ currentPeriodCost: number;
+ limit: number;
+ plan: string;
+ };
+}
+```
+
### SimStudioError
```typescript
@@ -207,9 +354,16 @@ class SimStudioError extends Error {
}
```
+**一般的なエラーコード:**
+- `UNAUTHORIZED`: 無効なAPIキー
+- `TIMEOUT`: リクエストがタイムアウトしました
+- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
+- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
+- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
+
## 例
-### 基本的なワークフローの実行
+### 基本的なワークフロー実行
@@ -230,7 +384,7 @@ class SimStudioError extends Error {
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function runWorkflow() {
@@ -271,7 +425,7 @@ runWorkflow();
import { SimStudioClient, SimStudioError } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
async function executeWithErrorHandling() {
@@ -315,14 +469,14 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Development configuration
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL // optional
+ baseUrl: process.env.SIM_BASE_URL // optional
});
```
@@ -333,21 +487,21 @@ async function executeWithErrorHandling() {
import { SimStudioClient } from 'simstudio-ts-sdk';
// Production configuration with validation
- const apiKey = process.env.SIMSTUDIO_API_KEY;
+ const apiKey = process.env.SIM_API_KEY;
if (!apiKey) {
- throw new Error('SIMSTUDIO_API_KEY environment variable is required');
+ throw new Error('SIM_API_KEY environment variable is required');
}
const client = new SimStudioClient({
apiKey,
- baseUrl: process.env.SIMSTUDIO_BASE_URL || 'https://sim.ai'
+ baseUrl: process.env.SIM_BASE_URL || 'https://sim.ai'
});
```
-### Node.js Expressとの統合
+### Node.js Express統合
Express.jsサーバーとの統合:
@@ -357,7 +511,7 @@ import { SimStudioClient } from 'simstudio-ts-sdk';
const app = express();
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
app.use(express.json());
@@ -399,7 +553,7 @@ import { NextApiRequest, NextApiResponse } from 'next';
import { SimStudioClient } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
export default async function handler(
@@ -430,7 +584,7 @@ export default async function handler(
### ブラウザでの使用
-ブラウザで使用する場合(適切なCORS設定が必要):
+ブラウザでの使用(適切なCORS設定が必要):
```typescript
import { SimStudioClient } from 'simstudio-ts-sdk';
@@ -464,7 +618,7 @@ document.getElementById('executeBtn')?.addEventListener('click', executeClientSi
```
- ブラウザでSDKを使用する際は、機密性の高いAPIキーを公開しないよう注意してください。バックエンドプロキシや権限が制限された公開APIキーの使用を検討してください。
+ ブラウザでSDKを使用する場合、機密性の高いAPIキーを公開しないよう注意してください。バックエンドプロキシや権限が制限された公開APIキーの使用を検討してください。
### Reactフックの例
@@ -476,7 +630,7 @@ import { useState, useCallback } from 'react';
import { SimStudioClient, WorkflowExecutionResult } from 'simstudio-ts-sdk';
const client = new SimStudioClient({
- apiKey: process.env.NEXT_PUBLIC_SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
interface UseWorkflowResult {
@@ -532,7 +686,7 @@ function WorkflowComponent() {
-
+
{error &&
+ );
+}
+```
+
+## Getting Your API Key
-
- 访问 [Sim](https://sim.ai) 并登录您的账户。
+
+ Navigate to [Sim](https://sim.ai) and log in to your account.
-
- 导航到您想要以编程方式执行的工作流。
+
+ Navigate to the workflow you want to execute programmatically.
-
- 如果尚未部署,请点击“部署”以部署您的工作流。
+
+ Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
-
- 在部署过程中,选择或创建一个 API 密钥。
+
+ During the deployment process, select or create an API key.
-
- 复制 API 密钥以在您的 TypeScript/JavaScript 应用程序中使用。
+
+ Copy the API key to use in your TypeScript/JavaScript application.
- 请确保您的 API 密钥安全,切勿将其提交到版本控制中。使用环境变量或安全配置管理。
+ Keep your API key secure and never commit it to version control. Use environment variables or secure configuration management.
-## 要求
+## Requirements
- Node.js 16+
-- TypeScript 5.0+(适用于 TypeScript 项目)
+- TypeScript 5.0+ (for TypeScript projects)
-## TypeScript 支持
+## TypeScript Support
-SDK 是用 TypeScript 编写的,并提供完整的类型安全:
+The SDK is written in TypeScript and provides full type safety:
```typescript
import {
@@ -586,22 +969,22 @@ import {
SimStudioError
} from 'simstudio-ts-sdk';
-// Type-safe client initialization
+// 类型安全的客户端初始化
const client: SimStudioClient = new SimStudioClient({
- apiKey: process.env.SIMSTUDIO_API_KEY!
+ apiKey: process.env.SIM_API_KEY!
});
-// Type-safe workflow execution
+// 类型安全的工作流执行
const result: WorkflowExecutionResult = await client.executeWorkflow('workflow-id', {
input: {
- message: 'Hello, TypeScript!'
+ message: '你好,TypeScript!'
}
});
-// Type-safe status checking
+// 类型安全的状态检查
const status: WorkflowStatus = await client.getWorkflowStatus('workflow-id');
```
## 许可证
-Apache-2.0
\ No newline at end of file
+Apache-2.0
diff --git a/apps/docs/content/docs/zh/triggers/api.mdx b/apps/docs/content/docs/zh/triggers/api.mdx
index 5f4ecb3b71..e5f6ef72ef 100644
--- a/apps/docs/content/docs/zh/triggers/api.mdx
+++ b/apps/docs/content/docs/zh/triggers/api.mdx
@@ -38,6 +38,84 @@ curl -X POST \
成功的响应会返回来自执行器的序列化执行结果。错误会显示验证、认证或工作流失败的信息。
+## 流式响应
+
+启用实时流式传输以在生成时逐字符接收工作流输出。这对于向用户逐步显示 AI 响应非常有用。
+
+### 请求参数
+
+添加以下参数以启用流式传输:
+
+- `stream` - 设置为 `true` 以启用服务器发送事件 (SSE) 流式传输
+- `selectedOutputs` - 要流式传输的块输出数组(例如,`["agent1.content"]`)
+
+### 块输出格式
+
+使用 `blockName.attribute` 格式指定要流式传输的块输出:
+- 格式:`"blockName.attribute"`(例如,如果您想流式传输 Agent 1 块的内容,可以使用 `"agent1.content"`)
+- 块名称不区分大小写,空格会被忽略
+
+### 示例请求
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Count to five",
+ "stream": true,
+ "selectedOutputs": ["agent1.content"]
+ }'
+```
+
+### 响应格式
+
+流式响应使用服务器发送事件 (SSE) 格式:
+
+```
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
+
+data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", three"}
+
+data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
+
+data: [DONE]
+```
+
+每个事件包括:
+- **流式块**:`{"blockId": "...", "chunk": "text"}` - 实时生成的文本
+- **最终事件**:`{"event": "done", ...}` - 执行元数据和完整结果
+- **终止符**:`[DONE]` - 表示流结束
+
+### 多块流式传输
+
+当 `selectedOutputs` 包含多个块时,每个块会指示其来源:
+
+```bash
+curl -X POST \
+ https://sim.ai/api/workflows/WORKFLOW_ID/execute \
+ -H 'Content-Type: application/json' \
+ -H 'X-API-Key: YOUR_KEY' \
+ -d '{
+ "message": "Process this request",
+ "stream": true,
+ "selectedOutputs": ["agent1.content", "agent2.content"]
+ }'
+```
+
+每个块中的 `blockId` 字段可让您将输出路由到正确的 UI 元素:
+
+```
+data: {"blockId":"agent1-uuid","chunk":"Processing..."}
+
+data: {"blockId":"agent2-uuid","chunk":"Analyzing..."}
+
+data: {"blockId":"agent1-uuid","chunk":" complete"}
+```
+
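+A short, illustrative sketch (assuming the `requests` library; `WORKFLOW_ID` is a placeholder) that collects each block's streamed text into a dictionary keyed by `blockId`:
+
+```python
+import json
+import os
+import requests
+
+def collect_stream(workflow_id: str, payload: dict) -> dict:
+    """Accumulate streamed text per block, keyed by blockId."""
+    outputs = {}
+    response = requests.post(
+        f"https://sim.ai/api/workflows/{workflow_id}/execute",
+        headers={"Content-Type": "application/json", "X-API-Key": os.getenv("SIM_API_KEY")},
+        json={**payload, "stream": True},
+        stream=True,
+    )
+    for line in response.iter_lines():
+        if not line or not line.startswith(b"data: "):
+            continue
+        data = line[len(b"data: "):].decode("utf-8")
+        if data == "[DONE]":
+            break
+        event = json.loads(data)
+        if "chunk" in event:
+            outputs[event["blockId"]] = outputs.get(event["blockId"], "") + event["chunk"]
+    return outputs
+
+texts = collect_stream("WORKFLOW_ID", {
+    "message": "Process this request",
+    "selectedOutputs": ["agent1.content", "agent2.content"],
+})
+print(texts)
+```
+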
## 输出参考
| 参考 | 描述 |
@@ -48,5 +126,5 @@ curl -X POST \
如果未定义输入格式,执行器仅在 `` 处暴露原始 JSON。
-一个工作流只能包含一个 API 触发器。更改后发布新的部署,以确保端点保持最新。
+一个工作流只能包含一个 API 触发器。更改后发布新部署,以确保端点保持最新。
diff --git a/apps/docs/i18n.lock b/apps/docs/i18n.lock
index c8a891bbc2..4ef71f624c 100644
--- a/apps/docs/i18n.lock
+++ b/apps/docs/i18n.lock
@@ -2231,8 +2231,8 @@ checksums:
d394ac42b56429e524dc5a771b0610b9:
meta/title: 9da9098244c6c7a0ebcc3009cef66c7b
content/0: 9218a2e190598690d0fc5c27c30f01bb
- content/1: 8a3feb937915c3191f1eecb10b94297d
- content/2: 99af1bfe8d1629acdb5a9229430af791
+ content/1: 6c88f52bdb4e4a5668d1b25b5f444f48
+ content/2: 7e827833339b6b4c6abdf154de7f9a0c
content/3: 391128dee61b5d0d43eba88567aaef42
content/4: 4d132e6346723ecf45c408afeab2757b
content/5: d3df764a69d2926d10aed65ad8693e9f
@@ -2254,79 +2254,155 @@ checksums:
content/21: bd0e851fdde30c0e94c00b60f85d655e
content/22: 837ca74ccf63f23333c54e010faf681c
content/23: 8fb33cfc314b86d35df8ea1b10466f20
- content/24: 09e003c28fb1810e9afefe51324265fd
- content/25: 07fb2d6b16c75839a32d383f12419ca5
- content/26: 9fd0cd99a879360d355d91e9cfb41531
- content/27: f6fed8ebf67ba12199b4474a754969ae
- content/28: bcee3febe1be079e53aea841e2b08b3b
- content/29: f00be560fcd4ff3f53d61c70c249597b
- content/30: fa4fa1573c369fcc2eee57d7852caf9c
- content/31: fa68c1f8c9ea3dba96b2ea7edb8680d7
- content/32: 304e608d459ef53f308e6ea1f6f8b54a
- content/33: cb63e267fb16a7aaeea45c4ca29bf697
- content/34: f00be560fcd4ff3f53d61c70c249597b
- content/35: f7c266db4d07d040f8f788be598476cf
- content/36: cd306281b5136831335e6376edb1e822
- content/37: afb7b7f27d48deb3154da26135e17fb8
- content/38: 179000198c9cd78601b5c862e9c8659f
- content/39: 28c37db52f39323e125fcaf0e60911db
- content/40: 8de5041c3c93b70619ec1723f657757f
- content/41: 07fb2d6b16c75839a32d383f12419ca5
- content/42: 65db7855a79ab283c6409e81a7703d19
- content/43: 191fb7087315702a36001c69d745ebed
- content/44: f6113edfd7a0062af4d88bcf31a73f45
- content/45: 1e84fc1eee794c20e3411b3a34a02278
- content/46: ec31e300f79185f734d32b1cfaf8a137
- content/47: f7ad301d02e8826921644a5268f13f32
- content/48: 025d60fdaf93713ccb34abcbc71dfa2b
- content/49: 70a9ece41fdad09f3a06ca0efdb92ae9
- content/50: 356d67409ae0d82a72d052573314f660
- content/51: bb172f1678686d9d49666c516716de24
- content/52: 529711647eccfdf031dbb5bc70581986
- content/53: 9a84c92505eb468916637fcf2cef70f2
- content/54: 225bca3fb37bd38cd645e8a698abbfa9
- content/55: 33b9b1e9744318597da4b925b0995be2
- content/56: 6afe3b62e6d53c3dcd07149abcab4c05
- content/57: b6363faee219321c16d41a9c3f8d3bdd
- content/58: 24ef65dd034a2881a978d8d0065fb258
- content/59: b8b23ab79a7eb32c6f8d5f49f43c51f6
- content/60: be358297e2bbb9ab4689d11d072611d1
- content/61: b2eaadc86870d2e64b55c89e0348ef93
- content/62: 450265802cb0ba5b435b74b9cac1bf23
- content/63: b735ede8764e4b2dfb25967e33ab5143
- content/64: 0f881e586a03c4b916456c73fad48358
- content/65: 62bbbeca4e0500062f5cdbbc1614dde0
- content/66: 55d47e12745c1b0b62c9bdf6e8449730
- content/67: 1d873c7ccd87f564e2b30387b40ee9e9
- content/68: 3304a33dfb626c6e2267c062e8956a9d
- content/69: 77256b36307e9f7293bd00063239c8ee
- content/70: ac686382ccbb07d75b0f141af500dfd5
- content/71: 38f7308105b0843792c8e2fb93e1895d
- content/72: 62f6977928b2f596ed7d54383d1e779d
- content/73: 3415d6c5ad1df56b212d69519bdf0fea
- content/74: d1a104e667cd2284ab5b3fead4a6ba1d
- content/75: a81d7cd4a644a0061dad3a5973b4fe06
- content/76: 981447969a71fd038049e9d9f40f4f8c
- content/77: 531941216d31cb1947367c3c02127baa
- content/78: bf1afa789fdfa5815faaf43574341e90
- content/79: 5f2fe55d098d4e4f438af595708b2280
- content/80: 41b8f7cf8899a0e92e255a3f845f9584
- content/81: 5040bab65fb6bb77862f8098d16afbb5
- content/82: a88260a5b5e23da73e4534376adeb193
- content/83: e5e2329cdc226186fe9d44767528a4a0
- content/84: 1773624e9ac3d5132b505894ef51977e
- content/85: d62c9575cc66feec7589fba95c9f7aee
- content/86: 7af652c5407ae7e156ab27b21a4f26d3
- content/87: 4aa69b29cca745389dea8cd74eba4f83
- content/88: 46877074b69519165997fa0968169611
- content/89: d8ebc69b18baf83689ba315e7b4946ea
- content/90: ecd571818ddf3d31b08b80a25958a662
- content/91: 7dcdf2fbf3fce3f94987046506e12a9b
+ content/24: f8fbd9375113651be0f2498bdacde0ef
+ content/25: 2c57d87589b65f785e0fbbda60d32e54
+ content/26: 2541eb37fca67a6d7c5a10f8067127a3
+ content/27: 9fd0cd99a879360d355d91e9cfb41531
+ content/28: f6fed8ebf67ba12199b4474a754969ae
+ content/29: bcee3febe1be079e53aea841e2b08b3b
+ content/30: f00be560fcd4ff3f53d61c70c249597b
+ content/31: fa4fa1573c369fcc2eee57d7852caf9c
+ content/32: fa68c1f8c9ea3dba96b2ea7edb8680d7
+ content/33: 304e608d459ef53f308e6ea1f6f8b54a
+ content/34: cb63e267fb16a7aaeea45c4ca29bf697
+ content/35: f00be560fcd4ff3f53d61c70c249597b
+ content/36: f7c266db4d07d040f8f788be598476cf
+ content/37: d93b320646fde160c0fdd1936ee63cfb
+ content/38: c76e2089a41880dd6feac759ec8867c2
+ content/39: 0d61b9631788e64d1c1335b08c907107
+ content/40: 5ec50e6f56bd0a9a55fae14fa02185d9
+ content/41: 47bdc3ba4908bf1ce3d1a0a8f646b339
+ content/42: 5e8af7125448a6021a6ea431486dd587
+ content/43: 15017685691db74889cc6116373e44a5
+ content/44: 4d4ad5d56e800e5d227a07339300fc7f
+ content/45: c035728b4b81d006a18ba9ba7b9c638d
+ content/46: f1c9ad60574d19a5f93c837ab9d88890
+ content/47: 2c57d87589b65f785e0fbbda60d32e54
+ content/48: e7019a0e12f7295893c5822356fc0df0
+ content/49: 5912d8d9df5bbe435579d8eb0677685c
+ content/50: 4e1da4edce56837c750ce8da4c0e6cf2
+ content/51: 3d35097bb958e6eddd6976aeb1fe9e41
+ content/52: 78dce98d48ba070dbe100ee2a94cb17d
+ content/53: 38ec85acf292485e3dd837a29208fd2c
+ content/54: 58d582d90c8715f5570f76fed2be508d
+ content/55: 7d2b7134d447172c502b5f40fc3b38e6
+ content/56: 4a71171863d7329da6813b94772c0d4e
+ content/57: 1900d5b89dbca22d7a455bdc3367f0f5
+ content/58: 45126feb4fc831922a7edabfa2d54e4a
+ content/59: 65db7855a79ab283c6409e81a7703d19
+ content/60: 191fb7087315702a36001c69d745ebed
+ content/61: f6113edfd7a0062af4d88bcf31a73f45
+ content/62: 1e84fc1eee794c20e3411b3a34a02278
+ content/63: ec31e300f79185f734d32b1cfaf8a137
+ content/64: f7ad301d02e8826921644a5268f13f32
+ content/65: 025d60fdaf93713ccb34abcbc71dfa2b
+ content/66: 70a9ece41fdad09f3a06ca0efdb92ae9
+ content/67: 356d67409ae0d82a72d052573314f660
+ content/68: 5a80933fb21deea17a0a200564f0111b
+ content/69: 9527ba2ab5ddd8001baaaaf25f1a7acc
+ content/70: bb172f1678686d9d49666c516716de24
+ content/71: 529711647eccfdf031dbb5bc70581986
+ content/72: baa408b1603f35a8e24dd60b88773c72
+ content/73: c42a9f19d0678d8d1a36cf1f93e4a5ba
+ content/74: f6180f2341e8a7ae24afb05d7a185340
+ content/75: 8196e101e443ec2aac13cefd90a6d454
+ content/76: 9a84c92505eb468916637fcf2cef70f2
+ content/77: 225bca3fb37bd38cd645e8a698abbfa9
+ content/78: 7431c09b430effd69de843ee0fbaafe8
+ content/79: 33b9b1e9744318597da4b925b0995be2
+ content/80: 6afe3b62e6d53c3dcd07149abcab4c05
+ content/81: b6363faee219321c16d41a9c3f8d3bdd
+ content/82: 2449c8e8f55e2bf3f732527352d35c9f
+ content/83: b8b23ab79a7eb32c6f8d5f49f43c51f6
+ content/84: be358297e2bbb9ab4689d11d072611d1
+ content/85: eb774a8a86d778153905b0f6cdcdf517
+ content/86: 450265802cb0ba5b435b74b9cac1bf23
+ content/87: b735ede8764e4b2dfb25967e33ab5143
+ content/88: 0f881e586a03c4b916456c73fad48358
+ content/89: f51639ab2b7ccac72b850e2064e694e9
+ content/90: 55d47e12745c1b0b62c9bdf6e8449730
+ content/91: e6223d6aa9efa444282e58d7d9a99ced
+ content/92: 3304a33dfb626c6e2267c062e8956a9d
+ content/93: 77256b36307e9f7293bd00063239c8ee
+ content/94: ac686382ccbb07d75b0f141af500dfd5
+ content/95: 5610b6538a29672335b572d6f35d0657
+ content/96: 62f6977928b2f596ed7d54383d1e779d
+ content/97: 3415d6c5ad1df56b212d69519bdf0fea
+ content/98: 6bd60468d8cc072c5fe4214481fa9f60
+ content/99: a81d7cd4a644a0061dad3a5973b4fe06
+ content/100: 981447969a71fd038049e9d9f40f4f8c
+ content/101: 531941216d31cb1947367c3c02127baa
+ content/102: bf1afa789fdfa5815faaf43574341e90
+ content/103: 5f2fe55d098d4e4f438af595708b2280
+ content/104: 41b8f7cf8899a0e92e255a3f845f9584
+ content/105: 61ddd890032078ffd2da931b1d153b6d
+ content/106: 7873aa7487bc3e8a4826d65c1760a4a0
+ content/107: 98182d9aabe14d5bad43a5ee76a75eab
+ content/108: 2bdb01e4bcb08b1d99f192acf8e2fba7
+ content/109: 7079d9c00b1e1882c329b7e9b8f74552
+ content/110: 0f9d65eaf6e8de43c3d5fa7e62bc838d
+ content/111: 58c8e9d2d0ac37efd958203b8fbc8193
+ content/112: 7859d36a7a6d0122c0818b28ee29aa3e
+ content/113: ce185e7b041b8f95ebc11370d3e0aad9
+ content/114: 701e9bf4fd4d0669da0584eac5bd96e0
+ content/115: d1bab8ec5a51a9da5464eb47e2a16b50
+ content/116: da658275cc81a20f9cf7e4c66c7af1e3
+ content/117: 377d7c99a5df4b72166946573f7210b8
+ content/118: 3afc03a5ab1dc9db2bfa092b0ac4826a
+ content/119: 18ddfcaf2be4a6f1d9819407dad9ce7c
+ content/120: 2f6263b2e95f09f7e4842453f4bf4a0a
+ content/121: 4603578d6b314b662f45564a34ca430d
+ content/122: cf4c97eb254d0bd6ea6633344621c2c2
+ content/123: 7b4640989fab002039936156f857eb21
+ content/124: 65ca9f08745b47b4cce8ea8247d043bf
+ content/125: 162b4180611ff0a53b782e4dc8109293
+ content/126: 6b367a189eb53cb198e3666023def89c
+ content/127: dbb2125cefcf618849600c1eccae8a64
+ content/128: 04eedda0da3767b06e6017c559e05414
+ content/129: 661688450606eb09d8faee1468e88331
+ content/130: 8ff8367c3246103b3e3e02499e34ae0b
+ content/131: 44678bda9166f746da1d61b694ced482
+ content/132: a5e75db27c0a901f4cacf6598f450e6c
+ content/133: d1bab8ec5a51a9da5464eb47e2a16b50
+ content/134: da658275cc81a20f9cf7e4c66c7af1e3
+ content/135: 377d7c99a5df4b72166946573f7210b8
+ content/136: 3afc03a5ab1dc9db2bfa092b0ac4826a
+ content/137: 18ddfcaf2be4a6f1d9819407dad9ce7c
+ content/138: 2f6263b2e95f09f7e4842453f4bf4a0a
+ content/139: 4603578d6b314b662f45564a34ca430d
+ content/140: cf4c97eb254d0bd6ea6633344621c2c2
+ content/141: 7b4640989fab002039936156f857eb21
+ content/142: 65ca9f08745b47b4cce8ea8247d043bf
+ content/143: 162b4180611ff0a53b782e4dc8109293
+ content/144: 6b367a189eb53cb198e3666023def89c
+ content/145: dbb2125cefcf618849600c1eccae8a64
+ content/146: 04eedda0da3767b06e6017c559e05414
+ content/147: 661688450606eb09d8faee1468e88331
+ content/148: 8ff8367c3246103b3e3e02499e34ae0b
+ content/149: 44678bda9166f746da1d61b694ced482
+ content/150: 192a89879084dd7a74a6f44bcecae958
+ content/151: 41c2bb95317d7c0421817a2b1a68cc09
+ content/152: 4c95f9fa55f698f220577380dff95011
+ content/153: 9ef273d776aada1b2cff3452f08ff985
+ content/154: 100e12673551d4ceb5b906b1b9c65059
+ content/155: ce253674cd7c49320203cda2bdd3685b
+ content/156: 8910afcea8c205a28256eb30de6a1f26
+ content/157: 4d7ad757d2c70fdff7834146d38dddd8
+ content/158: a88260a5b5e23da73e4534376adeb193
+ content/159: e5e2329cdc226186fe9d44767528a4a0
+ content/160: 1773624e9ac3d5132b505894ef51977e
+ content/161: d62c9575cc66feec7589fba95c9f7aee
+ content/162: 7af652c5407ae7e156ab27b21a4f26d3
+ content/163: 4aa69b29cca745389dea8cd74eba4f83
+ content/164: 46877074b69519165997fa0968169611
+ content/165: 2e81908c18033109ac82a054b3fafd3d
+ content/166: ecd571818ddf3d31b08b80a25958a662
+ content/167: 7dcdf2fbf3fce3f94987046506e12a9b
27578f1315b6f1b7418d5e0d6042722e:
meta/title: 8c555594662512e95f28e20d3880f186
content/0: 9218a2e190598690d0fc5c27c30f01bb
content/1: feca29d7cbb17f461bc8706f142cb475
- content/2: 65705e1bef9ddf2674454c20e77af61f
+ content/2: 9cb58e08402fc80050ad6a62cae3f643
content/3: 391128dee61b5d0d43eba88567aaef42
content/4: fa77bab0a8660a7999bf3104921aac5c
content/5: e8839cfb872185cea76973caaa7f84e0
@@ -2342,67 +2418,107 @@ checksums:
content/15: 64005abb7b5c1c3edef8970a8a7d17b2
content/16: 837ca74ccf63f23333c54e010faf681c
content/17: 626054376e08522e7195a60c34db9af8
- content/18: 03c715df3c784e92ce1c0ce6a4dcd2e3
- content/19: dcb92b9a1f222393f2e81cdae239885c
- content/20: 2f5c7e73763a1884893739283f0d0659
- content/21: f6fed8ebf67ba12199b4474a754969ae
- content/22: c8f9a1d43885f2b9fe8b64c79d8af8b8
- content/23: e1a2ca39583549a731d942082e1fa07c
- content/24: 14e077bdb64d87457870efa215384654
- content/25: c2e86eaf4b7d1cd53ed8172264337cc9
- content/26: 304e608d459ef53f308e6ea1f6f8b54a
- content/27: 9d04294f8385211535ed7622d164871f
- content/28: e1a2ca39583549a731d942082e1fa07c
- content/29: 279c20e11af33abb94993e8ea3e80669
- content/30: eec7d8395f8cf305106deb7b25384ecf
- content/31: 921824b44c391f8a0cdc5ce4cd283e77
- content/32: d5aaccb9399a1255f986b703921594e5
- content/33: dba855cc28255e4576026e3da0cdf05b
- content/34: 17fdd93c6df75b108e352a62a195bc73
- content/35: dcb92b9a1f222393f2e81cdae239885c
- content/36: fb6fddfdf4753a36c7878ef60b345822
- content/37: 191fb7087315702a36001c69d745ebed
- content/38: 1ffef0a4e0d6a6bbca85776c113e1164
- content/39: 61caafaf79e863df9525c4baf72c14e1
- content/40: ec31e300f79185f734d32b1cfaf8a137
- content/41: 65a172d64ffca3b03c6e0ed08f0bd821
- content/42: 2db387754d7fb3539bcb986dfaac1c8c
- content/43: e118d997ba48a5230ec70a564d436860
- content/44: 77268362a748dafad471f31acfd230dc
- content/45: b55b3773df2dfba66b6e675db7e2470e
- content/46: 70a9ece41fdad09f3a06ca0efdb92ae9
- content/47: 646ee615d86faf3b6a8da03115a30efa
- content/48: bb172f1678686d9d49666c516716de24
- content/49: a025b3b746d72e0f676f58703ee19a47
- content/50: 9a84c92505eb468916637fcf2cef70f2
- content/51: a4c78d85ed9be63b07b657166510f440
- content/52: 33b9b1e9744318597da4b925b0995be2
- content/53: 6afe3b62e6d53c3dcd07149abcab4c05
- content/54: b6363faee219321c16d41a9c3f8d3bdd
- content/55: f939bc99e05d04e1d52bf4b9ec3f1825
- content/56: b8b23ab79a7eb32c6f8d5f49f43c51f6
- content/57: be358297e2bbb9ab4689d11d072611d1
- content/58: d8fcefba15a99bf4a9cf71c985097677
- content/59: 7d098f0349c782f389431377ee512e92
- content/60: 22b39537f6a104803389469d211154e4
- content/61: 5dc147f9fe5e8117dfa6c94808c4ff54
- content/62: f29d6bfd74ba3fee0b90180f620b4f47
- content/63: 2a59466500b62e57481fe27692a3ed0f
- content/64: d3ac9ea2a213cafb1f871dda8f6e6fe0
- content/65: 450265802cb0ba5b435b74b9cac1bf23
- content/66: b735ede8764e4b2dfb25967e33ab5143
- content/67: 0f881e586a03c4b916456c73fad48358
- content/68: 3f643fb43f3a022a449ded1e7c4db8bf
- content/69: 55d47e12745c1b0b62c9bdf6e8449730
- content/70: 166b3975e39841707381880ae4df3984
- content/71: 3304a33dfb626c6e2267c062e8956a9d
- content/72: a88260a5b5e23da73e4534376adeb193
- content/73: cc31ae653c5642b223ec634888de29c6
- content/74: 1773624e9ac3d5132b505894ef51977e
- content/75: d62c9575cc66feec7589fba95c9f7aee
- content/76: 8df5939abc771b5d24c115ef20d42d6f
- content/77: ecd571818ddf3d31b08b80a25958a662
- content/78: 7dcdf2fbf3fce3f94987046506e12a9b
+ content/18: 12153919e0229ac0a3699de043eae2a2
+ content/19: 59ceca96004d0746448717245eb65c5c
+ content/20: a0ff152e09498effe90572fe5cdfad1b
+ content/21: 2f5c7e73763a1884893739283f0d0659
+ content/22: f6fed8ebf67ba12199b4474a754969ae
+ content/23: c8f9a1d43885f2b9fe8b64c79d8af8b8
+ content/24: e1a2ca39583549a731d942082e1fa07c
+ content/25: 14e077bdb64d87457870efa215384654
+ content/26: c2e86eaf4b7d1cd53ed8172264337cc9
+ content/27: 304e608d459ef53f308e6ea1f6f8b54a
+ content/28: 9d04294f8385211535ed7622d164871f
+ content/29: e1a2ca39583549a731d942082e1fa07c
+ content/30: 279c20e11af33abb94993e8ea3e80669
+ content/31: 9e772c161a4b008c2f1db15a967d07ab
+ content/32: c76e2089a41880dd6feac759ec8867c2
+ content/33: 5d9a7b1e681cbe8f02def7eefabb0ac5
+ content/34: b4e0e90d40a60a024f64f80b193dcb48
+ content/35: b9f46c03c91c1070dd3ca0eba461f29b
+ content/36: fbecf63d14b56039ba44471f7a8afd4a
+ content/37: 58701f4ec097582ee105714a9363ccbe
+ content/38: 4d4ad5d56e800e5d227a07339300fc7f
+ content/39: 7f2a42a752279d7871064a21d0891b73
+ content/40: 8462e2271506b0545c62e5f70865a2f4
+ content/41: 59ceca96004d0746448717245eb65c5c
+ content/42: e7019a0e12f7295893c5822356fc0df0
+ content/43: 29d376146cd1149025028c61eb33e7ab
+ content/44: 4e1da4edce56837c750ce8da4c0e6cf2
+ content/45: 666a62d9fd54735b2adcad6277b3e07f
+ content/46: db012cfc3749d025f1dd40b5db1d9d63
+ content/47: 478fe7c3fbdd5e7d779691c9a09795c9
+ content/48: 58d582d90c8715f5570f76fed2be508d
+ content/49: 710baf5cf18c21cc284e70df97b36f40
+ content/50: 6363bbb118f3f51ca1b1acf3e9ec2f7c
+ content/51: 1900d5b89dbca22d7a455bdc3367f0f5
+ content/52: 959f29f44825109bf4bb16129896a8dd
+ content/53: fb6fddfdf4753a36c7878ef60b345822
+ content/54: 191fb7087315702a36001c69d745ebed
+ content/55: 1ffef0a4e0d6a6bbca85776c113e1164
+ content/56: 61caafaf79e863df9525c4baf72c14e1
+ content/57: ec31e300f79185f734d32b1cfaf8a137
+ content/58: 65a172d64ffca3b03c6e0ed08f0bd821
+ content/59: 2db387754d7fb3539bcb986dfaac1c8c
+ content/60: e118d997ba48a5230ec70a564d436860
+ content/61: 77268362a748dafad471f31acfd230dc
+ content/62: b55b3773df2dfba66b6e675db7e2470e
+ content/63: 70a9ece41fdad09f3a06ca0efdb92ae9
+ content/64: 646ee615d86faf3b6a8da03115a30efa
+ content/65: 5a80933fb21deea17a0a200564f0111b
+ content/66: a82d5e5fad0fbfd60ca97e5312d11941
+ content/67: bb172f1678686d9d49666c516716de24
+ content/68: a025b3b746d72e0f676f58703ee19a47
+ content/69: baa408b1603f35a8e24dd60b88773c72
+ content/70: c0cc113d0001826984f9c096c79cd18b
+ content/71: f6180f2341e8a7ae24afb05d7a185340
+ content/72: 3d414a5669f152cd296af27b61104858
+ content/73: 9a84c92505eb468916637fcf2cef70f2
+ content/74: a4c78d85ed9be63b07b657166510f440
+ content/75: 7431c09b430effd69de843ee0fbaafe8
+ content/76: 33b9b1e9744318597da4b925b0995be2
+ content/77: 6afe3b62e6d53c3dcd07149abcab4c05
+ content/78: b6363faee219321c16d41a9c3f8d3bdd
+ content/79: 08410ce9f0ec358b3c7230a56bc66399
+ content/80: b8b23ab79a7eb32c6f8d5f49f43c51f6
+ content/81: be358297e2bbb9ab4689d11d072611d1
+ content/82: 09fea7c0d742a0eefa77e982e848de6c
+ content/83: 7d098f0349c782f389431377ee512e92
+ content/84: 22b39537f6a104803389469d211154e4
+ content/85: d9ec74ab28b264d76f797fdae7c8f3d3
+ content/86: f29d6bfd74ba3fee0b90180f620b4f47
+ content/87: 2a59466500b62e57481fe27692a3ed0f
+ content/88: cbbb123fc3a12bf2ab72dc1bbe373a6e
+ content/89: 7873aa7487bc3e8a4826d65c1760a4a0
+ content/90: 98182d9aabe14d5bad43a5ee76a75eab
+ content/91: 67bfa8ae3e22d9a949f08c79a40b8df5
+ content/92: 7079d9c00b1e1882c329b7e9b8f74552
+ content/93: 0f9d65eaf6e8de43c3d5fa7e62bc838d
+ content/94: bcf0ce93a4493586ad32c20d9d2b285c
+ content/95: 7859d36a7a6d0122c0818b28ee29aa3e
+ content/96: ce185e7b041b8f95ebc11370d3e0aad9
+ content/97: dae96b41f0c029b464f02ac65d3c5796
+ content/98: 41c2bb95317d7c0421817a2b1a68cc09
+ content/99: 4c95f9fa55f698f220577380dff95011
+ content/100: 6695bd47a05f9963134d8a71abb3d298
+ content/101: 100e12673551d4ceb5b906b1b9c65059
+ content/102: ce253674cd7c49320203cda2bdd3685b
+ content/103: 94d4346a735149c2a83f6d2a21b8ab4c
+ content/104: 3ee4b16b8204ef3b5b7c0322ff636fab
+ content/105: 450265802cb0ba5b435b74b9cac1bf23
+ content/106: b735ede8764e4b2dfb25967e33ab5143
+ content/107: 0f881e586a03c4b916456c73fad48358
+ content/108: 4570af52d41ecda8d91e6bbe2bc19891
+ content/109: 55d47e12745c1b0b62c9bdf6e8449730
+ content/110: 82507d357ec8766f0173b9b1081c4c56
+ content/111: 3304a33dfb626c6e2267c062e8956a9d
+ content/112: a88260a5b5e23da73e4534376adeb193
+ content/113: cc31ae653c5642b223ec634888de29c6
+ content/114: 1773624e9ac3d5132b505894ef51977e
+ content/115: d62c9575cc66feec7589fba95c9f7aee
+ content/116: 8df5939abc771b5d24c115ef20d42d6f
+ content/117: ecd571818ddf3d31b08b80a25958a662
+ content/118: 7dcdf2fbf3fce3f94987046506e12a9b
004fe5dc5ca33719cb175f3619fe5208:
meta/title: be754b00d8a2c13c561e314f6f526515
content/0: 7e581dbf3e581d503ac94f7fb7938b1f
@@ -3970,7 +4086,25 @@ checksums:
content/7: e73f4b831f5b77c71d7d86c83abcbf11
content/8: 07e064793f3e0bbcb02c4dc6083b6daa
content/9: a702b191c3f94458bee880d33853e0cb
- content/10: ce110ab5da3ff96f8cbf96ce3376fc51
- content/11: 83f9b3ab46b0501c8eb3989bec3f4f1b
- content/12: e00be80effb71b0acb014f9aa53dfbe1
- content/13: 847a381137856ded9faa5994fbc489fb
+ content/10: c497057cbb9dd53599071f8550f327cd
+ content/11: cc6e48f85d5c6bfc05f846341f2d5cc9
+ content/12: 8a80a6a97da9bf375fac565f1caabb49
+ content/13: 098cc8e062187eb877fe5e172a4aa467
+ content/14: e452a7cb33d7cf2f7cf1804703edaa20
+ content/15: 466cfd61b1d0fcd8fc93d867dfd0f3e3
+ content/16: 377572316021236994f444e88949ef34
+ content/17: 54852933b2cbe3deb3b1c3059dba6a15
+ content/18: 9e66b045763abe053a3ba8d2c23e9aa1
+ content/19: d34f0950591e3beb085e99db64d07d2f
+ content/20: 8677ef07618f7289b04fef3cce8bf745
+ content/21: c0e6d2790e369569e7f272a5ec9ae21a
+ content/22: 93643a0d9d9745f131e4eabf7ead2018
+ content/23: 89c7da6d2e8fbc25e303a7381e147237
+ content/24: a8ec63597dc3a3564bc5f0c3a6e5f42c
+ content/25: 379618989b6cd427b319cfdab523297d
+ content/26: bc4c2e699a7514771276e90e9aee53ba
+ content/27: 38e14193b679ef774c3db93d399e700e
+ content/28: ce110ab5da3ff96f8cbf96ce3376fc51
+ content/29: 83f9b3ab46b0501c8eb3989bec3f4f1b
+ content/30: e00be80effb71b0acb014f9aa53dfbe1
+ content/31: 847a381137856ded9faa5994fbc489fb
diff --git a/apps/docs/lib/utils.ts b/apps/docs/lib/utils.ts
index bc3e8ab5ad..61be1d99b6 100644
--- a/apps/docs/lib/utils.ts
+++ b/apps/docs/lib/utils.ts
@@ -9,7 +9,7 @@ export function cn(...inputs: ClassValue[]) {
}
/**
- * Get the full URL for an asset stored in Vercel Blob or local fallback
+ * Get the full URL for an asset stored in Vercel Blob
* - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
* - Otherwise falls back to local static assets served from root path
*/
@@ -20,12 +20,3 @@ export function getAssetUrl(filename: string) {
}
return `/${filename}`
}
-
-/**
- * Get the full URL for a video asset stored in Vercel Blob or local fallback
- * - If CDN is configured (NEXT_PUBLIC_BLOB_BASE_URL), uses CDN URL
- * - Otherwise falls back to local static assets served from root path
- */
-export function getVideoUrl(filename: string) {
- return getAssetUrl(filename)
-}
diff --git a/apps/sim/app/(landing)/components/testimonials/testimonials.tsx b/apps/sim/app/(landing)/components/testimonials/testimonials.tsx
index e3d01cc5c5..35fdc34d08 100644
--- a/apps/sim/app/(landing)/components/testimonials/testimonials.tsx
+++ b/apps/sim/app/(landing)/components/testimonials/testimonials.tsx
@@ -2,7 +2,6 @@
import { useEffect, useState } from 'react'
import Image from 'next/image'
-import { getAssetUrl } from '@/lib/utils'
import { inter } from '@/app/fonts/inter'
interface Testimonial {
@@ -14,7 +13,6 @@ interface Testimonial {
profileImage: string
}
-// Import all testimonials
const allTestimonials: Testimonial[] = [
{
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
@@ -22,7 +20,7 @@ const allTestimonials: Testimonial[] = [
username: '@hasantoxr',
viewCount: '515k',
tweetUrl: 'https://x.com/hasantoxr/status/1912909502036525271',
- profileImage: getAssetUrl('twitter/hasan.jpg'),
+ profileImage: '/twitter/hasan.jpg',
},
{
text: "Drag-and-drop AI workflows for devs who'd rather build agents than babysit them.",
@@ -30,7 +28,7 @@ const allTestimonials: Testimonial[] = [
username: '@GithubProjects',
viewCount: '90.4k',
tweetUrl: 'https://x.com/GithubProjects/status/1906383555707490499',
- profileImage: getAssetUrl('twitter/github-projects.jpg'),
+ profileImage: '/twitter/github-projects.jpg',
},
{
text: "🚨 BREAKING: This startup just dropped the fastest way to build AI agents.\n\nThis Figma-like canvas to build agents will blow your mind.\n\nHere's why this is the best tool for building AI agents:",
@@ -38,7 +36,7 @@ const allTestimonials: Testimonial[] = [
username: '@lazukars',
viewCount: '47.4k',
tweetUrl: 'https://x.com/lazukars/status/1913136390503600575',
- profileImage: getAssetUrl('twitter/lazukars.png'),
+ profileImage: '/twitter/lazukars.png',
},
{
text: 'omfggggg this is the zapier of agent building\n\ni always believed that building agents and using ai should not be limited to technical people. i think this solves just that\n\nthe fact that this is also open source makes me so optimistic about the future of building with ai :)))\n\ncongrats @karabegemir & @typingwala !!!',
@@ -46,7 +44,7 @@ const allTestimonials: Testimonial[] = [
username: '@nizzyabi',
viewCount: '6,269',
tweetUrl: 'https://x.com/nizzyabi/status/1907864421227180368',
- profileImage: getAssetUrl('twitter/nizzy.jpg'),
+ profileImage: '/twitter/nizzy.jpg',
},
{
text: 'A very good looking agent workflow builder 🔥 and open source!',
@@ -54,7 +52,7 @@ const allTestimonials: Testimonial[] = [
username: '@xyflowdev',
viewCount: '3,246',
tweetUrl: 'https://x.com/xyflowdev/status/1909501499719438670',
- profileImage: getAssetUrl('twitter/xyflow.jpg'),
+ profileImage: '/twitter/xyflow.jpg',
},
{
text: "One of the best products I've seen in the space, and the hustle and grind I've seen from @karabegemir and @typingwala is insane. Sim is positioned to build something game-changing, and there's no better team for the job.\n\nCongrats on the launch 🚀 🎊 great things ahead!",
@@ -62,7 +60,7 @@ const allTestimonials: Testimonial[] = [
username: '@firestorm776',
viewCount: '1,256',
tweetUrl: 'https://x.com/firestorm776/status/1907896097735061598',
- profileImage: getAssetUrl('twitter/samarth.jpg'),
+ profileImage: '/twitter/samarth.jpg',
},
{
text: 'lfgg got access to @simstudioai via @zerodotemail 😎',
@@ -70,7 +68,7 @@ const allTestimonials: Testimonial[] = [
username: '@nizzyabi',
viewCount: '1,762',
tweetUrl: 'https://x.com/nizzyabi/status/1910482357821595944',
- profileImage: getAssetUrl('twitter/nizzy.jpg'),
+ profileImage: '/twitter/nizzy.jpg',
},
{
text: 'Feels like we\'re finally getting a "Photoshop moment" for AI devs—visual, intuitive, and fast enough to keep up with ideas mid-flow.',
@@ -78,7 +76,7 @@ const allTestimonials: Testimonial[] = [
username: '@syamrajk',
viewCount: '2,784',
tweetUrl: 'https://x.com/syamrajk/status/1912911980110946491',
- profileImage: getAssetUrl('twitter/syamrajk.jpg'),
+ profileImage: '/twitter/syamrajk.jpg',
},
{
text: 'The use cases are endless. Great work @simstudioai',
@@ -86,7 +84,7 @@ const allTestimonials: Testimonial[] = [
username: '@daniel_zkim',
viewCount: '103',
tweetUrl: 'https://x.com/daniel_zkim/status/1907891273664782708',
- profileImage: getAssetUrl('twitter/daniel.jpg'),
+ profileImage: '/twitter/daniel.jpg',
},
]
@@ -95,11 +93,9 @@ export default function Testimonials() {
const [isTransitioning, setIsTransitioning] = useState(false)
const [isPaused, setIsPaused] = useState(false)
- // Create an extended array for smooth infinite scrolling
const extendedTestimonials = [...allTestimonials, ...allTestimonials]
useEffect(() => {
- // Set up automatic sliding every 3 seconds
const interval = setInterval(() => {
if (!isPaused) {
setIsTransitioning(true)
@@ -110,17 +106,15 @@ export default function Testimonials() {
return () => clearInterval(interval)
}, [isPaused])
- // Reset position when reaching the end for infinite loop
useEffect(() => {
if (currentIndex >= allTestimonials.length) {
setTimeout(() => {
setIsTransitioning(false)
setCurrentIndex(0)
- }, 500) // Match transition duration
+ }, 500)
}
}, [currentIndex])
- // Calculate the transform value
const getTransformValue = () => {
// Each card unit (card + separator) takes exactly 25% width
return `translateX(-${currentIndex * 25}%)`
diff --git a/apps/sim/app/api/__test-utils__/utils.ts b/apps/sim/app/api/__test-utils__/utils.ts
index 5b8be3b565..ce02592a9b 100644
--- a/apps/sim/app/api/__test-utils__/utils.ts
+++ b/apps/sim/app/api/__test-utils__/utils.ts
@@ -403,7 +403,10 @@ export function mockExecutionDependencies() {
provider: 'provider',
providerConfig: 'providerConfig',
},
- workflow: { id: 'id', userId: 'userId' },
+ workflow: {
+ id: 'id',
+ userId: 'userId',
+ },
workflowSchedule: {
id: 'id',
workflowId: 'workflowId',
diff --git a/apps/sim/app/api/chat/[identifier]/route.test.ts b/apps/sim/app/api/chat/[identifier]/route.test.ts
index 47c0bff36a..c95102cf88 100644
--- a/apps/sim/app/api/chat/[identifier]/route.test.ts
+++ b/apps/sim/app/api/chat/[identifier]/route.test.ts
@@ -27,7 +27,7 @@ describe('Chat Identifier API Route', () => {
const mockAddCorsHeaders = vi.fn().mockImplementation((response) => response)
const mockValidateChatAuth = vi.fn().mockResolvedValue({ authorized: true })
const mockSetChatAuthCookie = vi.fn()
- const mockExecuteWorkflowForChat = vi.fn().mockResolvedValue(createMockStream())
+ const mockCreateStreamingResponse = vi.fn().mockResolvedValue(createMockStream())
const mockChatResult = [
{
@@ -72,7 +72,16 @@ describe('Chat Identifier API Route', () => {
validateChatAuth: mockValidateChatAuth,
setChatAuthCookie: mockSetChatAuthCookie,
validateAuthToken: vi.fn().mockReturnValue(true),
- executeWorkflowForChat: mockExecuteWorkflowForChat,
+ }))
+
+ vi.doMock('@/lib/workflows/streaming', () => ({
+ createStreamingResponse: mockCreateStreamingResponse,
+ SSE_HEADERS: {
+ 'Content-Type': 'text/event-stream',
+ 'Cache-Control': 'no-cache',
+ Connection: 'keep-alive',
+ 'X-Accel-Buffering': 'no',
+ },
}))
vi.doMock('@/lib/logs/console/logger', () => ({
@@ -369,8 +378,23 @@ describe('Chat Identifier API Route', () => {
expect(response.headers.get('Cache-Control')).toBe('no-cache')
expect(response.headers.get('Connection')).toBe('keep-alive')
- // Verify executeWorkflowForChat was called with correct parameters
- expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', 'conv-123')
+ // Verify createStreamingResponse was called with correct workflow info
+ expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
+ expect.objectContaining({
+ workflow: expect.objectContaining({
+ id: 'workflow-id',
+ userId: 'user-id',
+ }),
+ input: expect.objectContaining({
+ input: 'Hello world',
+ conversationId: 'conv-123',
+ }),
+ streamConfig: expect.objectContaining({
+ isSecureMode: true,
+ workflowTriggerType: 'chat',
+ }),
+ })
+ )
})
it('should handle streaming response body correctly', async () => {
@@ -399,8 +423,8 @@ describe('Chat Identifier API Route', () => {
})
it('should handle workflow execution errors gracefully', async () => {
- const originalExecuteWorkflow = mockExecuteWorkflowForChat.getMockImplementation()
- mockExecuteWorkflowForChat.mockImplementationOnce(async () => {
+ const originalStreamingResponse = mockCreateStreamingResponse.getMockImplementation()
+ mockCreateStreamingResponse.mockImplementationOnce(async () => {
throw new Error('Execution failed')
})
@@ -417,8 +441,8 @@ describe('Chat Identifier API Route', () => {
expect(data).toHaveProperty('error')
expect(data).toHaveProperty('message', 'Execution failed')
- if (originalExecuteWorkflow) {
- mockExecuteWorkflowForChat.mockImplementation(originalExecuteWorkflow)
+ if (originalStreamingResponse) {
+ mockCreateStreamingResponse.mockImplementation(originalStreamingResponse)
}
})
@@ -443,7 +467,7 @@ describe('Chat Identifier API Route', () => {
expect(data).toHaveProperty('message', 'Invalid request body')
})
- it('should pass conversationId to executeWorkflowForChat when provided', async () => {
+ it('should pass conversationId to streaming execution when provided', async () => {
const req = createMockRequest('POST', {
input: 'Hello world',
conversationId: 'test-conversation-123',
@@ -454,10 +478,13 @@ describe('Chat Identifier API Route', () => {
await POST(req, { params })
- expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith(
- 'chat-id',
- 'Hello world',
- 'test-conversation-123'
+ expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
+ expect.objectContaining({
+ input: expect.objectContaining({
+ input: 'Hello world',
+ conversationId: 'test-conversation-123',
+ }),
+ })
)
})
@@ -469,7 +496,13 @@ describe('Chat Identifier API Route', () => {
await POST(req, { params })
- expect(mockExecuteWorkflowForChat).toHaveBeenCalledWith('chat-id', 'Hello world', undefined)
+ expect(mockCreateStreamingResponse).toHaveBeenCalledWith(
+ expect.objectContaining({
+ input: expect.objectContaining({
+ input: 'Hello world',
+ }),
+ })
+ )
})
})
})
diff --git a/apps/sim/app/api/chat/[identifier]/route.ts b/apps/sim/app/api/chat/[identifier]/route.ts
index 9551e99131..e349dfe74c 100644
--- a/apps/sim/app/api/chat/[identifier]/route.ts
+++ b/apps/sim/app/api/chat/[identifier]/route.ts
@@ -6,7 +6,6 @@ import { createLogger } from '@/lib/logs/console/logger'
import { generateRequestId } from '@/lib/utils'
import {
addCorsHeaders,
- executeWorkflowForChat,
setChatAuthCookie,
validateAuthToken,
validateChatAuth,
@@ -15,6 +14,9 @@ import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/
const logger = createLogger('ChatIdentifierAPI')
+export const dynamic = 'force-dynamic'
+export const runtime = 'nodejs'
+
// This endpoint handles chat interactions via the identifier
export async function POST(
request: NextRequest,
@@ -106,18 +108,37 @@ export async function POST(
}
try {
- // Execute workflow with structured input (input + conversationId for context)
- const result = await executeWorkflowForChat(deployment.id, input, conversationId)
+ // Transform outputConfigs to selectedOutputs format (blockId_attribute format)
+ const selectedOutputs: string[] = []
+ if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
+ for (const config of deployment.outputConfigs) {
+ const outputId = config.path
+ ? `${config.blockId}_${config.path}`
+ : `${config.blockId}_content`
+ selectedOutputs.push(outputId)
+ }
+ }
- // The result is always a ReadableStream that we can pipe to the client
- const streamResponse = new NextResponse(result, {
- status: 200,
- headers: {
- 'Content-Type': 'text/event-stream',
- 'Cache-Control': 'no-cache',
- Connection: 'keep-alive',
- 'X-Accel-Buffering': 'no',
+ const { createStreamingResponse } = await import('@/lib/workflows/streaming')
+ const { SSE_HEADERS } = await import('@/lib/utils')
+ const { createFilteredResult } = await import('@/app/api/workflows/[id]/execute/route')
+
+ const stream = await createStreamingResponse({
+ requestId,
+ workflow: { id: deployment.workflowId, userId: deployment.userId, isDeployed: true },
+ input: { input, conversationId }, // Format for chat_trigger
+ executingUserId: deployment.userId, // Use workflow owner's ID for chat deployments
+ streamConfig: {
+ selectedOutputs,
+ isSecureMode: true,
+ workflowTriggerType: 'chat',
},
+ createFilteredResult,
+ })
+
+ const streamResponse = new NextResponse(stream, {
+ status: 200,
+ headers: SSE_HEADERS,
})
return addCorsHeaders(streamResponse, request)
} catch (error: any) {
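For reference, the `outputConfigs` → `selectedOutputs` transform added in the chat route above produces `blockId_path` identifiers, falling back to `content` when no path is configured. A minimal sketch of that mapping follows; the `OutputConfig` shape and sample IDs are illustrative, not taken from the patch.

```typescript
interface OutputConfig {
  blockId: string
  path?: string
}

// Mirrors the route logic: use the configured path, defaulting to 'content'.
function toSelectedOutputs(outputConfigs: OutputConfig[]): string[] {
  return outputConfigs.map((config) =>
    config.path ? `${config.blockId}_${config.path}` : `${config.blockId}_content`
  )
}

const selected = toSelectedOutputs([
  { blockId: 'agent1-uuid', path: 'content' },
  { blockId: 'agent2-uuid' },
])
// => ['agent1-uuid_content', 'agent2-uuid_content']
```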
diff --git a/apps/sim/app/api/chat/utils.test.ts b/apps/sim/app/api/chat/utils.test.ts
index c1e5b68dff..30ec46c628 100644
--- a/apps/sim/app/api/chat/utils.test.ts
+++ b/apps/sim/app/api/chat/utils.test.ts
@@ -416,7 +416,7 @@ describe('Chat API Utils', () => {
execution: executionResult,
}
- // Simulate the type extraction logic from executeWorkflowForChat
+ // Test that streaming execution wraps the result correctly
const extractedFromStreaming =
streamingResult && typeof streamingResult === 'object' && 'execution' in streamingResult
? streamingResult.execution
diff --git a/apps/sim/app/api/chat/utils.ts b/apps/sim/app/api/chat/utils.ts
index 5fada4cc98..d3c33b474c 100644
--- a/apps/sim/app/api/chat/utils.ts
+++ b/apps/sim/app/api/chat/utils.ts
@@ -1,29 +1,11 @@
import { db } from '@sim/db'
-import { chat, userStats, workflow } from '@sim/db/schema'
-import { eq, sql } from 'drizzle-orm'
+import { chat, workflow } from '@sim/db/schema'
+import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
-import { v4 as uuidv4 } from 'uuid'
-import { checkServerSideUsageLimits } from '@/lib/billing'
import { isDev } from '@/lib/environment'
-import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
-import { LoggingSession } from '@/lib/logs/execution/logging-session'
-import { buildTraceSpans } from '@/lib/logs/execution/trace-spans/trace-spans'
import { hasAdminPermission } from '@/lib/permissions/utils'
-import { processStreamingBlockLogs } from '@/lib/tokenization'
-import { decryptSecret, generateRequestId } from '@/lib/utils'
-import { TriggerUtils } from '@/lib/workflows/triggers'
-import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
-import { getBlock } from '@/blocks'
-import { Executor } from '@/executor'
-import type { BlockLog, ExecutionResult } from '@/executor/types'
-import { Serializer } from '@/serializer'
-import { mergeSubblockState } from '@/stores/workflows/server-utils'
-import type { WorkflowState } from '@/stores/workflows/workflow/types'
-
-declare global {
- var __chatStreamProcessingTasks: Promise<{ success: boolean; error?: any }>[] | undefined
-}
+import { decryptSecret } from '@/lib/utils'
const logger = createLogger('ChatAuthUtils')
@@ -281,586 +263,3 @@ export async function validateChatAuth(
// Unknown auth type
return { authorized: false, error: 'Unsupported authentication type' }
}
-
-/**
- * Executes a workflow for a chat request and returns the formatted output.
- *
- * When workflows reference , they receive the input directly.
- * The conversationId is available at for maintaining chat context.
- *
- * @param chatId - Chat deployment identifier
- * @param input - User's chat input
- * @param conversationId - Optional ID for maintaining conversation context
- * @returns Workflow execution result formatted for the chat interface
- */
-export async function executeWorkflowForChat(
- chatId: string,
- input: string,
- conversationId?: string
-): Promise<ReadableStream> {
- const requestId = generateRequestId()
-
- logger.debug(
- `[${requestId}] Executing workflow for chat: ${chatId}${
- conversationId ? `, conversationId: ${conversationId}` : ''
- }`
- )
-
- // Find the chat deployment
- const deploymentResult = await db
- .select({
- id: chat.id,
- workflowId: chat.workflowId,
- userId: chat.userId,
- outputConfigs: chat.outputConfigs,
- customizations: chat.customizations,
- })
- .from(chat)
- .where(eq(chat.id, chatId))
- .limit(1)
-
- if (deploymentResult.length === 0) {
- logger.warn(`[${requestId}] Chat not found: ${chatId}`)
- throw new Error('Chat not found')
- }
-
- const deployment = deploymentResult[0]
- const workflowId = deployment.workflowId
- const executionId = uuidv4()
-
- const usageCheck = await checkServerSideUsageLimits(deployment.userId)
- if (usageCheck.isExceeded) {
- logger.warn(
- `[${requestId}] User ${deployment.userId} has exceeded usage limits. Skipping chat execution.`,
- {
- currentUsage: usageCheck.currentUsage,
- limit: usageCheck.limit,
- workflowId: deployment.workflowId,
- chatId,
- }
- )
- throw new Error(usageCheck.message || CHAT_ERROR_MESSAGES.USAGE_LIMIT_EXCEEDED)
- }
-
- // Set up logging for chat execution
- const loggingSession = new LoggingSession(workflowId, executionId, 'chat', requestId)
-
- // Check for multi-output configuration in customizations
- const customizations = (deployment.customizations || {}) as Record<string, any>
- let outputBlockIds: string[] = []
-
- // Extract output configs from the new schema format
- let selectedOutputIds: string[] = []
- if (deployment.outputConfigs && Array.isArray(deployment.outputConfigs)) {
- // Extract output IDs in the format expected by the streaming processor
- logger.debug(
- `[${requestId}] Found ${deployment.outputConfigs.length} output configs in deployment`
- )
-
- selectedOutputIds = deployment.outputConfigs.map((config) => {
- const outputId = config.path
- ? `${config.blockId}_${config.path}`
- : `${config.blockId}.content`
-
- logger.debug(
- `[${requestId}] Processing output config: blockId=${config.blockId}, path=${config.path || 'content'} -> outputId=${outputId}`
- )
-
- return outputId
- })
-
- // Also extract block IDs for legacy compatibility
- outputBlockIds = deployment.outputConfigs.map((config) => config.blockId)
- } else {
- // Use customizations as fallback
- outputBlockIds = Array.isArray(customizations.outputBlockIds)
- ? customizations.outputBlockIds
- : []
- }
-
- // Fall back to customizations if we still have no outputs
- if (
- outputBlockIds.length === 0 &&
- customizations.outputBlockIds &&
- customizations.outputBlockIds.length > 0
- ) {
- outputBlockIds = customizations.outputBlockIds
- }
-
- logger.debug(
- `[${requestId}] Using ${outputBlockIds.length} output blocks and ${selectedOutputIds.length} selected output IDs for extraction`
- )
-
- // Find the workflow to check if it's deployed
- const workflowResult = await db
- .select({
- isDeployed: workflow.isDeployed,
- variables: workflow.variables,
- workspaceId: workflow.workspaceId,
- })
- .from(workflow)
- .where(eq(workflow.id, workflowId))
- .limit(1)
-
- if (workflowResult.length === 0 || !workflowResult[0].isDeployed) {
- logger.warn(`[${requestId}] Workflow not found or not deployed: ${workflowId}`)
- throw new Error('Workflow not available')
- }
-
- // Load the active deployed state from the deployment versions table
- const { loadDeployedWorkflowState } = await import('@/lib/workflows/db-helpers')
-
- let deployedState: WorkflowState
- try {
- deployedState = await loadDeployedWorkflowState(workflowId)
- } catch (error) {
- logger.error(`[${requestId}] Failed to load deployed state for workflow ${workflowId}:`, error)
- throw new Error(`Workflow must be deployed to be available for chat`)
- }
-
- const { blocks, edges, loops, parallels } = deployedState
-
- // Prepare for execution, similar to use-workflow-execution.ts
- const mergedStates = mergeSubblockState(blocks)
-
- const filteredStates = Object.entries(mergedStates).reduce(
- (acc, [id, block]) => {
- const blockConfig = getBlock(block.type)
- const isTriggerBlock = blockConfig?.category === 'triggers'
- const isChatTrigger = block.type === 'chat_trigger'
-
- // Keep all non-trigger blocks and also keep the chat_trigger block
- if (!isTriggerBlock || isChatTrigger) {
- acc[id] = block
- }
- return acc
- },
- {} as typeof mergedStates
- )
-
- const currentBlockStates = Object.entries(filteredStates).reduce(
- (acc, [id, block]) => {
- acc[id] = Object.entries(block.subBlocks).reduce(
- (subAcc, [key, subBlock]) => {
- subAcc[key] = subBlock.value
- return subAcc
- },
- {} as Record<string, any>
- )
- return acc
- },
- {} as Record<string, Record<string, any>>
- )
-
- // Get user environment variables with workspace precedence
- let envVars: Record<string, string> = {}
- try {
- const workspaceId = workflowResult[0].workspaceId || undefined
- const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
- deployment.userId,
- workspaceId
- )
- envVars = { ...personalEncrypted, ...workspaceEncrypted }
- } catch (error) {
- logger.warn(`[${requestId}] Could not fetch environment variables:`, error)
- }
-
- let workflowVariables = {}
- try {
- if (workflowResult[0].variables) {
- workflowVariables =
- typeof workflowResult[0].variables === 'string'
- ? JSON.parse(workflowResult[0].variables)
- : workflowResult[0].variables
- }
- } catch (error) {
- logger.warn(`[${requestId}] Could not parse workflow variables:`, error)
- }
-
- // Filter edges to exclude connections to/from trigger blocks (same as manual execution)
- const triggerBlockIds = Object.keys(mergedStates).filter((id) => {
- const type = mergedStates[id].type
- const blockConfig = getBlock(type)
- // Exclude chat_trigger from the list so its edges are preserved
- return blockConfig?.category === 'triggers' && type !== 'chat_trigger'
- })
-
- const filteredEdges = edges.filter(
- (edge) => !triggerBlockIds.includes(edge.source) && !triggerBlockIds.includes(edge.target)
- )
-
- // Create serialized workflow with filtered blocks and edges
- const serializedWorkflow = new Serializer().serializeWorkflow(
- filteredStates,
- filteredEdges,
- loops,
- parallels,
- true // Enable validation during execution
- )
-
- // Decrypt environment variables
- const decryptedEnvVars: Record<string, string> = {}
- for (const [key, encryptedValue] of Object.entries(envVars)) {
- try {
- const { decrypted } = await decryptSecret(encryptedValue)
- decryptedEnvVars[key] = decrypted
- } catch (error: any) {
- logger.error(`[${requestId}] Failed to decrypt environment variable "${key}"`, error)
- // Log but continue - we don't want to break execution if just one var fails
- }
- }
-
- // Process block states to ensure response formats are properly parsed
- const processedBlockStates = Object.entries(currentBlockStates).reduce(
- (acc, [blockId, blockState]) => {
- // Check if this block has a responseFormat that needs to be parsed
- if (blockState.responseFormat && typeof blockState.responseFormat === 'string') {
- try {
- logger.debug(`[${requestId}] Parsing responseFormat for block ${blockId}`)
- // Attempt to parse the responseFormat if it's a string
- const parsedResponseFormat = JSON.parse(blockState.responseFormat)
-
- acc[blockId] = {
- ...blockState,
- responseFormat: parsedResponseFormat,
- }
- } catch (error) {
- logger.warn(`[${requestId}] Failed to parse responseFormat for block ${blockId}`, error)
- acc[blockId] = blockState
- }
- } else {
- acc[blockId] = blockState
- }
- return acc
- },
- {} as Record<string, Record<string, any>>
- )
-
- // Start logging session
- await loggingSession.safeStart({
- userId: deployment.userId,
- workspaceId: workflowResult[0].workspaceId || '',
- variables: workflowVariables,
- })
-
- let sessionCompleted = false
-
- const stream = new ReadableStream({
- async start(controller) {
- const encoder = new TextEncoder()
-
- try {
- const streamedContent = new Map()
- const streamedBlocks = new Set() // Track which blocks have started streaming
-
- const onStream = async (streamingExecution: any): Promise<void> => {
- if (!streamingExecution.stream) return
-
- const blockId = streamingExecution.execution?.blockId
- const reader = streamingExecution.stream.getReader()
- if (blockId) {
- streamedContent.set(blockId, '')
-
- // Add separator if this is not the first block to stream
- if (streamedBlocks.size > 0) {
- // Send separator before the new block starts
- controller.enqueue(
- encoder.encode(`data: ${JSON.stringify({ blockId, chunk: '\n\n' })}\n\n`)
- )
- }
- streamedBlocks.add(blockId)
- }
- try {
- while (true) {
- const { done, value } = await reader.read()
- if (done) {
- controller.enqueue(
- encoder.encode(`data: ${JSON.stringify({ blockId, event: 'end' })}\n\n`)
- )
- break
- }
- const chunk = new TextDecoder().decode(value)
- if (blockId) {
- streamedContent.set(blockId, (streamedContent.get(blockId) || '') + chunk)
- }
- controller.enqueue(encoder.encode(`data: ${JSON.stringify({ blockId, chunk })}\n\n`))
- }
- } catch (error) {
- logger.error('Error while reading from stream:', error)
- controller.error(error)
- }
- }
-
- // Determine the start block for chat execution BEFORE creating executor
- const startBlock = TriggerUtils.findStartBlock(mergedStates, 'chat')
-
- if (!startBlock) {
- const errorMessage = CHAT_ERROR_MESSAGES.NO_CHAT_TRIGGER
- logger.error(`[${requestId}] ${errorMessage}`)
-
- if (!sessionCompleted) {
- await loggingSession.safeCompleteWithError({
- endedAt: new Date().toISOString(),
- totalDurationMs: 0,
- error: { message: errorMessage },
- })
- sessionCompleted = true
- }
-
- // Send error event that the client expects
- controller.enqueue(
- encoder.encode(
- `data: ${JSON.stringify({
- event: 'error',
- error: CHAT_ERROR_MESSAGES.GENERIC_ERROR,
- })}\n\n`
- )
- )
- controller.close()
- return
- }
-
- const startBlockId = startBlock.blockId
-
- // Create executor AFTER confirming we have a chat trigger
- const executor = new Executor({
- workflow: serializedWorkflow,
- currentBlockStates: processedBlockStates,
- envVarValues: decryptedEnvVars,
- workflowInput: { input: input, conversationId },
- workflowVariables,
- contextExtensions: {
- stream: true,
- selectedOutputIds: selectedOutputIds.length > 0 ? selectedOutputIds : outputBlockIds,
- edges: filteredEdges.map((e: any) => ({
- source: e.source,
- target: e.target,
- })),
- onStream,
- isDeployedContext: true,
- },
- })
-
- // Set up logging on the executor
- loggingSession.setupExecutor(executor)
-
- let result
- try {
- result = await executor.execute(workflowId, startBlockId)
- } catch (error: any) {
- logger.error(`[${requestId}] Chat workflow execution failed:`, error)
- if (!sessionCompleted) {
- await loggingSession.safeCompleteWithError({
- endedAt: new Date().toISOString(),
- totalDurationMs: 0,
- error: { message: error.message || 'Chat workflow execution failed' },
- })
- sessionCompleted = true
- }
-
- // Send error to stream before ending
- controller.enqueue(
- encoder.encode(
- `data: ${JSON.stringify({
- event: 'error',
- error: error.message || 'Chat workflow execution failed',
- })}\n\n`
- )
- )
- controller.close()
- return // Don't throw - just return to end the stream gracefully
- }
-
- // Handle both ExecutionResult and StreamingExecution types
- const executionResult =
- result && typeof result === 'object' && 'execution' in result
- ? (result.execution as ExecutionResult)
- : (result as ExecutionResult)
-
- if (executionResult?.logs) {
- // Update streamed content and apply tokenization - process regardless of overall success
- // This ensures partial successes (some agents succeed, some fail) still return results
-
- // Add newlines between different agent outputs for better readability
- const processedOutputs = new Set()
- executionResult.logs.forEach((log: BlockLog) => {
- if (streamedContent.has(log.blockId)) {
- const content = streamedContent.get(log.blockId)
- if (log.output && content) {
- // Add newline separation between different outputs (but not before the first one)
- const separator = processedOutputs.size > 0 ? '\n\n' : ''
- log.output.content = separator + content
- processedOutputs.add(log.blockId)
- }
- }
- })
-
- // Also process non-streamed outputs from selected blocks (like function blocks)
- // This uses the same logic as the chat panel to ensure identical behavior
- const nonStreamingLogs = executionResult.logs.filter(
- (log: BlockLog) => !streamedContent.has(log.blockId)
- )
-
- // Extract the exact same functions used by the chat panel
- const extractBlockIdFromOutputId = (outputId: string): string => {
- return outputId.includes('_') ? outputId.split('_')[0] : outputId.split('.')[0]
- }
-
- const extractPathFromOutputId = (outputId: string, blockId: string): string => {
- return outputId.substring(blockId.length + 1)
- }
-
- const parseOutputContentSafely = (output: any): any => {
- if (!output?.content) {
- return output
- }
-
- if (typeof output.content === 'string') {
- try {
- return JSON.parse(output.content)
- } catch (e) {
- // Fallback to original structure if parsing fails
- return output
- }
- }
-
- return output
- }
-
- // Filter outputs that have matching logs (exactly like chat panel)
- const outputsToRender = selectedOutputIds.filter((outputId) => {
- const blockIdForOutput = extractBlockIdFromOutputId(outputId)
- return nonStreamingLogs.some((log) => log.blockId === blockIdForOutput)
- })
-
- // Process each selected output (exactly like chat panel)
- for (const outputId of outputsToRender) {
- const blockIdForOutput = extractBlockIdFromOutputId(outputId)
- const path = extractPathFromOutputId(outputId, blockIdForOutput)
- const log = nonStreamingLogs.find((l) => l.blockId === blockIdForOutput)
-
- if (log) {
- let outputValue: any = log.output
-
- if (path) {
- // Parse JSON content safely (exactly like chat panel)
- outputValue = parseOutputContentSafely(outputValue)
-
- const pathParts = path.split('.')
- for (const part of pathParts) {
- if (outputValue && typeof outputValue === 'object' && part in outputValue) {
- outputValue = outputValue[part]
- } else {
- outputValue = undefined
- break
- }
- }
- }
-
- if (outputValue !== undefined) {
- // Add newline separation between different outputs
- const separator = processedOutputs.size > 0 ? '\n\n' : ''
-
- // Format the output exactly like the chat panel
- const formattedOutput =
- typeof outputValue === 'string'
- ? outputValue
- : JSON.stringify(outputValue, null, 2)
-
- // Update the log content
- if (!log.output.content) {
- log.output.content = separator + formattedOutput
- } else {
- log.output.content = separator + formattedOutput
- }
- processedOutputs.add(log.blockId)
- }
- }
- }
-
- // Process all logs for streaming tokenization
- const processedCount = processStreamingBlockLogs(executionResult.logs, streamedContent)
- logger.info(`Processed ${processedCount} blocks for streaming tokenization`)
-
- const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
- const enrichedResult = { ...executionResult, traceSpans, totalDuration }
- if (conversationId) {
- if (!enrichedResult.metadata) {
- enrichedResult.metadata = {
- duration: totalDuration,
- startTime: new Date().toISOString(),
- }
- }
- ;(enrichedResult.metadata as any).conversationId = conversationId
- }
- // Use the executionId created at the beginning of this function
- logger.debug(`Using execution ID for deployed chat: ${executionId}`)
-
- if (executionResult.success) {
- try {
- await db
- .update(userStats)
- .set({
- totalChatExecutions: sql`total_chat_executions + 1`,
- lastActive: new Date(),
- })
- .where(eq(userStats.userId, deployment.userId))
- logger.debug(`Updated user stats for deployed chat: ${deployment.userId}`)
- } catch (error) {
- logger.error(`Failed to update user stats for deployed chat:`, error)
- }
- }
- }
-
- if (!(result && typeof result === 'object' && 'stream' in result)) {
- controller.enqueue(
- encoder.encode(`data: ${JSON.stringify({ event: 'final', data: result })}\n\n`)
- )
- }
-
- if (!sessionCompleted) {
- const resultForTracing =
- executionResult || ({ success: true, output: {}, logs: [] } as ExecutionResult)
- const { traceSpans } = buildTraceSpans(resultForTracing)
- await loggingSession.safeComplete({
- endedAt: new Date().toISOString(),
- totalDurationMs: executionResult?.metadata?.duration || 0,
- finalOutput: executionResult?.output || {},
- traceSpans,
- })
- sessionCompleted = true
- }
-
- controller.close()
- } catch (error: any) {
- // Handle any errors that occur in the stream
- logger.error(`[${requestId}] Stream error:`, error)
-
- // Send error event to client
- const encoder = new TextEncoder()
- controller.enqueue(
- encoder.encode(
- `data: ${JSON.stringify({
- event: 'error',
- error: error.message || 'An unexpected error occurred',
- })}\n\n`
- )
- )
-
- // Try to complete the logging session with error if not already completed
- if (!sessionCompleted && loggingSession) {
- await loggingSession.safeCompleteWithError({
- endedAt: new Date().toISOString(),
- totalDurationMs: 0,
- error: { message: error.message || 'Stream processing error' },
- })
- sessionCompleted = true
- }
-
- controller.close()
- }
- },
- })
-
- return stream
-}
diff --git a/apps/sim/app/api/knowledge/[id]/route.test.ts b/apps/sim/app/api/knowledge/[id]/route.test.ts
index bbe491d465..9d64bf5caf 100644
--- a/apps/sim/app/api/knowledge/[id]/route.test.ts
+++ b/apps/sim/app/api/knowledge/[id]/route.test.ts
@@ -234,6 +234,7 @@ describe('Knowledge Base By ID API Route', () => {
{
name: validUpdateData.name,
description: validUpdateData.description,
+ workspaceId: undefined,
chunkingConfig: undefined,
},
expect.any(String)
diff --git a/apps/sim/app/api/knowledge/[id]/route.ts b/apps/sim/app/api/knowledge/[id]/route.ts
index 38fa990ea1..bce2006b32 100644
--- a/apps/sim/app/api/knowledge/[id]/route.ts
+++ b/apps/sim/app/api/knowledge/[id]/route.ts
@@ -103,6 +103,7 @@ export async function PUT(req: NextRequest, { params }: { params: Promise<{ id:
{
name: validatedData.name,
description: validatedData.description,
+ workspaceId: validatedData.workspaceId,
chunkingConfig: validatedData.chunkingConfig,
},
requestId
diff --git a/apps/sim/app/api/schedules/execute/route.ts b/apps/sim/app/api/schedules/execute/route.ts
index 6d682465c6..81bb6a6445 100644
--- a/apps/sim/app/api/schedules/execute/route.ts
+++ b/apps/sim/app/api/schedules/execute/route.ts
@@ -4,6 +4,7 @@ import { and, eq, lte, not, sql } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
+import { getApiKeyOwnerUserId } from '@/lib/api-key/service'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
@@ -17,7 +18,7 @@ import {
getSubBlockValue,
} from '@/lib/schedules/utils'
import { decryptSecret, generateRequestId } from '@/lib/utils'
-import { loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
+import { blockExistsInDeployment, loadDeployedWorkflowState } from '@/lib/workflows/db-helpers'
import { updateWorkflowRunCounts } from '@/lib/workflows/utils'
import { Executor } from '@/executor'
import { Serializer } from '@/serializer'
@@ -106,12 +107,22 @@ export async function GET() {
continue
}
+ const actorUserId = await getApiKeyOwnerUserId(workflowRecord.pinnedApiKeyId)
+
+ if (!actorUserId) {
+ logger.warn(
+ `[${requestId}] Skipping schedule ${schedule.id}: pinned API key required to attribute usage.`
+ )
+ runningExecutions.delete(schedule.workflowId)
+ continue
+ }
+
// Check rate limits for scheduled execution (checks both personal and org subscriptions)
- const userSubscription = await getHighestPrioritySubscription(workflowRecord.userId)
+ const userSubscription = await getHighestPrioritySubscription(actorUserId)
const rateLimiter = new RateLimiter()
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
- workflowRecord.userId,
+ actorUserId,
userSubscription,
'schedule',
false // schedules are always sync
@@ -149,7 +160,7 @@ export async function GET() {
continue
}
- const usageCheck = await checkServerSideUsageLimits(workflowRecord.userId)
+ const usageCheck = await checkServerSideUsageLimits(actorUserId)
if (usageCheck.isExceeded) {
logger.warn(
`[${requestId}] User ${workflowRecord.userId} has exceeded usage limits. Skipping scheduled execution.`,
@@ -159,26 +170,19 @@ export async function GET() {
workflowId: schedule.workflowId,
}
)
-
- // Error logging handled by logging session
-
- const retryDelay = 24 * 60 * 60 * 1000 // 24 hour delay for exceeded limits
- const nextRetryAt = new Date(now.getTime() + retryDelay)
-
try {
+ const deployedData = await loadDeployedWorkflowState(schedule.workflowId)
+ const nextRunAt = calculateNextRunTime(schedule, deployedData.blocks as any)
await db
.update(workflowSchedule)
- .set({
- updatedAt: now,
- nextRunAt: nextRetryAt,
- })
+ .set({ updatedAt: now, nextRunAt })
.where(eq(workflowSchedule.id, schedule.id))
-
- logger.debug(`[${requestId}] Updated next retry time due to usage limits`)
- } catch (updateError) {
- logger.error(`[${requestId}] Error updating schedule for usage limits:`, updateError)
+ } catch (calcErr) {
+ logger.warn(
+ `[${requestId}] Unable to calculate nextRunAt while skipping schedule ${schedule.id}`,
+ calcErr
+ )
}
-
runningExecutions.delete(schedule.workflowId)
continue
}
@@ -206,11 +210,25 @@ export async function GET() {
const parallels = deployedData.parallels
logger.info(`[${requestId}] Loaded deployed workflow ${schedule.workflowId}`)
+ // Validate that the schedule's trigger block exists in the deployed state
+ if (schedule.blockId) {
+ const blockExists = await blockExistsInDeployment(
+ schedule.workflowId,
+ schedule.blockId
+ )
+ if (!blockExists) {
+ logger.warn(
+ `[${requestId}] Schedule trigger block ${schedule.blockId} not found in deployed workflow ${schedule.workflowId}. Skipping execution.`
+ )
+ return { skip: true, blocks: {} as Record<string, any> }
+ }
+ }
+
const mergedStates = mergeSubblockState(blocks)
// Retrieve environment variables with workspace precedence
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
- workflowRecord.userId,
+ actorUserId,
workflowRecord.workspaceId || undefined
)
const variables = EnvVarsSchema.parse({
@@ -355,7 +373,6 @@ export async function GET() {
)
const input = {
- workflowId: schedule.workflowId,
_context: {
workflowId: schedule.workflowId,
},
@@ -363,7 +380,7 @@ export async function GET() {
// Start logging with environment variables
await loggingSession.safeStart({
- userId: workflowRecord.userId,
+ userId: actorUserId,
workspaceId: workflowRecord.workspaceId || '',
variables: variables || {},
})
@@ -407,7 +424,7 @@ export async function GET() {
totalScheduledExecutions: sql`total_scheduled_executions + 1`,
lastActive: now,
})
- .where(eq(userStats.userId, workflowRecord.userId))
+ .where(eq(userStats.userId, actorUserId))
logger.debug(`[${requestId}] Updated user stats for scheduled execution`)
} catch (statsError) {
@@ -446,6 +463,7 @@ export async function GET() {
message: `Schedule execution failed before workflow started: ${earlyError.message}`,
stackTrace: earlyError.stack,
},
+ traceSpans: [],
})
} catch (loggingError) {
logger.error(
@@ -459,6 +477,12 @@ export async function GET() {
}
})()
+ // Check if execution was skipped (e.g., trigger block not found)
+ if ('skip' in executionSuccess && executionSuccess.skip) {
+ runningExecutions.delete(schedule.workflowId)
+ continue
+ }
+
if (executionSuccess.success) {
logger.info(`[${requestId}] Workflow ${schedule.workflowId} executed successfully`)
@@ -565,6 +589,7 @@ export async function GET() {
message: `Schedule execution failed: ${error.message}`,
stackTrace: error.stack,
},
+ traceSpans: [],
})
} catch (loggingError) {
logger.error(
diff --git a/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts b/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts
index 9d5b9268a2..9a827c9ece 100644
--- a/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts
+++ b/apps/sim/app/api/webhooks/trigger/[path]/route.test.ts
@@ -106,6 +106,24 @@ describe('Webhook Trigger API Route', () => {
mockExecutionDependencies()
mockTriggerDevSdk()
+ globalMockData.workflows.push({
+ id: 'test-workflow-id',
+ userId: 'test-user-id',
+ pinnedApiKeyId: 'test-pinned-api-key-id',
+ })
+
+ vi.doMock('@/lib/api-key/service', async () => {
+ const actual = await vi.importActual('@/lib/api-key/service')
+ return {
+ ...(actual as Record<string, unknown>),
+ getApiKeyOwnerUserId: vi
+ .fn()
+ .mockImplementation(async (pinnedApiKeyId: string | null | undefined) =>
+ pinnedApiKeyId ? 'test-user-id' : null
+ ),
+ }
+ })
+
vi.doMock('@/services/queue', () => ({
RateLimiter: vi.fn().mockImplementation(() => ({
checkRateLimit: vi.fn().mockResolvedValue({
@@ -222,6 +240,7 @@ describe('Webhook Trigger API Route', () => {
globalMockData.workflows.push({
id: 'test-workflow-id',
userId: 'test-user-id',
+ pinnedApiKeyId: 'test-pinned-api-key-id',
})
const req = createMockRequest('POST', { event: 'test', id: 'test-123' })
@@ -250,7 +269,11 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'test-token-123' },
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
+ globalMockData.workflows.push({
+ id: 'test-workflow-id',
+ userId: 'test-user-id',
+ pinnedApiKeyId: 'test-pinned-api-key-id',
+ })
const headers = {
'Content-Type': 'application/json',
@@ -281,7 +304,11 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
+ globalMockData.workflows.push({
+ id: 'test-workflow-id',
+ userId: 'test-user-id',
+ pinnedApiKeyId: 'test-pinned-api-key-id',
+ })
const headers = {
'Content-Type': 'application/json',
@@ -308,7 +335,11 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'case-test-token' },
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
+ globalMockData.workflows.push({
+ id: 'test-workflow-id',
+ userId: 'test-user-id',
+ pinnedApiKeyId: 'test-pinned-api-key-id',
+ })
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
@@ -354,7 +385,11 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
+ globalMockData.workflows.push({
+ id: 'test-workflow-id',
+ userId: 'test-user-id',
+ pinnedApiKeyId: 'test-pinned-api-key-id',
+ })
vi.doMock('@trigger.dev/sdk', () => ({
tasks: {
@@ -391,7 +426,6 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'correct-token' },
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
@@ -424,7 +458,6 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
@@ -453,7 +486,6 @@ describe('Webhook Trigger API Route', () => {
providerConfig: { requireAuth: true, token: 'required-token' },
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const req = createMockRequest('POST', { event: 'no.auth.test' })
const params = Promise.resolve({ path: 'test-path' })
@@ -482,7 +514,6 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
@@ -515,7 +546,6 @@ describe('Webhook Trigger API Route', () => {
},
workflowId: 'test-workflow-id',
})
- globalMockData.workflows.push({ id: 'test-workflow-id', userId: 'test-user-id' })
const headers = {
'Content-Type': 'application/json',
diff --git a/apps/sim/app/api/workflows/[id]/deploy/route.ts b/apps/sim/app/api/workflows/[id]/deploy/route.ts
index a2688eacc7..b153aa4fc0 100644
--- a/apps/sim/app/api/workflows/[id]/deploy/route.ts
+++ b/apps/sim/app/api/workflows/[id]/deploy/route.ts
@@ -293,6 +293,13 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
}
}
+ // Attribution: this route is UI-only; require session user as actor
+ const actorUserId: string | null = session?.user?.id ?? null
+ if (!actorUserId) {
+ logger.warn(`[${requestId}] Unable to resolve actor user for workflow deployment: ${id}`)
+ return createErrorResponse('Unable to determine deploying user', 400)
+ }
+
await db.transaction(async (tx) => {
const [{ maxVersion }] = await tx
.select({ maxVersion: sql<number>`COALESCE(MAX("version"), 0)` })
@@ -318,7 +325,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
state: currentState,
isActive: true,
createdAt: deployedAt,
- createdBy: userId,
+ createdBy: actorUserId,
})
const updateData: Record<string, any> = {
diff --git a/apps/sim/app/api/workflows/[id]/execute/route.ts b/apps/sim/app/api/workflows/[id]/execute/route.ts
index fd4239e9d5..4d12560d14 100644
--- a/apps/sim/app/api/workflows/[id]/execute/route.ts
+++ b/apps/sim/app/api/workflows/[id]/execute/route.ts
@@ -5,9 +5,11 @@ import { eq, sql } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
+import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
import { getSession } from '@/lib/auth'
import { checkServerSideUsageLimits } from '@/lib/billing'
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
+import { env } from '@/lib/env'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { createLogger } from '@/lib/logs/console/logger'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
@@ -23,6 +25,7 @@ import {
import { validateWorkflowAccess } from '@/app/api/workflows/middleware'
import { createErrorResponse, createSuccessResponse } from '@/app/api/workflows/utils'
import { Executor } from '@/executor'
+import type { ExecutionResult } from '@/executor/types'
import { Serializer } from '@/serializer'
import { RateLimitError, RateLimiter, type TriggerType } from '@/services/queue'
import { mergeSubblockState } from '@/stores/workflows/server-utils'
@@ -32,15 +35,11 @@ const logger = createLogger('WorkflowExecuteAPI')
export const dynamic = 'force-dynamic'
export const runtime = 'nodejs'
-// Define the schema for environment variables
const EnvVarsSchema = z.record(z.string())
-// Keep track of running executions to prevent duplicate requests
-// Use a combination of workflow ID and request ID to allow concurrent executions with different inputs
const runningExecutions = new Set<string>()
-// Utility function to filter out logs and workflowConnections from API response
-function createFilteredResult(result: any) {
+export function createFilteredResult(result: any) {
return {
...result,
logs: undefined,
@@ -53,7 +52,6 @@ function createFilteredResult(result: any) {
}
}
-// Custom error class for usage limit exceeded
class UsageLimitError extends Error {
statusCode: number
constructor(message: string, statusCode = 402) {
@@ -62,20 +60,76 @@ class UsageLimitError extends Error {
}
}
-async function executeWorkflow(
+/**
+ * Resolves output IDs to the internal blockId_attribute format
+ * Supports both:
+ * - User-facing format: blockName.path (e.g., "agent1.content")
+ * - Internal format: blockId_attribute (e.g., "uuid_content") - used by chat deployments
+ */
+function resolveOutputIds(
+ selectedOutputs: string[] | undefined,
+ blocks: Record<string, any>
+): string[] | undefined {
+ if (!selectedOutputs || selectedOutputs.length === 0) {
+ return selectedOutputs
+ }
+
+ // UUID regex to detect if it's already in blockId_attribute format
+ const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/i
+
+ return selectedOutputs.map((outputId) => {
+ // If it starts with a UUID, it's already in blockId_attribute format (from chat deployments)
+ if (UUID_REGEX.test(outputId)) {
+ return outputId
+ }
+
+ // Otherwise, it's in blockName.path format from the user/API
+ const dotIndex = outputId.indexOf('.')
+ if (dotIndex === -1) {
+ logger.warn(`Invalid output ID format (missing dot): ${outputId}`)
+ return outputId
+ }
+
+ const blockName = outputId.substring(0, dotIndex)
+ const path = outputId.substring(dotIndex + 1)
+
+ // Find the block by name (case-insensitive, ignoring spaces)
+ const normalizedBlockName = blockName.toLowerCase().replace(/\s+/g, '')
+ const block = Object.values(blocks).find((b: any) => {
+ const normalized = (b.name || '').toLowerCase().replace(/\s+/g, '')
+ return normalized === normalizedBlockName
+ })
+
+ if (!block) {
+ logger.warn(`Block not found for name: ${blockName} (from output ID: ${outputId})`)
+ return outputId
+ }
+
+ const resolvedId = `${block.id}_${path}`
+ logger.debug(`Resolved output ID: ${outputId} -> ${resolvedId}`)
+ return resolvedId
+ })
+}
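+// Illustrative example (hypothetical values): with a deployed block named "Agent 1" whose
+// id is "9f1c2d34-0000-4000-8000-000000000000", resolveOutputIds(['agent1.content'], blocks)
+// would return ['9f1c2d34-0000-4000-8000-000000000000_content']; IDs that already start with
+// a UUID are passed through unchanged.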
+
+export async function executeWorkflow(
workflow: any,
requestId: string,
- input?: any,
- executingUserId?: string
+ input: any | undefined,
+ actorUserId: string,
+ streamConfig?: {
+ enabled: boolean
+ selectedOutputs?: string[]
+ isSecureMode?: boolean // When true, filter out all sensitive data
+ workflowTriggerType?: 'api' | 'chat' // Which trigger block type to look for (default: 'api')
+ onStream?: (streamingExec: any) => Promise<void> // Callback for streaming agent responses
+ onBlockComplete?: (blockId: string, output: any) => Promise<void> // Callback when any block completes
+ }
): Promise<ExecutionResult> {
const workflowId = workflow.id
const executionId = uuidv4()
- // Create a unique execution key combining workflow ID and request ID
- // This allows concurrent executions of the same workflow with different inputs
const executionKey = `${workflowId}:${requestId}`
- // Skip if this exact execution is already running (prevents duplicate requests)
if (runningExecutions.has(executionKey)) {
logger.warn(`[${requestId}] Execution is already running: ${executionKey}`)
throw new Error('Execution is already running')
@@ -85,8 +139,8 @@ async function executeWorkflow(
// Rate limiting is now handled before entering the sync queue
- // Check if the user has exceeded their usage limits
- const usageCheck = await checkServerSideUsageLimits(workflow.userId)
+ // Check if the actor has exceeded their usage limits
+ const usageCheck = await checkServerSideUsageLimits(actorUserId)
if (usageCheck.isExceeded) {
logger.warn(`[${requestId}] User ${workflow.userId} has exceeded usage limits`, {
currentUsage: usageCheck.currentUsage,
@@ -132,13 +186,13 @@ async function executeWorkflow(
// Load personal (for the executing user) and workspace env (workspace overrides personal)
const { personalEncrypted, workspaceEncrypted } = await getPersonalAndWorkspaceEnv(
- executingUserId || workflow.userId,
+ actorUserId,
workflow.workspaceId || undefined
)
const variables = EnvVarsSchema.parse({ ...personalEncrypted, ...workspaceEncrypted })
await loggingSession.safeStart({
- userId: executingUserId || workflow.userId,
+ userId: actorUserId,
workspaceId: workflow.workspaceId,
variables,
})
@@ -273,15 +327,20 @@ async function executeWorkflow(
true // Enable validation during execution
)
- // Determine API trigger start block
- // Direct API execution ONLY works with API trigger blocks (or legacy starter in api/run mode)
- const startBlock = TriggerUtils.findStartBlock(mergedStates, 'api', false) // isChildWorkflow = false
+ // Determine trigger start block based on execution type
+ // - 'chat': For chat deployments (looks for chat_trigger block)
+ // - 'api': For direct API execution (looks for api_trigger block)
+ // streamConfig is passed from POST handler when using streaming/chat
+ const preferredTriggerType = streamConfig?.workflowTriggerType || 'api'
+ const startBlock = TriggerUtils.findStartBlock(mergedStates, preferredTriggerType, false)
if (!startBlock) {
- logger.error(`[${requestId}] No API trigger configured for this workflow`)
- throw new Error(
- 'No API trigger configured for this workflow. Add an API Trigger block or use a Start block in API mode.'
- )
+ const errorMsg =
+ preferredTriggerType === 'api'
+ ? 'No API trigger block found. Add an API Trigger block to this workflow.'
+ : 'No chat trigger block found. Add a Chat Trigger block to this workflow.'
+ logger.error(`[${requestId}] ${errorMsg}`)
+ throw new Error(errorMsg)
}
const startBlockId = startBlock.blockId
@@ -299,38 +358,50 @@ async function executeWorkflow(
}
}
+ // Build context extensions
+ const contextExtensions: any = {
+ executionId,
+ workspaceId: workflow.workspaceId,
+ isDeployedContext: true,
+ }
+
+ // Add streaming configuration if enabled
+ if (streamConfig?.enabled) {
+ contextExtensions.stream = true
+ contextExtensions.selectedOutputs = streamConfig.selectedOutputs || []
+ contextExtensions.edges = edges.map((e: any) => ({
+ source: e.source,
+ target: e.target,
+ }))
+ contextExtensions.onStream = streamConfig.onStream
+ contextExtensions.onBlockComplete = streamConfig.onBlockComplete
+ }
+
const executor = new Executor({
workflow: serializedWorkflow,
currentBlockStates: processedBlockStates,
envVarValues: decryptedEnvVars,
workflowInput: processedInput,
workflowVariables,
- contextExtensions: {
- executionId,
- workspaceId: workflow.workspaceId,
- isDeployedContext: true,
- },
+ contextExtensions,
})
// Set up logging on the executor
loggingSession.setupExecutor(executor)
- const result = await executor.execute(workflowId, startBlockId)
-
- // Check if we got a StreamingExecution result (with stream + execution properties)
- // For API routes, we only care about the ExecutionResult part, not the stream
- const executionResult = 'stream' in result && 'execution' in result ? result.execution : result
+ // Execute workflow (will always return ExecutionResult since we don't use onStream)
+ const result = (await executor.execute(workflowId, startBlockId)) as ExecutionResult
logger.info(`[${requestId}] Workflow execution completed: ${workflowId}`, {
- success: executionResult.success,
- executionTime: executionResult.metadata?.duration,
+ success: result.success,
+ executionTime: result.metadata?.duration,
})
// Build trace spans from execution result (works for both success and failure)
- const { traceSpans, totalDuration } = buildTraceSpans(executionResult)
+ const { traceSpans, totalDuration } = buildTraceSpans(result)
// Update workflow run counts if execution was successful
- if (executionResult.success) {
+ if (result.success) {
await updateWorkflowRunCounts(workflowId)
// Track API call in user stats
@@ -340,20 +411,28 @@ async function executeWorkflow(
totalApiCalls: sql`total_api_calls + 1`,
lastActive: sql`now()`,
})
- .where(eq(userStats.userId, workflow.userId))
+ .where(eq(userStats.userId, actorUserId))
}
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: totalDuration || 0,
- finalOutput: executionResult.output || {},
+ finalOutput: result.output || {},
traceSpans: (traceSpans || []) as any,
})
- return executionResult
+ // For non-streaming, return the execution result
+ return result
} catch (error: any) {
logger.error(`[${requestId}] Workflow execution failed: ${workflowId}`, error)
+ const executionResultForError = (error?.executionResult as ExecutionResult | undefined) || {
+ success: false,
+ output: {},
+ logs: [],
+ }
+ const { traceSpans } = buildTraceSpans(executionResultForError)
+
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: 0,
@@ -361,6 +440,7 @@ async function executeWorkflow(
message: error.message || 'Workflow execution failed',
stackTrace: error.stack,
},
+ traceSpans,
})
throw error
@@ -396,19 +476,30 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
// Synchronous execution
try {
- // Check rate limits BEFORE entering queue for GET requests
- if (triggerType === 'api') {
- // Get user subscription (checks both personal and org subscriptions)
- const userSubscription = await getHighestPrioritySubscription(validation.workflow.userId)
+ // Resolve actor user id
+ let actorUserId: string | null = null
+ if (triggerType === 'manual') {
+ actorUserId = session!.user!.id
+ } else {
+ const apiKeyHeader = request.headers.get('X-API-Key')
+ const auth = apiKeyHeader ? await authenticateApiKeyFromHeader(apiKeyHeader) : null
+ if (!auth?.success || !auth.userId) {
+ return createErrorResponse('Unauthorized', 401)
+ }
+ actorUserId = auth.userId
+ if (auth.keyId) {
+ void updateApiKeyLastUsed(auth.keyId).catch(() => {})
+ }
+ // Check rate limits BEFORE entering execution for API requests
+ const userSubscription = await getHighestPrioritySubscription(actorUserId)
const rateLimiter = new RateLimiter()
const rateLimitCheck = await rateLimiter.checkRateLimitWithSubscription(
- validation.workflow.userId,
+ actorUserId,
userSubscription,
- triggerType,
- false // isAsync = false for sync calls
+ 'api',
+ false
)
-
if (!rateLimitCheck.allowed) {
throw new RateLimitError(
`Rate limit exceeded. You have ${rateLimitCheck.remaining} requests remaining. Resets at ${rateLimitCheck.resetAt.toISOString()}`
@@ -420,8 +511,7 @@ export async function GET(request: NextRequest, { params }: { params: Promise<{
validation.workflow,
requestId,
undefined,
- // Executing user (manual run): if session present, use that user for fallback
- (await getSession())?.user?.id || undefined
+ actorUserId as string
)
// Check if the workflow execution contains a response block output
@@ -487,42 +577,78 @@ export async function POST(
const executionMode = request.headers.get('X-Execution-Mode')
const isAsync = executionMode === 'async'
- // Parse request body
+ // Parse request body first to check for internal parameters
const body = await request.text()
logger.info(`[${requestId}] ${body ? 'Request body provided' : 'No request body provided'}`)
- let input = {}
+ let parsedBody: any = {}
if (body) {
try {
- input = JSON.parse(body)
+ parsedBody = JSON.parse(body)
} catch (error) {
logger.error(`[${requestId}] Failed to parse request body as JSON`, error)
return createErrorResponse('Invalid JSON in request body', 400)
}
}
- logger.info(`[${requestId}] Input passed to workflow:`, input)
+ logger.info(`[${requestId}] Input passed to workflow:`, parsedBody)
+
+ const extractExecutionParams = (req: NextRequest, body: any) => {
+ const internalSecret = req.headers.get('X-Internal-Secret')
+ const isInternalCall = internalSecret === env.INTERNAL_API_SECRET
+
+ return {
+ isSecureMode: body.isSecureMode !== undefined ? body.isSecureMode : isInternalCall,
+ streamResponse: req.headers.get('X-Stream-Response') === 'true' || body.stream === true,
+ selectedOutputs:
+ body.selectedOutputs ||
+ (req.headers.get('X-Selected-Outputs')
+ ? JSON.parse(req.headers.get('X-Selected-Outputs')!)
+ : undefined),
+ workflowTriggerType:
+ body.workflowTriggerType || (isInternalCall && body.stream ? 'chat' : 'api'),
+ input: body.input !== undefined ? body.input : body,
+ }
+ }
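+ // Illustrative: a body of { "message": "hi", "stream": true, "selectedOutputs": ["agent1.content"] }
+ // sent without X-Internal-Secret resolves to streamResponse=true, workflowTriggerType='api',
+ // and input set to the whole body, since no explicit "input" field is provided.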
+
+ const {
+ isSecureMode: finalIsSecureMode,
+ streamResponse,
+ selectedOutputs,
+ workflowTriggerType,
+ input,
+ } = extractExecutionParams(request as NextRequest, parsedBody)
// Get authenticated user and determine trigger type
- let authenticatedUserId: string | null = null
+ let authenticatedUserId: string
let triggerType: TriggerType = 'manual'
- const session = await getSession()
- if (session?.user?.id) {
- authenticatedUserId = session.user.id
- triggerType = 'manual' // UI session (not rate limited)
+ // For internal calls (chat deployments), use the workflow owner's ID
+ if (finalIsSecureMode) {
+ authenticatedUserId = validation.workflow.userId
+ triggerType = 'manual' // Chat deployments use manual trigger type (no rate limit)
} else {
+ const session = await getSession()
const apiKeyHeader = request.headers.get('X-API-Key')
- if (apiKeyHeader) {
- authenticatedUserId = validation.workflow.userId
+
+ if (session?.user?.id && !apiKeyHeader) {
+ authenticatedUserId = session.user.id
+ triggerType = 'manual'
+ } else if (apiKeyHeader) {
+ const auth = await authenticateApiKeyFromHeader(apiKeyHeader)
+ if (!auth.success || !auth.userId) {
+ return createErrorResponse('Unauthorized', 401)
+ }
+ authenticatedUserId = auth.userId
triggerType = 'api'
+ if (auth.keyId) {
+ void updateApiKeyLastUsed(auth.keyId).catch(() => {})
+ }
+ } else {
+ return createErrorResponse('Authentication required', 401)
}
}
- if (!authenticatedUserId) {
- return createErrorResponse('Authentication required', 401)
- }
-
// Get user subscription (checks both personal and org subscriptions)
const userSubscription = await getHighestPrioritySubscription(authenticatedUserId)
@@ -606,13 +732,47 @@ export async function POST(
)
}
+ // Handle streaming response - wrap execution in SSE stream
+ if (streamResponse) {
+ // Load workflow blocks to resolve output IDs from blockName.attribute to blockId_attribute format
+ const deployedData = await loadDeployedWorkflowState(workflowId)
+ const resolvedSelectedOutputs = selectedOutputs
+ ? resolveOutputIds(selectedOutputs, deployedData.blocks || {})
+ : selectedOutputs
+
+ // Use shared streaming response creator
+ const { createStreamingResponse } = await import('@/lib/workflows/streaming')
+ const { SSE_HEADERS } = await import('@/lib/utils')
+
+ const stream = await createStreamingResponse({
+ requestId,
+ workflow: validation.workflow,
+ input,
+ executingUserId: authenticatedUserId,
+ streamConfig: {
+ selectedOutputs: resolvedSelectedOutputs,
+ isSecureMode: finalIsSecureMode,
+ workflowTriggerType,
+ },
+ createFilteredResult,
+ })
+
+ return new NextResponse(stream, {
+ status: 200,
+ headers: SSE_HEADERS,
+ })
+ }
+
+ // Non-streaming execution
const result = await executeWorkflow(
validation.workflow,
requestId,
input,
- authenticatedUserId
+ authenticatedUserId,
+ undefined
)
+ // Non-streaming response
const hasResponseBlock = workflowHasResponseBlock(result)
if (hasResponseBlock) {
return createHttpResponseFromBlock(result)
diff --git a/apps/sim/app/api/workflows/[id]/log/route.ts b/apps/sim/app/api/workflows/[id]/log/route.ts
index 5042f77b1f..75bec17ce2 100644
--- a/apps/sim/app/api/workflows/[id]/log/route.ts
+++ b/apps/sim/app/api/workflows/[id]/log/route.ts
@@ -44,15 +44,17 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
variables: {},
})
+ const { traceSpans } = buildTraceSpans(result)
+
if (result.success === false) {
const message = result.error || 'Workflow execution failed'
await loggingSession.safeCompleteWithError({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,
error: { message },
+ traceSpans,
})
} else {
- const { traceSpans } = buildTraceSpans(result)
await loggingSession.safeComplete({
endedAt: new Date().toISOString(),
totalDurationMs: result.metadata?.duration || 0,
diff --git a/apps/sim/app/api/workflows/middleware.ts b/apps/sim/app/api/workflows/middleware.ts
index 5cab568a25..8ac40caed2 100644
--- a/apps/sim/app/api/workflows/middleware.ts
+++ b/apps/sim/app/api/workflows/middleware.ts
@@ -1,6 +1,7 @@
import type { NextRequest } from 'next/server'
import { authenticateApiKey } from '@/lib/api-key/auth'
import { authenticateApiKeyFromHeader, updateApiKeyLastUsed } from '@/lib/api-key/service'
+import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'
import { getWorkflowById } from '@/lib/workflows/utils'
@@ -37,7 +38,11 @@ export async function validateWorkflowAccess(
}
}
- // API key authentication
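+ // Internal calls (e.g. chat deployments) authenticate with the shared X-Internal-Secret
+ // header instead of a user API key, so they skip the API key lookup below.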
+ const internalSecret = request.headers.get('X-Internal-Secret')
+ if (internalSecret === env.INTERNAL_API_SECRET) {
+ return { workflow }
+ }
+
let apiKeyHeader = null
for (const [key, value] of request.headers.entries()) {
if (key.toLowerCase() === 'x-api-key' && value) {
diff --git a/apps/sim/app/chat/hooks/use-chat-streaming.ts b/apps/sim/app/chat/hooks/use-chat-streaming.ts
index 01b1a32a7c..3f5473e8a1 100644
--- a/apps/sim/app/chat/hooks/use-chat-streaming.ts
+++ b/apps/sim/app/chat/hooks/use-chat-streaming.ts
@@ -4,8 +4,6 @@ import { useRef, useState } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import type { ChatMessage } from '@/app/chat/components/message/message'
import { CHAT_ERROR_MESSAGES } from '@/app/chat/constants'
-// No longer need complex output extraction - backend handles this
-import type { ExecutionResult } from '@/executor/types'
const logger = createLogger('UseChatStreaming')
@@ -148,11 +146,16 @@ export function useChatStreaming() {
for (const line of lines) {
if (line.startsWith('data: ')) {
+ const data = line.substring(6)
+
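+ // The SSE terminator arrives as a bare "data: [DONE]" line rather than JSON,
+ // so skip it before attempting JSON.parse below.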
+ if (data === '[DONE]') {
+ continue
+ }
+
try {
- const json = JSON.parse(line.substring(6))
+ const json = JSON.parse(data)
const { blockId, chunk: contentChunk, event: eventType } = json
- // Handle error events from the server
if (eventType === 'error' || json.event === 'error') {
const errorMessage = json.error || CHAT_ERROR_MESSAGES.GENERIC_ERROR
setMessages((prev) =>
@@ -172,34 +175,11 @@ export function useChatStreaming() {
}
if (eventType === 'final' && json.data) {
- // The backend has already processed and combined all outputs
- // We just need to extract the combined content and use it
- const result = json.data as ExecutionResult
-
- // Collect all content from logs that have output.content (backend processed)
- let combinedContent = ''
- if (result.logs) {
- const contentParts: string[] = []
-
- // Get content from all logs that have processed content
- result.logs.forEach((log) => {
- if (log.output?.content && typeof log.output.content === 'string') {
- // The backend already includes proper separators, so just collect the content
- contentParts.push(log.output.content)
- }
- })
-
- // Join without additional separators since backend already handles this
- combinedContent = contentParts.join('')
- }
-
- // Update the existing streaming message with the final combined content
setMessages((prev) =>
prev.map((msg) =>
msg.id === messageId
? {
...msg,
- content: combinedContent || accumulatedText, // Use combined content or fallback to streamed
isStreaming: false,
}
: msg
@@ -210,7 +190,6 @@ export function useChatStreaming() {
}
if (blockId && contentChunk) {
- // Track that this block has streamed content (like chat panel)
if (!messageIdMap.has(blockId)) {
messageIdMap.set(blockId, messageId)
}
diff --git a/apps/sim/app/workspace/[workspaceId]/knowledge/[id]/base.tsx b/apps/sim/app/workspace/[workspaceId]/knowledge/[id]/base.tsx
index 99ec6b8c0d..1ab3380f3d 100644
--- a/apps/sim/app/workspace/[workspaceId]/knowledge/[id]/base.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/knowledge/[id]/base.tsx
@@ -698,10 +698,6 @@ export function KnowledgeBase({
options={{
knowledgeBaseId: id,
currentWorkspaceId: knowledgeBase?.workspaceId || null,
- onWorkspaceChange: () => {
- // Refresh the page to reflect the workspace change
- window.location.reload()
- },
onDeleteKnowledgeBase: () => setShowDeleteDialog(true),
}}
/>
diff --git a/apps/sim/app/workspace/[workspaceId]/knowledge/components/workspace-selector/workspace-selector.tsx b/apps/sim/app/workspace/[workspaceId]/knowledge/components/workspace-selector/workspace-selector.tsx
index 7e1f9a24ab..b393a192d1 100644
--- a/apps/sim/app/workspace/[workspaceId]/knowledge/components/workspace-selector/workspace-selector.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/knowledge/components/workspace-selector/workspace-selector.tsx
@@ -11,6 +11,7 @@ import {
} from '@/components/ui/dropdown-menu'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console/logger'
+import { useKnowledgeStore } from '@/stores/knowledge/store'
const logger = createLogger('WorkspaceSelector')
@@ -33,6 +34,7 @@ export function WorkspaceSelector({
onWorkspaceChange,
disabled = false,
}: WorkspaceSelectorProps) {
+ const { updateKnowledgeBase } = useKnowledgeStore()
const [workspaces, setWorkspaces] = useState([])
const [isLoading, setIsLoading] = useState(false)
const [isUpdating, setIsUpdating] = useState(false)
@@ -95,6 +97,11 @@ export function WorkspaceSelector({
if (result.success) {
logger.info(`Knowledge base workspace updated: ${knowledgeBaseId} -> ${workspaceId}`)
+
+ // Update the store immediately to reflect the change without page reload
+ updateKnowledgeBase(knowledgeBaseId, { workspaceId: workspaceId || undefined })
+
+ // Notify parent component of the change
onWorkspaceChange?.(workspaceId)
} else {
throw new Error(result.error || 'Failed to update workspace')
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
index 4a22b5f6fd..af039420c4 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas-modal.tsx
@@ -1,7 +1,7 @@
'use client'
import { useState } from 'react'
-import { Eye, Maximize2, Minimize2, X } from 'lucide-react'
+import { Maximize2, Minimize2, X } from 'lucide-react'
import { Badge } from '@/components/ui/badge'
import { Button } from '@/components/ui/button'
import { Dialog, DialogContent, DialogHeader, DialogTitle } from '@/components/ui/dialog'
@@ -45,7 +45,6 @@ export function FrozenCanvasModal({
{/* Header */}
-
Logged Workflow State
@@ -83,14 +82,15 @@ export function FrozenCanvasModal({
traceSpans={traceSpans}
height='100%'
width='100%'
+ // Ensure preview leaves padding at edges so nodes don't touch header
/>
{/* Footer with instructions */}
- 💡 Click on blocks to see their input and output data at execution time. This canvas
- shows the exact state of the workflow when this execution was captured.
+ Click on blocks to see their input and output data at execution time. This canvas shows
+ the exact state of the workflow when this execution was captured.
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx
index 9adb54cdf6..897e956c16 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/frozen-canvas/frozen-canvas.tsx
@@ -582,6 +582,8 @@ export function FrozenCanvas({
workflowState={data.workflowState}
showSubBlocks={true}
isPannable={true}
+ defaultZoom={0.8}
+ fitPadding={0.25}
onNodeClick={(blockId) => {
// Always allow clicking blocks, even if they don't have execution data
// This is important for failed workflows where some blocks never executed
diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx
index 0d3361816b..7a3657f5d5 100644
--- a/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx
+++ b/apps/sim/app/workspace/[workspaceId]/logs/components/trace-spans/trace-spans-display.tsx
@@ -13,6 +13,67 @@ import {
import { cn, redactApiKeys } from '@/lib/utils'
import type { TraceSpan } from '@/stores/logs/filters/types'
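+// Spans emitted without an id fall back to a name|start|end composite key so they can
+// still be deduplicated when children are merged below.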
+function getSpanKey(span: TraceSpan): string {
+ if (span.id) {
+ return span.id
+ }
+
+ const name = span.name || 'span'
+ const start = span.startTime || 'unknown-start'
+ const end = span.endTime || 'unknown-end'
+
+ return `${name}|${start}|${end}`
+}
+
+function mergeTraceSpanChildren(...groups: TraceSpan[][]): TraceSpan[] {
+ const merged: TraceSpan[] = []
+ const seen = new Set<string>()
+
+ groups.forEach((group) => {
+ group.forEach((child) => {
+ const key = getSpanKey(child)
+ if (seen.has(key)) {
+ return
+ }
+ seen.add(key)
+ merged.push(child)
+ })
+ })
+
+ return merged
+}
+
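+// A child-workflow span may carry nested spans in `children`, in `output.childTraceSpans`,
+// or both; merge the two lists (deduplicated via getSpanKey) and strip childTraceSpans from
+// the output so the same span is not rendered twice.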
+function normalizeChildWorkflowSpan(span: TraceSpan): TraceSpan {
+ const enrichedSpan: TraceSpan = { ...span }
+
+ if (enrichedSpan.output && typeof enrichedSpan.output === 'object') {
+ enrichedSpan.output = { ...enrichedSpan.output }
+ }
+
+ const normalizedChildren = Array.isArray(span.children)
+ ? span.children.map((childSpan) => normalizeChildWorkflowSpan(childSpan))
+ : []
+
+ const outputChildSpans = Array.isArray(span.output?.childTraceSpans)
+ ? (span.output!.childTraceSpans as TraceSpan[]).map((childSpan) =>
+ normalizeChildWorkflowSpan(childSpan)
+ )
+ : []
+
+ const mergedChildren = mergeTraceSpanChildren(normalizedChildren, outputChildSpans)
+
+ if (enrichedSpan.output && 'childTraceSpans' in enrichedSpan.output) {
+ const { childTraceSpans, ...cleanOutput } = enrichedSpan.output as {
+ childTraceSpans?: TraceSpan[]
+ } & Record<string, unknown>
+ enrichedSpan.output = cleanOutput
+ }
+
+ enrichedSpan.children = mergedChildren.length > 0 ? mergedChildren : undefined
+
+ return enrichedSpan
+}
+
interface TraceSpansDisplayProps {
traceSpans?: TraceSpan[]
totalDuration?: number
@@ -310,22 +371,23 @@ export function TraceSpansDisplay({