@@ -13,6 +13,7 @@ const modelProviderIcons = [
{ icon: Icons.OllamaIcon, label: 'Ollama' },
{ icon: Icons.DeepseekIcon, label: 'Deepseek' },
{ icon: Icons.ElevenLabsIcon, label: 'ElevenLabs' },
{ icon: Icons.VllmIcon, label: 'vLLM' },
]

const communicationIcons = [
@@ -88,7 +89,6 @@ interface TickerRowProps {
}

function TickerRow({ direction, offset, showOdd, icons }: TickerRowProps) {
// Create multiple copies of the icons array for seamless looping
const extendedIcons = [...icons, ...icons, ...icons, ...icons]

return (
4 changes: 3 additions & 1 deletion apps/sim/app/.env.example
@@ -20,4 +20,6 @@ INTERNAL_API_SECRET=your_internal_api_secret # Use `openssl rand -hex 32` to gen
# If left commented out, emails will be logged to console instead

# Local AI Models (Optional)
# OLLAMA_URL=http://localhost:11434 # URL for local Ollama server - uncomment if using local models
# VLLM_BASE_URL=http://localhost:8000 # Base URL for your self-hosted vLLM (OpenAI-compatible)
# VLLM_API_KEY= # Optional bearer token if your vLLM instance requires auth
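
Before pointing the app at these values, it can be worth confirming the server actually answers. Below is a minimal connectivity sketch, assuming only the two variables above, Node 18+ for the global `fetch`, and vLLM's standard OpenAI-compatible `/v1/models` endpoint; the script itself is hypothetical and not part of this change:

```ts
// check-vllm.ts: hypothetical one-off connectivity check
const baseUrl = (process.env.VLLM_BASE_URL ?? '').replace(/\/$/, '')
const apiKey = process.env.VLLM_API_KEY

async function main(): Promise<void> {
  if (!baseUrl) throw new Error('VLLM_BASE_URL is not set')
  const res = await fetch(`${baseUrl}/v1/models`, {
    // vLLM only enforces auth when launched with --api-key; the header is ignored otherwise
    headers: apiKey ? { Authorization: `Bearer ${apiKey}` } : {},
  })
  if (!res.ok) throw new Error(`vLLM answered ${res.status} ${res.statusText}`)
  const { data } = (await res.json()) as { data: Array<{ id: string }> }
  console.log('served model ids:', data.map((m) => m.id))
}

main().catch((err) => {
  console.error(err)
  process.exit(1)
})
```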
56 changes: 56 additions & 0 deletions apps/sim/app/api/providers/vllm/models/route.ts
@@ -0,0 +1,56 @@
import { type NextRequest, NextResponse } from 'next/server'
import { env } from '@/lib/env'
import { createLogger } from '@/lib/logs/console/logger'

const logger = createLogger('VLLMModelsAPI')

/**
 * GET /api/providers/vllm/models
 * Lists the models served by the configured vLLM instance, each prefixed with `vllm/`.
 */
export async function GET(request: NextRequest) {
const baseUrl = (env.VLLM_BASE_URL || '').replace(/\/$/, '')

if (!baseUrl) {
logger.info('VLLM_BASE_URL not configured')
return NextResponse.json({ models: [] })
}

try {
logger.info('Fetching vLLM models', {
baseUrl,
})

const headers: Record<string, string> = {
  'Content-Type': 'application/json',
}
// Forward the optional bearer token so instances launched with --api-key work as well
if (env.VLLM_API_KEY) {
  headers.Authorization = `Bearer ${env.VLLM_API_KEY}`
}

const response = await fetch(`${baseUrl}/v1/models`, {
  headers,
  next: { revalidate: 60 },
})

if (!response.ok) {
logger.warn('vLLM service is not available', {
status: response.status,
statusText: response.statusText,
})
return NextResponse.json({ models: [] })
}

const data = (await response.json()) as { data: Array<{ id: string }> }
const models = data.data.map((model) => `vllm/${model.id}`)

logger.info('Successfully fetched vLLM models', {
count: models.length,
models,
})

return NextResponse.json({ models })
} catch (error) {
logger.error('Failed to fetch vLLM models', {
error: error instanceof Error ? error.message : 'Unknown error',
baseUrl,
})

// Return empty array instead of error to avoid breaking the UI
return NextResponse.json({ models: [] })
}
}
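
For reference, vLLM's `/v1/models` endpoint mirrors the OpenAI models-list shape, so the `map` above only needs to prefix each server-side id with `vllm/`. A sketch of the transformation, with an illustrative model id:

```ts
// Assumed upstream payload from `${baseUrl}/v1/models` (OpenAI-compatible shape)
const upstream = {
  object: 'list',
  data: [{ id: 'meta-llama/Llama-3.1-8B-Instruct', object: 'model' }],
}

// What the route handler returns after mapping
const payload = { models: upstream.data.map((m) => `vllm/${m.id}`) }
// => { models: ['vllm/meta-llama/Llama-3.1-8B-Instruct'] }
```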
@@ -3,7 +3,11 @@
import { useEffect } from 'react'
import { createLogger } from '@/lib/logs/console/logger'
import { useProviderModels } from '@/hooks/queries/providers'
import { updateOllamaProviderModels, updateOpenRouterProviderModels } from '@/providers/utils'
import {
updateOllamaProviderModels,
updateOpenRouterProviderModels,
updateVLLMProviderModels,
} from '@/providers/utils'
import { useProvidersStore } from '@/stores/providers/store'
import type { ProviderName } from '@/stores/providers/types'

@@ -24,6 +28,8 @@ function useSyncProvider(provider: ProviderName) {
try {
if (provider === 'ollama') {
updateOllamaProviderModels(data)
} else if (provider === 'vllm') {
updateVLLMProviderModels(data)
} else if (provider === 'openrouter') {
void updateOpenRouterProviderModels(data)
}
@@ -44,6 +50,7 @@ function useSyncProvider(provider: ProviderName) {
export function ProviderModelsLoader() {
useSyncProvider('base')
useSyncProvider('ollama')
useSyncProvider('vllm')
useSyncProvider('openrouter')
return null
}
15 changes: 11 additions & 4 deletions apps/sim/blocks/blocks/agent.ts
@@ -18,6 +18,10 @@ const getCurrentOllamaModels = () => {
return useProvidersStore.getState().providers.ollama.models
}

const getCurrentVLLMModels = () => {
return useProvidersStore.getState().providers.vllm.models
}

import { useProvidersStore } from '@/stores/providers/store'
import type { ToolResponse } from '@/tools/types'

@@ -90,8 +94,11 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
const providersState = useProvidersStore.getState()
const baseModels = providersState.providers.base.models
const ollamaModels = providersState.providers.ollama.models
const vllmModels = providersState.providers.vllm.models
const openrouterModels = providersState.providers.openrouter.models
const allModels = Array.from(new Set([...baseModels, ...ollamaModels, ...openrouterModels]))
const allModels = Array.from(
new Set([...baseModels, ...ollamaModels, ...vllmModels, ...openrouterModels])
)

return allModels.map((model) => {
const icon = getProviderIcon(model)
@@ -172,7 +179,7 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
password: true,
connectionDroppable: false,
required: true,
// Hide API key for hosted models and Ollama models
// Hide API key for hosted models, Ollama models, and vLLM models
condition: isHosted
? {
field: 'model',
@@ -181,8 +188,8 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
}
: () => ({
field: 'model',
value: getCurrentOllamaModels(),
not: true, // Show for all models EXCEPT Ollama models
value: [...getCurrentOllamaModels(), ...getCurrentVLLMModels()],
not: true, // Show for all models EXCEPT Ollama and vLLM models
}),
},
{
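The inverted condition above is what keeps the API key field hidden for locally served models. A self-contained sketch of the behavior it encodes, with illustrative model ids standing in for the store lookups:

```ts
// Stand-in for getCurrentOllamaModels() + getCurrentVLLMModels()
const localModels = ['llama3.1:8b', 'vllm/qwen2.5-7b']

// `not: true` inverts the match: the field is shown for every model NOT in the list
const apiKeyVisible = (model: string) => !localModels.includes(model)

console.log(apiKeyVisible('gpt-4o')) // true: hosted model, key required
console.log(apiKeyVisible('vllm/qwen2.5-7b')) // false: self-hosted, no key needed
```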
10 changes: 10 additions & 0 deletions apps/sim/components/icons.tsx
@@ -4150,3 +4150,13 @@ export function VideoIcon(props: SVGProps<SVGSVGElement>) {
</svg>
)
}

export function VllmIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} fill='currentColor' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg'>
<title>vLLM</title>
<path d='M0 4.973h9.324V23L0 4.973z' fill='#FDB515' />
<path d='M13.986 4.351L22.378 0l-6.216 23H9.324l4.662-18.649z' fill='#30A2FF' />
</svg>
)
}
1 change: 1 addition & 0 deletions apps/sim/hooks/queries/providers.ts
@@ -7,6 +7,7 @@ const logger = createLogger('ProviderModelsQuery')
const providerEndpoints: Record<ProviderName, string> = {
base: '/api/providers/base/models',
ollama: '/api/providers/ollama/models',
vllm: '/api/providers/vllm/models',
openrouter: '/api/providers/openrouter/models',
}

2 changes: 2 additions & 0 deletions apps/sim/lib/env.ts
@@ -77,6 +77,8 @@ export const env = createEnv({
ANTHROPIC_API_KEY_2: z.string().min(1).optional(), // Additional Anthropic API key for load balancing
ANTHROPIC_API_KEY_3: z.string().min(1).optional(), // Additional Anthropic API key for load balancing
OLLAMA_URL: z.string().url().optional(), // Ollama local LLM server URL
VLLM_BASE_URL: z.string().url().optional(), // vLLM self-hosted base URL (OpenAI-compatible)
VLLM_API_KEY: z.string().optional(), // Optional bearer token for vLLM
ELEVENLABS_API_KEY: z.string().min(1).optional(), // ElevenLabs API key for text-to-speech in deployed chat
SERPER_API_KEY: z.string().min(1).optional(), // Serper API key for online search
EXA_API_KEY: z.string().min(1).optional(), // Exa AI API key for enhanced online search
29 changes: 29 additions & 0 deletions apps/sim/providers/models.ts
@@ -19,6 +19,7 @@ import {
OllamaIcon,
OpenAIIcon,
OpenRouterIcon,
VllmIcon,
xAIIcon,
} from '@/components/icons'

@@ -82,6 +83,19 @@ export const PROVIDER_DEFINITIONS: Record<string, ProviderDefinition> = {
contextInformationAvailable: false,
models: [],
},
vllm: {
id: 'vllm',
name: 'vLLM',
icon: VllmIcon,
description: 'Self-hosted vLLM with an OpenAI-compatible API',
defaultModel: 'vllm/generic',
modelPatterns: [/^vllm\//],
capabilities: {
temperature: { min: 0, max: 2 },
toolUsageControl: true,
},
models: [],
},
openai: {
id: 'openai',
name: 'OpenAI',
@@ -1366,6 +1380,21 @@ export function updateOllamaModels(models: string[]): void {
}))
}

/**
* Update vLLM models dynamically
*/
export function updateVLLMModels(models: string[]): void {
PROVIDER_DEFINITIONS.vllm.models = models.map((modelId) => ({
id: modelId,
pricing: {
input: 0,
output: 0,
updatedAt: new Date().toISOString().split('T')[0],
},
capabilities: {},
}))
}

/**
* Update OpenRouter models dynamically
*/
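Since a self-hosted endpoint has no metered cost, each dynamically discovered model registers with zero pricing. A sketch of what one fetched id becomes, with an illustrative model id (`updatedAt` is simply the day the refresh ran):

```ts
updateVLLMModels(['vllm/meta-llama/Llama-3.1-8B-Instruct'])

// PROVIDER_DEFINITIONS.vllm.models now holds:
// [{
//   id: 'vllm/meta-llama/Llama-3.1-8B-Instruct',
//   pricing: { input: 0, output: 0, updatedAt: '2025-01-01' },
//   capabilities: {},
// }]
```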
1 change: 1 addition & 0 deletions apps/sim/providers/types.ts
@@ -12,6 +12,7 @@ export type ProviderId =
| 'mistral'
| 'ollama'
| 'openrouter'
| 'vllm'

/**
* Model pricing information per million tokens
17 changes: 16 additions & 1 deletion apps/sim/providers/utils.ts
@@ -30,6 +30,7 @@ import { ollamaProvider } from '@/providers/ollama'
import { openaiProvider } from '@/providers/openai'
import { openRouterProvider } from '@/providers/openrouter'
import type { ProviderConfig, ProviderId, ProviderToolConfig } from '@/providers/types'
import { vllmProvider } from '@/providers/vllm'
import { xAIProvider } from '@/providers/xai'
import { useCustomToolsStore } from '@/stores/custom-tools/store'
import { useProvidersStore } from '@/stores/providers/store'
@@ -86,6 +87,11 @@ export const providers: Record<
models: getProviderModelsFromDefinitions('groq'),
modelPatterns: PROVIDER_DEFINITIONS.groq.modelPatterns,
},
vllm: {
...vllmProvider,
models: getProviderModelsFromDefinitions('vllm'),
modelPatterns: PROVIDER_DEFINITIONS.vllm.modelPatterns,
},
mistral: {
...mistralProvider,
models: getProviderModelsFromDefinitions('mistral'),
@@ -123,6 +129,12 @@ export function updateOllamaProviderModels(models: string[]): void {
providers.ollama.models = getProviderModelsFromDefinitions('ollama')
}

export function updateVLLMProviderModels(models: string[]): void {
const { updateVLLMModels } = require('@/providers/models')
updateVLLMModels(models)
providers.vllm.models = getProviderModelsFromDefinitions('vllm')
}

export async function updateOpenRouterProviderModels(models: string[]): Promise<void> {
const { updateOpenRouterModels } = await import('@/providers/models')
updateOpenRouterModels(models)
@@ -131,7 +143,10 @@ export async function updateOpenRouterProviderModels(models: string[]): Promise<

export function getBaseModelProviders(): Record<string, ProviderId> {
const allProviders = Object.entries(providers)
.filter(([providerId]) => providerId !== 'ollama' && providerId !== 'openrouter')
.filter(
([providerId]) =>
providerId !== 'ollama' && providerId !== 'vllm' && providerId !== 'openrouter'
)
.reduce(
(map, [providerId, config]) => {
config.models.forEach((model) => {