diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index 1dfbc421a00..ae5c47f1455 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -1,71 +1,169 @@
-# Python imports
-import requests
+# Python imports
import os
+from typing import List, Dict, Tuple
+
+# Third party imports
+import litellm
+import requests
-# Third party imports
-from openai import OpenAI
-from rest_framework.response import Response
from rest_framework import status
+from rest_framework.response import Response
-# Django imports
+# Module imports
+from plane.app.permissions import ROLE, allow_permission
+from plane.app.serializers import (ProjectLiteSerializer,
+ WorkspaceLiteSerializer)
+from plane.db.models import Project, Workspace
+from plane.license.utils.instance_value import get_configuration_value
+from plane.utils.exception_logger import log_exception
-# Module imports
from ..base import BaseAPIView
-from plane.app.permissions import allow_permission, ROLE
-from plane.db.models import Workspace, Project
-from plane.app.serializers import ProjectLiteSerializer, WorkspaceLiteSerializer
-from plane.license.utils.instance_value import get_configuration_value
+class LLMProvider:
+ """Base class for LLM provider configurations"""
+ name: str = ""
+ models: List[str] = []
+ default_model: str = ""
+
+ @classmethod
+ def get_config(cls) -> Dict[str, str | List[str]]:
+ return {
+ "name": cls.name,
+ "models": cls.models,
+ "default_model": cls.default_model,
+ }
+
+class OpenAIProvider(LLMProvider):
+ name = "OpenAI"
+ models = ["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4o", "o1-mini", "o1-preview"]
+ default_model = "gpt-4o-mini"
+
+class AnthropicProvider(LLMProvider):
+ name = "Anthropic"
+ models = [
+ "claude-3-5-sonnet-20240620",
+ "claude-3-haiku-20240307",
+ "claude-3-opus-20240229",
+ "claude-3-sonnet-20240229",
+ "claude-2.1",
+ "claude-2",
+ "claude-instant-1.2",
+ "claude-instant-1"
+ ]
+ default_model = "claude-3-sonnet-20240229"
+
+class GeminiProvider(LLMProvider):
+ name = "Gemini"
+ models = ["gemini-pro", "gemini-1.5-pro-latest", "gemini-pro-vision"]
+ default_model = "gemini-pro"
+
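+# Maps the LLM_PROVIDER configuration value (lowercased) to its provider class.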
+SUPPORTED_PROVIDERS = {
+ "openai": OpenAIProvider,
+ "anthropic": AnthropicProvider,
+ "gemini": GeminiProvider,
+}
+
+def get_llm_config() -> Tuple[str | None, str | None, str | None]:
+    """
+    Helper to read the configured LLM settings.
+
+    Returns a tuple of (api_key, model, provider).
+    """
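+    # Values come from the instance configuration when set, falling back to
+    # the environment defaults below.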
+ api_key, provider_key, model = get_configuration_value([
+ {
+ "key": "LLM_API_KEY",
+ "default": os.environ.get("LLM_API_KEY", None),
+ },
+ {
+ "key": "LLM_PROVIDER",
+ "default": os.environ.get("LLM_PROVIDER", "openai"),
+ },
+ {
+ "key": "LLM_MODEL",
+ "default": os.environ.get("LLM_MODEL", None),
+ },
+ ])
+
+ provider = SUPPORTED_PROVIDERS.get(provider_key.lower())
+ if not provider:
+ log_exception(ValueError(f"Unsupported provider: {provider_key}"))
+ return None, None, None
+
+ if not api_key:
+ log_exception(ValueError(f"Missing API key for provider: {provider.name}"))
+ return None, None, None
+
+ # If no model specified, use provider's default
+ if not model:
+ model = provider.default_model
+
+ # Validate model is supported by provider
+ if model not in provider.models:
+ log_exception(ValueError(
+ f"Model {model} not supported by {provider.name}. "
+ f"Supported models: {', '.join(provider.models)}"
+ ))
+ return None, None, None
+
+ return api_key, model, provider_key
+
+
+def get_llm_response(
+    task: str, prompt: str, api_key: str, model: str, provider: str
+) -> Tuple[str | None, str | None]:
+    """Call the configured LLM and return a (text, error) tuple."""
+ final_text = task + "\n" + prompt
+ try:
+ # For Gemini, prepend provider name to model
+ if provider.lower() == "gemini":
+ model = f"gemini/{model}"
+
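+        # litellm infers the target provider from the model name and forwards
+        # the api_key to that provider's SDK.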
+ response = litellm.completion(
+ model=model,
+ messages=[{"role": "user", "content": final_text}],
+ api_key=api_key,
+ )
+ text = response.choices[0].message.content.strip()
+ return text, None
+ except Exception as e:
+ log_exception(e)
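+        # litellm normalizes provider errors to OpenAI-style exception classes
+        # (e.g. AuthenticationError, RateLimitError), so matching on the class
+        # name works across providers.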
+ error_type = e.__class__.__name__
+ if error_type == "AuthenticationError":
+ return None, f"Invalid API key for {provider}"
+ elif error_type == "RateLimitError":
+ return None, f"Rate limit exceeded for {provider}"
+ else:
+ return None, f"Error occurred while generating response from {provider}"
+
class GPTIntegrationEndpoint(BaseAPIView):
@allow_permission([ROLE.ADMIN, ROLE.MEMBER])
def post(self, request, slug, project_id):
- OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
- [
- {
- "key": "OPENAI_API_KEY",
- "default": os.environ.get("OPENAI_API_KEY", None),
- },
- {
- "key": "GPT_ENGINE",
- "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
- },
- ]
- )
+ api_key, model, provider = get_llm_config()
- # Get the configuration value
- # Check the keys
- if not OPENAI_API_KEY or not GPT_ENGINE:
+ if not api_key or not model or not provider:
return Response(
- {"error": "OpenAI API key and engine is required"},
+ {"error": "LLM provider API key and model are required"},
status=status.HTTP_400_BAD_REQUEST,
)
- prompt = request.data.get("prompt", False)
task = request.data.get("task", False)
-
if not task:
return Response(
{"error": "Task is required"}, status=status.HTTP_400_BAD_REQUEST
)
- final_text = task + "\n" + prompt
-
- client = OpenAI(api_key=OPENAI_API_KEY)
-
- response = client.chat.completions.create(
- model=GPT_ENGINE, messages=[{"role": "user", "content": final_text}]
- )
+        text, error = get_llm_response(
+            task, request.data.get("prompt", ""), api_key, model, provider
+        )
+ if not text and error:
+ return Response(
+ {"error": "An internal error has occurred."},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ )
workspace = Workspace.objects.get(slug=slug)
project = Project.objects.get(pk=project_id)
-        text = response.choices[0].message.content.strip()
-        text_html = text.replace("\n", "<br/>")
return Response(
{
"response": text,
- "response_html": text_html,
+                "response_html": text.replace("\n", "<br/>"),
"project_detail": ProjectLiteSerializer(project).data,
"workspace_detail": WorkspaceLiteSerializer(workspace).data,
},
@@ -76,47 +174,33 @@ def post(self, request, slug, project_id):
class WorkspaceGPTIntegrationEndpoint(BaseAPIView):
@allow_permission(allowed_roles=[ROLE.ADMIN, ROLE.MEMBER], level="WORKSPACE")
def post(self, request, slug):
- OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
- [
- {
- "key": "OPENAI_API_KEY",
- "default": os.environ.get("OPENAI_API_KEY", None),
- },
- {
- "key": "GPT_ENGINE",
- "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
- },
- ]
- )
-
- # Get the configuration value
- # Check the keys
- if not OPENAI_API_KEY or not GPT_ENGINE:
+ api_key, model, provider = get_llm_config()
+
+ if not api_key or not model or not provider:
return Response(
- {"error": "OpenAI API key and engine is required"},
+ {"error": "LLM provider API key and model are required"},
status=status.HTTP_400_BAD_REQUEST,
)
- prompt = request.data.get("prompt", False)
task = request.data.get("task", False)
-
if not task:
return Response(
{"error": "Task is required"}, status=status.HTTP_400_BAD_REQUEST
)
- final_text = task + "\n" + prompt
-
- client = OpenAI(api_key=OPENAI_API_KEY)
-
- response = client.chat.completions.create(
- model=GPT_ENGINE, messages=[{"role": "user", "content": final_text}]
- )
+        text, error = get_llm_response(
+            task, request.data.get("prompt", ""), api_key, model, provider
+        )
+ if not text and error:
+ return Response(
+ {"error": "An internal error has occurred."},
+ status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ )
-        text = response.choices[0].message.content.strip()
-        text_html = text.replace("\n", "<br/>")
return Response(
- {"response": text, "response_html": text_html}, status=status.HTTP_200_OK
+ {
+ "response": text,
+                "response_html": text.replace("\n", "<br/>"),
+ },
+ status=status.HTTP_200_OK,
)
diff --git a/apiserver/plane/license/management/commands/configure_instance.py b/apiserver/plane/license/management/commands/configure_instance.py
index 548c9c77ed0..8458df5df6d 100644
--- a/apiserver/plane/license/management/commands/configure_instance.py
+++ b/apiserver/plane/license/management/commands/configure_instance.py
@@ -132,20 +132,33 @@ def handle(self, *args, **options):
"is_encrypted": False,
},
{
- "key": "OPENAI_API_KEY",
- "value": os.environ.get("OPENAI_API_KEY"),
- "category": "OPENAI",
+ "key": "LLM_API_KEY",
+ "value": os.environ.get("LLM_API_KEY"),
+ "category": "AI",
"is_encrypted": True,
},
{
- "key": "GPT_ENGINE",
+ "key": "LLM_PROVIDER",
+ "value": os.environ.get("LLM_PROVIDER", "openai"),
+ "category": "AI",
+ "is_encrypted": False,
+ },
+ {
+ "key": "LLM_MODEL",
+ "value": os.environ.get("LLM_MODEL", "gpt-4o-mini"),
+ "category": "AI",
+ "is_encrypted": False,
+ },
+            # Deprecated: use LLM_MODEL instead.
+ {
+ "key": "GPT_ENGINE",
"value": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
"category": "SMTP",
"is_encrypted": False,
},
{
"key": "UNSPLASH_ACCESS_KEY",
- "value": os.environ.get("UNSPLASH_ACESS_KEY", ""),
+ "value": os.environ.get("UNSPLASH_ACCESS_KEY", ""),
"category": "UNSPLASH",
"is_encrypted": True,
},
diff --git a/apiserver/requirements/base.txt b/apiserver/requirements/base.txt
index 40e90aedfc4..f7eb46a4a4f 100644
--- a/apiserver/requirements/base.txt
+++ b/apiserver/requirements/base.txt
@@ -37,7 +37,7 @@ uvicorn==0.29.0
# sockets
channels==4.1.0
# ai
-openai==1.25.0
+litellm==1.51.0
# slack
slack-sdk==3.27.1
# apm