From c3f0327dfb470d1a3e49fe5685644716d3d0efcf Mon Sep 17 00:00:00 2001
From: akash5100
Date: Tue, 29 Oct 2024 17:05:52 +0530
Subject: [PATCH 1/9] adds litellm gateway

---
 apiserver/plane/app/views/external/base.py | 45 +++++++++-------------
 apiserver/requirements/base.txt            |  2 +-
 2 files changed, 20 insertions(+), 27 deletions(-)

diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index 6ae3f37ba8e..2a11aa322f7 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -1,23 +1,22 @@
-# Python imports
-import requests
+# Python import
 import os
 
-# Third party imports
-from openai import OpenAI
-from rest_framework.response import Response
+# Third party import
+import litellm
+import requests
+
+from litellm import completion
 from rest_framework import status
+from rest_framework.response import Response
 
-# Django imports
+# Module import
+from plane.app.permissions import ROLE, allow_permission
+from plane.app.serializers import (ProjectLiteSerializer,
+                                   WorkspaceLiteSerializer)
+from plane.db.models import Project, Workspace
+from plane.license.utils.instance_value import get_configuration_value
 
-# Module imports
 from ..base import BaseAPIView
-from plane.app.permissions import allow_permission, ROLE
-from plane.db.models import Workspace, Project
-from plane.app.serializers import (
-    ProjectLiteSerializer,
-    WorkspaceLiteSerializer,
-)
-from plane.license.utils.instance_value import get_configuration_value
 
 
 class GPTIntegrationEndpoint(BaseAPIView):
@@ -32,7 +31,7 @@ def post(self, request, slug, project_id):
                 },
                 {
                     "key": "GPT_ENGINE",
-                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
+                    "default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
                 },
             ]
         )
@@ -56,11 +55,8 @@ def post(self, request, slug, project_id):
 
         final_text = task + "\n" + prompt
 
-        client = OpenAI(
-            api_key=OPENAI_API_KEY,
-        )
-
-        response = client.chat.completions.create(
+        litellm.api_key = OPENAI_API_KEY
+        response = completion(
             model=GPT_ENGINE,
             messages=[{"role": "user", "content": final_text}],
         )
@@ -95,7 +91,7 @@ def post(self, request, slug):
                 },
                 {
                     "key": "GPT_ENGINE",
-                    "default": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
+                    "default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
                 },
             ]
         )
@@ -119,11 +115,8 @@ def post(self, request, slug):
 
         final_text = task + "\n" + prompt
 
-        client = OpenAI(
-            api_key=OPENAI_API_KEY,
-        )
-
-        response = client.chat.completions.create(
+        litellm.api_key = OPENAI_API_KEY
+        response = completion(
             model=GPT_ENGINE,
             messages=[{"role": "user", "content": final_text}],
         )
diff --git a/apiserver/requirements/base.txt b/apiserver/requirements/base.txt
index fbe6680d43f..3671723eb5c 100644
--- a/apiserver/requirements/base.txt
+++ b/apiserver/requirements/base.txt
@@ -37,7 +37,7 @@ uvicorn==0.29.0
 # sockets
 channels==4.1.0
 # ai
-openai==1.25.0
+litellm==1.51.0
 # slack
 slack-sdk==3.27.1
 # apm

From e68ab40c0280b57034ac451336ee8f8e4dcf7cbb Mon Sep 17 00:00:00 2001
From: akash5100
Date: Sat, 9 Nov 2024 13:31:58 +0530
Subject: [PATCH 2/9] Fixes repeating code

---
 apiserver/plane/app/views/external/base.py | 111 +++++++++++----------
 1 file changed, 57 insertions(+), 54 deletions(-)

diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index 2a11aa322f7..b2f9c9e0489 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -5,7 +5,6 @@
 import litellm
 import requests
 
-from litellm import completion
 from rest_framework import status
 from rest_framework.response import Response
 
@@ -19,57 +18,79 @@
 from ..base import BaseAPIView
 
 
+def get_gpt_config():
+    """Helper to get GPT configuration values"""
+    OPENAI_API_KEY, GPT_ENGINE = get_configuration_value([
+        {
+            "key": "OPENAI_API_KEY",
+            "default": os.environ.get("OPENAI_API_KEY", None),
+        },
+        {
+            "key": "GPT_ENGINE",
+            "default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
+        },
+    ])
+
+    if not OPENAI_API_KEY or not GPT_ENGINE:
+        return None, None
+    return OPENAI_API_KEY, GPT_ENGINE
+
+
+def get_gpt_response(task, prompt, api_key, engine):
+    """Helper to get GPT completion response"""
+    final_text = task + "\n" + prompt
+    try:
+        response = litellm.completion(
+            model=engine,
+            messages=[{"role": "user", "content": final_text}],
+            api_key=api_key,
+        )
+        text = response.choices[0].message.content.strip()
+        return text, None
+    except Exception as e:
+        return None, str(e)
+
+
 class GPTIntegrationEndpoint(BaseAPIView):
     @allow_permission([ROLE.ADMIN, ROLE.MEMBER])
     def post(self, request, slug, project_id):
-        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
-            [
-                {
-                    "key": "OPENAI_API_KEY",
-                    "default": os.environ.get("OPENAI_API_KEY", None),
-                },
-                {
-                    "key": "GPT_ENGINE",
-                    "default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
-                },
-            ]
-        )
+        OPENAI_API_KEY, GPT_ENGINE = get_gpt_config()
+
+        supported_models = ["gpt-4o-mini", "gpt-4o"]
+        if GPT_ENGINE not in supported_models:
+            return Response(
+                {"error": f"Unsupported model. Please use one of: {', '.join(supported_models)}"},
+                status=status.HTTP_400_BAD_REQUEST,
+            )
 
-        # Get the configuration value
-        # Check the keys
         if not OPENAI_API_KEY or not GPT_ENGINE:
             return Response(
                 {"error": "OpenAI API key and engine is required"},
                 status=status.HTTP_400_BAD_REQUEST,
             )
 
-        prompt = request.data.get("prompt", False)
         task = request.data.get("task", False)
-
         if not task:
             return Response(
                 {"error": "Task is required"},
                 status=status.HTTP_400_BAD_REQUEST,
             )
 
-        final_text = task + "\n" + prompt
-
-        litellm.api_key = OPENAI_API_KEY
-        response = completion(
-            model=GPT_ENGINE,
-            messages=[{"role": "user", "content": final_text}],
-        )
+        text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
+        if error:
+            return Response(
+                {"error": f"LLM API error: {error}"},
+                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            )
 
         workspace = Workspace.objects.get(slug=slug)
         project = Project.objects.get(pk=project_id)
 
-        text = response.choices[0].message.content.strip()
-        text_html = text.replace("\n", "<br/>")
-
         return Response(
             {
                 "response": text,
-                "response_html": text_html,
+                "response_html": text.replace("\n", "<br/>"),
                 "project_detail": ProjectLiteSerializer(project).data,
                 "workspace_detail": WorkspaceLiteSerializer(workspace).data,
             },
@@ -83,50 +104,32 @@ class WorkspaceGPTIntegrationEndpoint(BaseAPIView):
         allowed_roles=[ROLE.ADMIN, ROLE.MEMBER], level="WORKSPACE"
     )
     def post(self, request, slug):
-        OPENAI_API_KEY, GPT_ENGINE = get_configuration_value(
-            [
-                {
-                    "key": "OPENAI_API_KEY",
-                    "default": os.environ.get("OPENAI_API_KEY", None),
-                },
-                {
-                    "key": "GPT_ENGINE",
-                    "default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
-                },
-            ]
-        )
-
-        # Get the configuration value
-        # Check the keys
+        OPENAI_API_KEY, GPT_ENGINE = get_gpt_config()
+
         if not OPENAI_API_KEY or not GPT_ENGINE:
             return Response(
                 {"error": "OpenAI API key and engine is required"},
                 status=status.HTTP_400_BAD_REQUEST,
             )
 
-        prompt = request.data.get("prompt", False)
         task = request.data.get("task", False)
-
         if not task:
             return Response(
                 {"error": "Task is required"},
                 status=status.HTTP_400_BAD_REQUEST,
             )
 
-        final_text = task + "\n" + prompt
-
-        litellm.api_key = OPENAI_API_KEY
-        response = completion(
-            model=GPT_ENGINE,
-            messages=[{"role": "user", "content": final_text}],
-        )
+        text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
+        if error:
+            return Response(
+                {"error": f"LLM API error: {error}"},
+                status=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            )
 
-        text = response.choices[0].message.content.strip()
-        text_html = text.replace("\n", "<br/>")
-
         return Response(
             {
                 "response": text,
-                "response_html": text_html,
+                "response_html": text.replace("\n", "<br/>"),
             },
             status=status.HTTP_200_OK,
         )

From 30b45bf305c1cb53fe5e0ddc3f6fe3ab7c68d00e Mon Sep 17 00:00:00 2001
From: akash5100
Date: Sat, 9 Nov 2024 13:41:49 +0530
Subject: [PATCH 3/9] Fixes error exposing

---
 apiserver/plane/app/views/external/base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index b2f9c9e0489..de886bff811 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -80,7 +80,7 @@ def post(self, request, slug, project_id):
         text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
         if error:
             return Response(
-                {"error": f"LLM API error: {error}"},
+                {"error": "An internal error has occurred."},
                 status=status.HTTP_500_INTERNAL_SERVER_ERROR,
             )
@@ -122,7 +122,7 @@ def post(self, request, slug):
         text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
         if error:
             return Response(
-                {"error": f"LLM API error: {error}"},
+                {"error": "An internal error has occurred."},
                 status=status.HTTP_500_INTERNAL_SERVER_ERROR,
             )

From bcd37bc07de3f8bc3f4808d30643e5a9a87f1e38 Mon Sep 17 00:00:00 2001
From: Akash Verma
Date: Sat, 9 Nov 2024 15:40:02 +0530
Subject: [PATCH 4/9] Fixes error for None text

---
 apiserver/plane/app/views/external/base.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index de886bff811..6090c6bd17d 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -78,7 +78,7 @@ def post(self, request, slug, project_id):
             )
 
         text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
-        if error:
+        if not text and error:
             return Response(
                 {"error": "An internal error has occurred."},
                 status=status.HTTP_500_INTERNAL_SERVER_ERROR,
             )
@@ -120,7 +120,7 @@ def post(self, request, slug):
             )
 
         text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
-        if error:
+        if not text and error:
             return Response(
                 {"error": "An internal error has occurred."},
                 status=status.HTTP_500_INTERNAL_SERVER_ERROR,
             )

From 954fd2f549763dc2ed3447e66308983e5708e7b3 Mon Sep 17 00:00:00 2001
From: akash5100
Date: Wed, 11 Dec 2024 13:39:23 +0530
Subject: [PATCH 5/9] handles logging exception

---
 apiserver/plane/app/views/external/base.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index d8bc3f30f15..4f49d5329cf 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -14,6 +14,7 @@ WorkspaceLiteSerializer)
 from plane.db.models import Project, Workspace
 from plane.license.utils.instance_value import get_configuration_value
+from plane.utils.exception_logger import log_exception
 
 from ..base import BaseAPIView
 
@@ -48,7 +49,8 @@ def get_gpt_response(task, prompt, api_key, engine):
         text = response.choices[0].message.content.strip()
         return text, None
     except Exception as e:
-        return None, str(e)
+        log_exception(e)
+        return None, "Error has occurred while generating response from OpenAI GPT"
 
 
 class GPTIntegrationEndpoint(BaseAPIView):

From 65acf0c453616ddefdd04ce9feb6195d30a0b304 Mon Sep 17 00:00:00 2001
From: akash5100
Date: Thu, 12 Dec 2024 13:11:13 +0530
Subject: [PATCH 6/9] Adds multiple providers support

---
 .env.example                               |   9 +-
 apiserver/plane/app/views/external/base.py | 127 ++++++++++++++++-----
 2 files changed, 106 insertions(+), 30 deletions(-)

diff --git a/.env.example b/.env.example
index 8b11217b5d4..c6f7dbb6ebf 100644
--- a/.env.example
+++ b/.env.example
@@ -27,9 +27,16 @@ FILE_SIZE_LIMIT=5242880
 # GPT settings
 OPENAI_API_BASE="https://api.openai.com/v1" # deprecated
-OPENAI_API_KEY="sk-" # deprecated
 GPT_ENGINE="gpt-3.5-turbo" # deprecated
 
+# AI Assistant Settings
+LLM_PROVIDER=openai # Can be "openai", "anthropic", or "gemini"
+LLM_MODEL=gpt-4o-mini # The specific model you want to use
+
+OPENAI_API_KEY=your-openai-api-key
+ANTHROPIC_API_KEY=your-anthropic-api-key
+GEMINI_API_KEY=your-gemini-api-key
+
 # Settings related to Docker
 DOCKERIZED=1 # deprecated
diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index 4f49d5329cf..c886a01a10f 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -1,5 +1,6 @@
 # Python import
 import os
+from typing import List, Dict, Tuple
 
 # Third party import
 import litellm
 import requests
@@ -19,30 +20,105 @@
 from ..base import BaseAPIView
 
 
-def get_gpt_config():
-    """Helper to get GPT configuration values"""
-    OPENAI_API_KEY, GPT_ENGINE = get_configuration_value([
+class LLMProvider:
+    """Base class for LLM provider configurations"""
+    name: str = ""
+    models: List[str] = []
+    api_key_env: str = ""
+    default_model: str = ""
+
+    @classmethod
+    def get_config(cls) -> Dict[str, str | List[str]]:
+        return {
+            "name": cls.name,
+            "models": cls.models,
+            "default_model": cls.default_model,
+        }
+
+class OpenAIProvider(LLMProvider):
+    name = "OpenAI"
+    models = ["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4o", "o1-mini", "o1-preview"]
+    api_key_env = "OPENAI_API_KEY"
+    default_model = "gpt-4o-mini"
+
+class AnthropicProvider(LLMProvider):
+    name = "Anthropic"
+    models = [
+        "claude-3-5-sonnet-20240620",
+        "claude-3-haiku-20240307",
+        "claude-3-opus-20240229",
+        "claude-3-sonnet-20240229",
+        "claude-2.1",
+        "claude-2",
+        "claude-instant-1.2",
+        "claude-instant-1"
+    ]
+    api_key_env = "ANTHROPIC_API_KEY"
+    default_model = "claude-3-sonnet-20240229"
+
+class GeminiProvider(LLMProvider):
+    name = "Gemini"
+    models = ["gemini-pro", "gemini-1.5-pro-latest", "gemini-pro-vision"]
+    api_key_env = "GEMINI_API_KEY"
+    default_model = "gemini-pro"
+
+SUPPORTED_PROVIDERS = {
+    "openai": OpenAIProvider,
+    "anthropic": AnthropicProvider,
+    "gemini": GeminiProvider,
+}
+
+def get_llm_config() -> Tuple[str | None, str | None, str | None]:
+    """
+    Helper to get LLM configuration values, returns:
+    - api_key, model, provider
+    """
+    provider_key, model = get_configuration_value([
         {
-            "key": "OPENAI_API_KEY",
-            "default": os.environ.get("OPENAI_API_KEY", None),
+            "key": "LLM_PROVIDER",
+            "default": os.environ.get("LLM_PROVIDER", "openai"),
         },
         {
-            "key": "GPT_ENGINE",
-            "default": os.environ.get("GPT_ENGINE", "gpt-4o-mini"),
+            "key": "LLM_MODEL",
+            "default": None,
         },
     ])
 
-    if not OPENAI_API_KEY or not GPT_ENGINE:
-        return None, None
-    return OPENAI_API_KEY, GPT_ENGINE
+    provider = SUPPORTED_PROVIDERS.get(provider_key.lower())
+    if not provider:
+        return None, None, None
 
-
-def get_gpt_response(task, prompt, api_key, engine):
-    """Helper to get GPT completion response"""
+    api_key, _ = get_configuration_value([
+        {
+            "key": provider.api_key_env,
+            "default": os.environ.get(provider.api_key_env, None),
+        }
+    ])
+
+    if not api_key:
+        return None, None, None
+
+    # If no model specified, use provider's default
+    if not model:
+        model = provider.default_model
+
+    # Validate model is supported by provider
+    if model not in provider.models:
+        return None, None, None
+
+    return api_key, model, provider_key
+
+
+def get_llm_response(task, prompt, api_key: str, model: str, provider: str) -> Tuple[str | None, str | None]:
+    """Helper to get LLM completion response"""
     final_text = task + "\n" + prompt
     try:
+        # For Gemini, prepend provider name to model
+        if provider.lower() == "gemini":
+            model = f"gemini/{model}"
+
         response = litellm.completion(
-            model=engine,
+            model=model,
             messages=[{"role": "user", "content": final_text}],
             api_key=api_key,
         )
         text = response.choices[0].message.content.strip()
         return text, None
     except Exception as e:
         log_exception(e)
         return None, "Error has occurred while generating response from OpenAI GPT"
 
 
 class GPTIntegrationEndpoint(BaseAPIView):
     @allow_permission([ROLE.ADMIN, ROLE.MEMBER])
     def post(self, request, slug, project_id):
-        OPENAI_API_KEY, GPT_ENGINE = get_gpt_config()
-
-        supported_models = ["gpt-4o-mini", "gpt-4o"]
-        if GPT_ENGINE not in supported_models:
-            return Response(
-                {"error": f"Unsupported model. Please use one of: {', '.join(supported_models)}"},
-                status=status.HTTP_400_BAD_REQUEST,
-            )
+        api_key, model, provider = get_llm_config()
 
-        if not OPENAI_API_KEY or not GPT_ENGINE:
+        if not api_key or not model or not provider:
             return Response(
-                {"error": "OpenAI API key and engine is required"},
+                {"error": "LLM provider API key and model are required"},
                 status=status.HTTP_400_BAD_REQUEST,
             )
@@ -77,7 +146,7 @@ def post(self, request, slug, project_id):
             {"error": "Task is required"}, status=status.HTTP_400_BAD_REQUEST
         )
 
-        text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
+        text, error = get_llm_response(task, request.data.get("prompt", False), api_key, model, provider)
         if not text and error:
             return Response(
                 {"error": "An internal error has occurred."},
                 status=status.HTTP_500_INTERNAL_SERVER_ERROR,
             )
@@ -101,11 +170,11 @@ def post(self, request, slug, project_id):
 class WorkspaceGPTIntegrationEndpoint(BaseAPIView):
     @allow_permission(allowed_roles=[ROLE.ADMIN, ROLE.MEMBER], level="WORKSPACE")
     def post(self, request, slug):
-        OPENAI_API_KEY, GPT_ENGINE = get_gpt_config()
+        api_key, model, provider = get_llm_config()
 
-        if not OPENAI_API_KEY or not GPT_ENGINE:
+        if not api_key or not model or not provider:
             return Response(
-                {"error": "OpenAI API key and engine is required"},
+                {"error": "LLM provider API key and model are required"},
                 status=status.HTTP_400_BAD_REQUEST,
             )
@@ -115,7 +184,7 @@ def post(self, request, slug):
             {"error": "Task is required"}, status=status.HTTP_400_BAD_REQUEST
         )
 
-        text, error = get_gpt_response(task, request.data.get("prompt", False), OPENAI_API_KEY, GPT_ENGINE)
+        text, error = get_llm_response(task, request.data.get("prompt", False), api_key, model, provider)
         if not text and error:
             return Response(
                 {"error": "An internal error has occurred."},
                 status=status.HTTP_500_INTERNAL_SERVER_ERROR,
             )

From 2360af5ef9b7e60224f835f22e09f0b8dc5d6f9d Mon Sep 17 00:00:00 2001
From: akash5100
Date: Thu, 12 Dec 2024 13:44:55 +0530
Subject: [PATCH 7/9] handling edge cases

---
 apiserver/plane/app/views/external/base.py | 21 +++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index c886a01a10f..33d3c6afeef 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -86,6 +86,7 @@ def get_llm_config() -> Tuple[str | None, str | None, str | None]:
     provider = SUPPORTED_PROVIDERS.get(provider_key.lower())
     if not provider:
+        log_exception(ValueError(f"Unsupported provider: {provider_key}"))
         return None, None, None
 
     api_key, _ = get_configuration_value([
         {
             "key": provider.api_key_env,
             "default": os.environ.get(provider.api_key_env, None),
         }
     ])
 
     if not api_key:
+        log_exception(ValueError(f"Missing API key for provider: {provider.name}"))
         return None, None, None
 
-    # If no model specified, use provider's default
+    # If no model specified, use provider's default
     if not model:
         model = provider.default_model
 
-    # Validate model is supported by provider
+    # Validate model is supported by provider
     if model not in provider.models:
-        return None, None, None
-
+        log_exception(ValueError(
+            f"Model {model} not supported by {provider.name}. "
+            f"Supported models: {', '.join(provider.models)}"
+        ))
     return api_key, model, provider_key
@@ -126,8 +130,13 @@ def get_llm_response(task, prompt, api_key: str, model: str, provider: str) -> T
         text = response.choices[0].message.content.strip()
         return text, None
     except Exception as e:
         log_exception(e)
-        return None, "Error has occurred while generating response from OpenAI GPT"
-
+        error_type = e.__class__.__name__
+        if error_type == "AuthenticationError":
+            return None, f"Invalid API key for {provider}"
+        elif error_type == "RateLimitError":
+            return None, f"Rate limit exceeded for {provider}"
+        else:
+            return None, f"Error occurred while generating response from {provider}"
 
 class GPTIntegrationEndpoint(BaseAPIView):

From 411e16a5bf3909255a77230716fcc6d3ab90e087 Mon Sep 17 00:00:00 2001
From: akash5100
Date: Thu, 12 Dec 2024 14:54:39 +0530
Subject: [PATCH 8/9] adds new envs to instance store

---
 .../management/commands/configure_instance.py | 28 +++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/apiserver/plane/license/management/commands/configure_instance.py b/apiserver/plane/license/management/commands/configure_instance.py
index 548c9c77ed0..6476049cfab 100644
--- a/apiserver/plane/license/management/commands/configure_instance.py
+++ b/apiserver/plane/license/management/commands/configure_instance.py
@@ -134,9 +134,33 @@ def handle(self, *args, **options):
         {
             "key": "OPENAI_API_KEY",
             "value": os.environ.get("OPENAI_API_KEY"),
-            "category": "OPENAI",
+            "category": "AI",
             "is_encrypted": True,
         },
+        {
+            "key": "ANTHROPIC_API_KEY",
+            "value": os.environ.get("ANTHROPIC_API_KEY"),
+            "category": "AI",
+            "is_encrypted": True,
+        },
+        {
+            "key": "GEMINI_API_KEY",
+            "value": os.environ.get("GEMINI_API_KEY"),
+            "category": "AI",
+            "is_encrypted": True,
+        },
+        {
+            "key": "LLM_PROVIDER",
+            "value": os.environ.get("LLM_PROVIDER", "openai"),
+            "category": "AI",
+            "is_encrypted": False,
+        },
+        {
+            "key": "LLM_MODEL",
+            "value": os.environ.get("LLM_MODEL", "gpt-4o-mini"),
+            "category": "AI",
+            "is_encrypted": False,
+        },
         {
             "key": "GPT_ENGINE",
             "value": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
             "category": "SMTP",
             "is_encrypted": False,
         },
         {
             "key": "UNSPLASH_ACCESS_KEY",
-            "value": os.environ.get("UNSPLASH_ACESS_KEY", ""),
+            "value": os.environ.get("UNSPLASH_ACCESS_KEY", ""),
             "category": "UNSPLASH",
             "is_encrypted": True,
         },

From 2d597b8a5a409cfdbe2ce8af39ef7942ae58a75f Mon Sep 17 00:00:00 2001
From: akash5100
Date: Mon, 23 Dec 2024 13:36:38 +0530
Subject: [PATCH 9/9] strategy pattern for llm config

---
 .env.example                                  |  9 +------
 apiserver/plane/app/views/external/base.py    | 25 ++++++++-----------
 .../management/commands/configure_instance.py | 19 +++-----------
 3 files changed, 15 insertions(+), 38 deletions(-)

diff --git a/.env.example b/.env.example
index c6f7dbb6ebf..8b11217b5d4 100644
--- a/.env.example
+++ b/.env.example
@@ -27,16 +27,9 @@ FILE_SIZE_LIMIT=5242880
 # GPT settings
 OPENAI_API_BASE="https://api.openai.com/v1" # deprecated
+OPENAI_API_KEY="sk-" # deprecated
 GPT_ENGINE="gpt-3.5-turbo" # deprecated
 
-# AI Assistant Settings
-LLM_PROVIDER=openai # Can be "openai", "anthropic", or "gemini"
-LLM_MODEL=gpt-4o-mini # The specific model you want to use
-
-OPENAI_API_KEY=your-openai-api-key
-ANTHROPIC_API_KEY=your-anthropic-api-key
-GEMINI_API_KEY=your-gemini-api-key
-
 # Settings related to Docker
 DOCKERIZED=1 # deprecated
diff --git a/apiserver/plane/app/views/external/base.py b/apiserver/plane/app/views/external/base.py
index 33d3c6afeef..ae5c47f1455 100644
--- a/apiserver/plane/app/views/external/base.py
+++ b/apiserver/plane/app/views/external/base.py
@@ -24,7 +24,6 @@ class LLMProvider:
     """Base class for LLM provider configurations"""
     name: str = ""
     models: List[str] = []
-    api_key_env: str = ""
     default_model: str = ""
 
     @classmethod
@@ -38,7 +37,6 @@ class OpenAIProvider(LLMProvider):
     name = "OpenAI"
     models = ["gpt-3.5-turbo", "gpt-4o-mini", "gpt-4o", "o1-mini", "o1-preview"]
-    api_key_env = "OPENAI_API_KEY"
     default_model = "gpt-4o-mini"
 
 class AnthropicProvider(LLMProvider):
@@ -53,13 +51,11 @@ class AnthropicProvider(LLMProvider):
         "claude-instant-1.2",
         "claude-instant-1"
     ]
-    api_key_env = "ANTHROPIC_API_KEY"
     default_model = "claude-3-sonnet-20240229"
 
 class GeminiProvider(LLMProvider):
     name = "Gemini"
     models = ["gemini-pro", "gemini-1.5-pro-latest", "gemini-pro-vision"]
-    api_key_env = "GEMINI_API_KEY"
     default_model = "gemini-pro"
 
 SUPPORTED_PROVIDERS = {
@@ -73,14 +69,18 @@ def get_llm_config() -> Tuple[str | None, str | None, str | None]:
     Helper to get LLM configuration values, returns:
     - api_key, model, provider
     """
-    provider_key, model = get_configuration_value([
+    api_key, provider_key, model = get_configuration_value([
+        {
+            "key": "LLM_API_KEY",
+            "default": os.environ.get("LLM_API_KEY", None),
+        },
         {
             "key": "LLM_PROVIDER",
             "default": os.environ.get("LLM_PROVIDER", "openai"),
         },
         {
             "key": "LLM_MODEL",
-            "default": None,
+            "default": os.environ.get("LLM_MODEL", None),
         },
     ])
 
@@ -89,27 +89,22 @@ def get_llm_config() -> Tuple[str | None, str | None, str | None]:
         log_exception(ValueError(f"Unsupported provider: {provider_key}"))
         return None, None, None
 
-    api_key, _ = get_configuration_value([
-        {
-            "key": provider.api_key_env,
-            "default": os.environ.get(provider.api_key_env, None),
-        }
-    ])
-
     if not api_key:
         log_exception(ValueError(f"Missing API key for provider: {provider.name}"))
         return None, None, None
 
-    # If no model specified, use provider's default
+    # If no model specified, use provider's default
     if not model:
         model = provider.default_model
 
-    # Validate model is supported by provider
+    # Validate model is supported by provider
     if model not in provider.models:
         log_exception(ValueError(
             f"Model {model} not supported by {provider.name}. "
             f"Supported models: {', '.join(provider.models)}"
         ))
+        return None, None, None
+
     return api_key, model, provider_key
diff --git a/apiserver/plane/license/management/commands/configure_instance.py b/apiserver/plane/license/management/commands/configure_instance.py
index 6476049cfab..8458df5df6d 100644
--- a/apiserver/plane/license/management/commands/configure_instance.py
+++ b/apiserver/plane/license/management/commands/configure_instance.py
@@ -132,20 +132,8 @@ def handle(self, *args, **options):
             "is_encrypted": False,
         },
         {
-            "key": "OPENAI_API_KEY",
-            "value": os.environ.get("OPENAI_API_KEY"),
-            "category": "AI",
-            "is_encrypted": True,
-        },
-        {
-            "key": "ANTHROPIC_API_KEY",
-            "value": os.environ.get("ANTHROPIC_API_KEY"),
-            "category": "AI",
-            "is_encrypted": True,
-        },
-        {
-            "key": "GEMINI_API_KEY",
-            "value": os.environ.get("GEMINI_API_KEY"),
+            "key": "LLM_API_KEY",
+            "value": os.environ.get("LLM_API_KEY"),
             "category": "AI",
             "is_encrypted": True,
         },
         {
             "key": "LLM_PROVIDER",
             "value": os.environ.get("LLM_PROVIDER", "openai"),
             "category": "AI",
             "is_encrypted": False,
         },
         {
             "key": "LLM_MODEL",
             "value": os.environ.get("LLM_MODEL", "gpt-4o-mini"),
             "category": "AI",
             "is_encrypted": False,
         },
+        # Deprecated, use LLM_MODEL
         {
-            "key": "GPT_ENGINE",
+            "key": "GPT_ENGINE",
             "value": os.environ.get("GPT_ENGINE", "gpt-3.5-turbo"),
             "category": "SMTP",
             "is_encrypted": False,