
Commit

feat: add check for supported llm
ankit-v2-3 committed Nov 29, 2024
1 parent 86983b2 · commit d0da3fa
Showing 7 changed files with 28 additions and 2 deletions.
17 changes: 16 additions & 1 deletion backend/director/agents/base.py
@@ -5,7 +5,7 @@

from openai_function_calling import FunctionInferrer

from director.core.session import Session, OutputMessage
from director.core.session import Session, OutputMessage, TextContent, MsgStatus

logger = logging.getLogger(__name__)

@@ -41,6 +41,20 @@ def get_parameters(self):
)
return parameters

def _check_supported_llm(self):
"""Check if supported_llm is present and validate LLM."""
if hasattr(self, "supported_llm") and hasattr(self, "llm"):
if self.llm.llm_type not in self.supported_llm:
                error = f"`@{self.agent_name}` Agent does not support the configured LLM `{self.llm.llm_type.upper()}`. To use this agent, please configure one of the following LLMs: {[llm.upper() for llm in self.supported_llm]}."
self.output_message.content.append(
TextContent(
status_message="LLM not supported.",
text=error,
status=MsgStatus.error,
)
)
raise Exception(error)

def to_llm_format(self):
"""Convert the agent to LLM tool format."""
return {
@@ -59,6 +73,7 @@ def agent_description(self):

def safe_call(self, *args, **kwargs):
try:
self._check_supported_llm()
return self.run(*args, **kwargs)

except Exception as e:
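
For context, here is a minimal sketch of how a concrete agent opts into this check, mirroring the constructor pattern in the agent diffs below. The `BaseAgent` class name, `SampleAgent`, and `SAMPLE_PARAMETERS` are assumptions for illustration; the point is that declaring `supported_llm` next to `self.llm` lets `safe_call()` run `_check_supported_llm()` before `run()`, so an unsupported LLM surfaces as a clear error message instead of a mid-run failure.

from director.agents.base import BaseAgent  # assumed class name; not shown in this diff
from director.constants import LLMType
from director.core.session import Session
from director.llm import get_default_llm

# Hypothetical parameter schema, mirroring the *_PARAMETERS constants used by real agents.
SAMPLE_PARAMETERS = {
    "type": "object",
    "properties": {
        "video_id": {"type": "string", "description": "ID of the video to process."}
    },
    "required": ["video_id"],
}


class SampleAgent(BaseAgent):
    """Hypothetical agent used only to illustrate the supported_llm contract."""

    def __init__(self, session: Session, **kwargs):
        self.agent_name = "sample"
        self.description = "Illustrative agent."
        self.parameters = SAMPLE_PARAMETERS
        self.llm = get_default_llm()
        # Declaring supported_llm opts into the new check: safe_call() will now
        # raise before run() if the configured llm_type is not in this list.
        self.supported_llm = [LLMType.OPENAI, LLMType.VIDEODB_PROXY]
        super().__init__(session=session, **kwargs)

    def run(self, video_id: str, *args, **kwargs):
        """Do the actual work; real agents return an AgentResponse here."""
        ...
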
2 changes: 2 additions & 0 deletions backend/director/agents/meme_maker.py
@@ -11,6 +11,7 @@
VideoContent,
VideoData,
)
from director.constants import LLMType
from director.tools.videodb_tool import VideoDBTool
from director.llm import get_default_llm

@@ -42,6 +43,7 @@ def __init__(self, session: Session, **kwargs):
        self.description = "Generates meme clips and images based on user prompts. This agent uses an LLM to analyze the transcript and visual content of the video to generate memes."
self.parameters = MEMEMAKER_PARAMETERS
self.llm = get_default_llm()
self.supported_llm = [LLMType.OPENAI, LLMType.VIDEODB_PROXY]
super().__init__(session=session, **kwargs)

def _chunk_docs(self, docs, chunk_size):
2 changes: 2 additions & 0 deletions backend/director/agents/profanity_remover.py
@@ -13,6 +13,7 @@
ContextMessage,
RoleTypes,
)
from director.constants import LLMType
from director.llm import get_default_llm
from director.tools.videodb_tool import VideoDBTool

@@ -34,6 +35,7 @@ def __init__(self, session: Session, **kwargs):
)
self.parameters = self.get_parameters()
self.llm = get_default_llm()
self.supported_llm = [LLMType.OPENAI, LLMType.VIDEODB_PROXY]
super().__init__(session=session, **kwargs)

def add_beep(self, videodb_tool, video_id, beep_audio_id, timestamps):
2 changes: 2 additions & 0 deletions backend/director/agents/prompt_clip.py
@@ -11,6 +11,7 @@
VideoContent,
VideoData,
)
from director.constants import LLMType
from director.tools.videodb_tool import VideoDBTool
from director.llm import get_default_llm

@@ -47,6 +48,7 @@ def __init__(self, session: Session, **kwargs):
self.description = "Generates video clips based on user prompts. This agent uses AI to analyze the text of a video transcript and identify sentences relevant to the user prompt for making clips. It then generates video clips based on the identified sentences. Use this tool to create clips based on specific themes or topics from a video."
self.parameters = PROMPTCLIP_AGENT_PARAMETERS
self.llm = get_default_llm()
self.supported_llm = [LLMType.OPENAI, LLMType.VIDEODB_PROXY]
super().__init__(session=session, **kwargs)

def _chunk_docs(self, docs, chunk_size):
Expand Down
2 changes: 2 additions & 0 deletions backend/director/agents/subtitle.py
@@ -11,6 +11,7 @@
VideoData,
MsgStatus,
)
from director.constants import LLMType
from director.tools.videodb_tool import VideoDBTool
from director.llm import get_default_llm

@@ -111,6 +112,7 @@ def __init__(self, session: Session, **kwargs):
        self.description = "An agent designed to add subtitles in different languages to a specified video within VideoDB."
self.llm = get_default_llm()
self.parameters = SUBTITLE_AGENT_PARAMETERS
self.supported_llm = [LLMType.OPENAI, LLMType.VIDEODB_PROXY]
super().__init__(session=session, **kwargs)

def wrap_text(self, text, video_width, max_width_percent=0.60, avg_char_width=20):
1 change: 0 additions & 1 deletion backend/director/handler.py
@@ -20,7 +20,6 @@
from director.agents.composio import ComposioAgent


from director.constants import LLMType
from director.core.session import Session, InputMessage, MsgStatus, TextContent
from director.core.reasoning import ReasoningEngine
from director.db.base import BaseDB
4 changes: 4 additions & 0 deletions backend/director/llm/__init__.py
@@ -4,6 +4,7 @@

from director.llm.openai import OpenAI
from director.llm.anthropic import AnthropicAI
from director.llm.xai import XAI
from director.llm.videodb_proxy import VideoDBProxy


@@ -12,12 +13,15 @@ def get_default_llm():

openai = True if os.getenv("OPENAI_API_KEY") else False
anthropic = True if os.getenv("ANTHROPIC_API_KEY") else False
xai = True if os.getenv("XAI_API_KEY") else False

default_llm = os.getenv("DEFAULT_LLM")

if openai or default_llm == LLMType.OPENAI:
return OpenAI()
elif anthropic or default_llm == LLMType.ANTHROPIC:
return AnthropicAI()
elif xai or default_llm == LLMType.XAI:
return XAI()
else:
return VideoDBProxy()
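
For reference, a small usage sketch of the updated selection order, assuming the director package is importable and the environment variable names shown in this diff; the key value below is a placeholder. With no OpenAI or Anthropic key and no DEFAULT_LLM override, setting XAI_API_KEY now resolves to the xAI backend; with nothing configured at all, the VideoDB proxy remains the fallback.

import os

from director.llm import get_default_llm

# Ensure only the (placeholder) xAI key is visible to get_default_llm().
os.environ.pop("OPENAI_API_KEY", None)
os.environ.pop("ANTHROPIC_API_KEY", None)
os.environ.pop("DEFAULT_LLM", None)
os.environ["XAI_API_KEY"] = "xai-placeholder"

llm = get_default_llm()
print(type(llm).__name__)  # expected: XAI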
