Adding litellm.supports_vision check for vision support
enoch3712 committed Sep 27, 2024
1 parent 1393a65 commit 94932be
Showing 2 changed files with 9 additions and 5 deletions.
7 changes: 7 additions & 0 deletions extract_thinker/extractor.py
@@ -3,6 +3,7 @@
from io import BytesIO
from typing import Any, Dict, List, Optional, IO, Union

import litellm
from pydantic import BaseModel
from extract_thinker.document_loader.document_loader import DocumentLoader
from extract_thinker.models.classification import Classification
@@ -200,6 +201,9 @@ def _classify(self, content: Any, classifications: List[Classification], image:
)
for classification in classifications:
if classification.image:
if not litellm.supports_vision(model=self.llm.model):
raise ValueError(f"Model {self.llm.model} is not supported for vision, since its not a vision model.")

messages.append({
"role": "user",
"content": [
@@ -270,6 +274,9 @@ def _extract(self,
messages.append({"role": "user", "content": "##Content\n\n" + content})

if vision:
if not litellm.supports_vision(model=self.llm.model):
raise ValueError(f"Model {self.llm.model} is not supported for vision, since its not a vision model.")

base64_encoded_image = encode_image(
file_or_stream, is_stream
)
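
Both _classify and _extract now follow the same guard pattern. A minimal sketch of that pattern (the ensure_vision_support helper and the example model names are illustrative, not part of the commit; actual results depend on litellm's model capability data):

import litellm

def ensure_vision_support(model: str) -> None:
    # supports_vision consults litellm's model capability data and
    # returns True only for models that accept image input.
    if not litellm.supports_vision(model=model):
        raise ValueError(f"Model {model} is not supported for vision, since it is not a vision model.")

ensure_vision_support("gpt-4o")         # expected to pass for a vision-capable model
ensure_vision_support("gpt-3.5-turbo")  # expected to raise ValueError
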
7 changes: 2 additions & 5 deletions extract_thinker/image_splitter.py
@@ -7,14 +7,11 @@
from extract_thinker.splitter import Splitter
from extract_thinker.utils import extract_json

VISION_MODELS = ["gpt-4o", "gpt-4-turbo", "model3", "claude-3-haiku-20240307", "claude-3-opus-20240229", "claude-3-sonnet-20240229"]


class ImageSplitter(Splitter):

def __init__(self, model: str):
if model not in VISION_MODELS:
raise ValueError(f"Model {model} is not supported for ImageSplitter. Supported models are {VISION_MODELS}")
if not litellm.supports_vision(model=model):
raise ValueError(f"Model {model} is not supported for ImageSplitter, since its not a vision model.")
self.model = model

def encode_image(self, image):
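
A short usage sketch for the updated ImageSplitter constructor (the model names below are illustrative; whether a given model is accepted depends on litellm's capability data):

from extract_thinker.image_splitter import ImageSplitter

# Accepted when litellm reports vision support for the model.
splitter = ImageSplitter("gpt-4o")

# Expected to raise, since litellm does not report vision support here.
try:
    ImageSplitter("gpt-3.5-turbo")
except ValueError as error:
    print(error)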
