
Commit

Add stream_chat and conditionally set AutoModelClass to MllamaForConditionalGeneration
neon-ninja authored Nov 21, 2024
1 parent 6e3d66f commit 13f899c
Showing 1 changed file with 10 additions and 0 deletions.
@@ -19,6 +19,7 @@
    AutoConfig,
    Qwen2VLForConditionalGeneration,
    PaliGemmaForConditionalGeneration,
    MllamaForConditionalGeneration,
)
from qwen_vl_utils import (
    process_vision_info,
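
For context, MllamaForConditionalGeneration is the transformers class for multimodal Llama 3.2 Vision checkpoints. A minimal sketch of loading one directly, outside this wrapper (the checkpoint ID is illustrative, not taken from this file):

    from transformers import AutoProcessor, MllamaForConditionalGeneration

    # Illustrative checkpoint; any Mllama-architecture model ID works here.
    model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
    model = MllamaForConditionalGeneration.from_pretrained(model_id, device_map="auto")
    processor = AutoProcessor.from_pretrained(model_id)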
@@ -93,6 +94,8 @@ def __init__(self, **kwargs: Any) -> None:
            AutoModelClass = Qwen2VLForConditionalGeneration
        if "PaliGemmaForConditionalGeneration" in architecture:
            AutoModelClass = PaliGemmaForConditionalGeneration
        if "MllamaForConditionalGeneration" in architecture:
            AutoModelClass = MllamaForConditionalGeneration

        # Load the model based on the architecture
        self._model = AutoModelClass.from_pretrained(
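
The if-chain above dispatches on the architecture string reported by the model's config (presumably from AutoConfig, imported in the first hunk). An equivalent table-driven sketch; the resolve_model_class helper is hypothetical, and the AutoModelForCausalLM fallback is an assumption rather than something visible in this diff:

    from transformers import (
        AutoModelForCausalLM,
        MllamaForConditionalGeneration,
        PaliGemmaForConditionalGeneration,
        Qwen2VLForConditionalGeneration,
    )

    # Hypothetical helper mirroring the if-chain in __init__ above.
    ARCH_TO_CLASS = {
        "Qwen2VLForConditionalGeneration": Qwen2VLForConditionalGeneration,
        "PaliGemmaForConditionalGeneration": PaliGemmaForConditionalGeneration,
        "MllamaForConditionalGeneration": MllamaForConditionalGeneration,
    }

    def resolve_model_class(architecture: str):
        # Substring match, like the original `in` checks; fallback assumed.
        for name, cls in ARCH_TO_CLASS.items():
            if name in architecture:
                return cls
        return AutoModelForCausalLM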
@@ -179,6 +182,13 @@ async def astream_chat(
            "HuggingFaceMultiModal does not support async streaming chat yet."
        )

    def stream_chat(
        self, messages: Sequence[ChatMessage], **kwargs: Any
    ) -> ChatResponseGen:
        raise NotImplementedError(
            "HuggingFaceMultiModal does not support streaming chat yet."
        )

    async def astream_complete(
        self, prompt: str, image_documents: Sequence[ImageNode], **kwargs: Any
    ) -> CompletionResponseAsyncGen:
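
The new stream_chat is a stub that raises, matching the existing async stubs. If streaming were implemented later, the usual transformers pattern is to run generate() in a background thread and yield decoded text from a TextIteratorStreamer. A minimal sketch, assuming a processor with an attached tokenizer; everything except the transformers API is illustrative:

    from threading import Thread
    from transformers import TextIteratorStreamer

    # Illustrative helper; not part of this commit.
    def stream_generate(model, processor, inputs, max_new_tokens=256):
        # Yields decoded text chunks as generate() produces tokens.
        streamer = TextIteratorStreamer(
            processor.tokenizer, skip_prompt=True, skip_special_tokens=True
        )
        thread = Thread(
            target=model.generate,
            kwargs={**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens},
        )
        thread.start()
        for text in streamer:
            yield text
        thread.join()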