commit f8eea45 (1 parent: 41d5cfe)
vllm/model_executor/models/mllama4.py
@@ -742,6 +742,9 @@ def _process_image_input(
             for img in vision_embeddings_flat.split(patches_per_image, dim=0)
         ]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(self,
                                   **kwargs) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
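
The change adds a `get_language_model` accessor that exposes the text backbone of the multimodal model. A minimal, self-contained sketch of the pattern follows; the `Dummy*` classes are illustrative stand-ins written for this note, not vLLM code, and only the accessor's body matches the diff above.

# Hypothetical usage sketch (not from this commit): the Dummy* classes
# below are stand-ins showing how a caller retrieves the text backbone
# through the get_language_model() accessor added in mllama4.py.
import torch


class DummyLanguageModel(torch.nn.Module):
    """Stand-in for the Llama4 text backbone; for illustration only."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x


class DummyMllama4(torch.nn.Module):
    """Minimal stand-in for the multimodal model in the diff."""

    def __init__(self) -> None:
        super().__init__()
        self.language_model = DummyLanguageModel()

    def get_language_model(self) -> torch.nn.Module:
        # Same body as the method added in the diff.
        return self.language_model


model = DummyMllama4()
text_backbone = model.get_language_model()
assert isinstance(text_backbone, torch.nn.Module)

An accessor like this lets callers reach the text backbone through a stable method instead of depending on the `language_model` attribute name directly.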