diff --git a/docs/source/contributing/model/multimodal.md b/docs/source/contributing/model/multimodal.md
index c4894d39edc9..0c7496334fb7 100644
--- a/docs/source/contributing/model/multimodal.md
+++ b/docs/source/contributing/model/multimodal.md
@@ -79,6 +79,17 @@ Further update the model as follows:
           return inputs_embeds
   ```
 
+- Implement the {meth}`~vllm.model_executor.models.interfaces.SupportsMultiModal.get_language_model` getter to provide stable access to the underlying language model.
+
+  ```python
+  class YourModelForImage2Seq(nn.Module):
+      ...
+
+      def get_language_model(self) -> torch.nn.Module:
+          # Change `language_model` according to your implementation.
+          return self.language_model
+  ```
+
 - Once the above steps are done, update the model class with the {class}`~vllm.model_executor.models.interfaces.SupportsMultiModal` interface.
 
   ```diff
diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py
index 8cd3be90ca8d..af340feffcf9 100644
--- a/vllm/model_executor/models/aria.py
+++ b/vllm/model_executor/models/aria.py
@@ -605,6 +605,9 @@ def _process_image_input(
 
         return self.multi_modal_projector(image_outputs, image_attn_mask)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/aya_vision.py b/vllm/model_executor/models/aya_vision.py
index 6b68885d375a..929c8f2a82a2 100644
--- a/vllm/model_executor/models/aya_vision.py
+++ b/vllm/model_executor/models/aya_vision.py
@@ -424,6 +424,9 @@ def _parse_and_validate_image_input(
             num_patches=num_patches,
         )
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py
index db9d42f5b86a..a1f20ea4e614 100644
--- a/vllm/model_executor/models/blip2.py
+++ b/vllm/model_executor/models/blip2.py
@@ -627,6 +627,9 @@ def _process_image_input(self,
 
         return self.language_projection(query_output)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py
index 3d527cb6f529..d46ae5327dcb 100644
--- a/vllm/model_executor/models/chameleon.py
+++ b/vllm/model_executor/models/chameleon.py
@@ -988,6 +988,9 @@ def _parse_and_validate_image_input(
             data=self._validate_pixel_values(pixel_values),
         )
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/deepseek_vl2.py b/vllm/model_executor/models/deepseek_vl2.py
index 4554a997755f..03d5be2927bb 100644
--- a/vllm/model_executor/models/deepseek_vl2.py
+++ b/vllm/model_executor/models/deepseek_vl2.py
@@ -604,6 +604,9 @@ def _process_image_input(
         return self._pixel_values_to_embedding(
             pixel_values=pixel_values, images_spatial_crop=images_spatial_crop)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py
index 70b8d51b713c..62fd09398fac 100644
--- a/vllm/model_executor/models/florence2.py
+++ b/vllm/model_executor/models/florence2.py
@@ -1050,6 +1050,9 @@ def _process_image_input(
         pixel_values = image_input["data"]
         return self._encode_image(pixel_values)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py
index 189b91db4a86..c0a0f572ff3c 100644
--- a/vllm/model_executor/models/fuyu.py
+++ b/vllm/model_executor/models/fuyu.py
@@ -341,6 +341,9 @@ def _process_image_input(
 
         return vision_embeddings_flat.split(patches_per_image, dim=0)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py
index 9552ee1f0b3a..93d0aa301f54 100644
--- a/vllm/model_executor/models/gemma3_mm.py
+++ b/vllm/model_executor/models/gemma3_mm.py
@@ -591,6 +591,9 @@ def _process_image_input(
             e.flatten(0, 1) for e in image_embeds.split(num_patches.tolist())
         ]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/glm4v.py b/vllm/model_executor/models/glm4v.py
index c190a4585591..6d7b760d0dd7 100644
--- a/vllm/model_executor/models/glm4v.py
+++ b/vllm/model_executor/models/glm4v.py
@@ -596,6 +596,9 @@ def _process_image_input(
 
         return self.transformer.vision(pixel_values)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.transformer
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py
index 347106bc4dcf..713b1699a858 100644
--- a/vllm/model_executor/models/idefics3.py
+++ b/vllm/model_executor/models/idefics3.py
@@ -701,6 +701,9 @@ def _process_image_input(
             e.flatten(0, 1) for e in image_features.split(num_patches.tolist())
         ]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py
index c61254ac9999..0cda199af471 100644
--- a/vllm/model_executor/models/interfaces.py
+++ b/vllm/model_executor/models/interfaces.py
@@ -56,6 +56,18 @@ def get_multimodal_embeddings(
         """
         ...
 
+    def get_language_model(self) -> torch.nn.Module:
+        """
+        Returns the underlying language model used for text generation.
+
+        This is typically the `torch.nn.Module` instance responsible for
+        processing the merged multimodal embeddings and producing hidden states.
+
+        Returns:
+            torch.nn.Module: The core language model component.
+        """
+        ...
+
     # Only for models that support v0 chunked prefill
     # TODO(ywang96): Remove this overload once v0 is deprecated
     @overload
diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py
index cf5608e3de7b..7fd628fa6c38 100644
--- a/vllm/model_executor/models/internvl.py
+++ b/vllm/model_executor/models/internvl.py
@@ -884,6 +884,9 @@ def _set_visual_token_mask(self, input_ids: torch.Tensor) -> None:
         else:
             self.visual_token_mask = None
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py
index b34ac38f6807..9516550005d5 100644
--- a/vllm/model_executor/models/llava.py
+++ b/vllm/model_executor/models/llava.py
@@ -674,6 +674,9 @@ def _process_image_input(
             image_embeds = torch.split(image_embeds, feature_sizes)
         return image_embeds
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py
index 4de13e540735..9c4d0e1fc275 100644
--- a/vllm/model_executor/models/llava_next.py
+++ b/vllm/model_executor/models/llava_next.py
@@ -480,6 +480,9 @@ def _process_image_input(
             for i, patch_features_batch in enumerate(patch_embeddings)
         ]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py
index 780af72d5720..6fc4c187efa7 100644
--- a/vllm/model_executor/models/llava_next_video.py
+++ b/vllm/model_executor/models/llava_next_video.py
@@ -421,6 +421,9 @@ def _process_video_pixels(self, inputs: LlavaNextVideoPixelInputs):
 
         return [e.flatten(0, 1) for e in embeds]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         video_input = self._parse_and_validate_video_input(**kwargs)
diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py
index c7e13bb352f4..5fbd27b9b0b3 100644
--- a/vllm/model_executor/models/llava_onevision.py
+++ b/vllm/model_executor/models/llava_onevision.py
@@ -852,6 +852,9 @@ def apply_pooling(self, image_features: torch.Tensor, stride: int = 2):
         image_feature = image_feature.view(batch_frames, -1, dim)
         return image_feature
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py
index eb20a963ae2a..12b5364cbaf8 100644
--- a/vllm/model_executor/models/minicpmv.py
+++ b/vllm/model_executor/models/minicpmv.py
@@ -892,6 +892,9 @@ def _process_multimodal_inputs(self, modalities: dict):
 
         return multimodal_embeddings
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.llm
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
diff --git a/vllm/model_executor/models/mistral3.py b/vllm/model_executor/models/mistral3.py
index b6fbc6b1fa3d..67c0e2ec233b 100644
--- a/vllm/model_executor/models/mistral3.py
+++ b/vllm/model_executor/models/mistral3.py
@@ -514,6 +514,9 @@ def _process_image_input(
             image_embeds = (image_embeds, )
         return image_embeds
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py
index 6a2e20840fcf..a67339ca5221 100644
--- a/vllm/model_executor/models/mllama.py
+++ b/vllm/model_executor/models/mllama.py
@@ -1325,6 +1325,9 @@ def flat_encoder_result(self, cross_attention_states: torch.Tensor,
         cross_attention_states = cross_attention_states_flat
         return cross_attention_states
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_cross_attention_states(
         self,
         image_inputs: MllamaImagePixelInputs,
diff --git a/vllm/model_executor/models/mllama4.py b/vllm/model_executor/models/mllama4.py
index d76d63774b4e..0499fe09eb94 100644
--- a/vllm/model_executor/models/mllama4.py
+++ b/vllm/model_executor/models/mllama4.py
@@ -742,6 +742,9 @@ def _process_image_input(
             for img in vision_embeddings_flat.split(patches_per_image, dim=0)
         ]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(self,
                                   **kwargs) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py
index 6857bfa810e3..a7551e613dfc 100644
--- a/vllm/model_executor/models/molmo.py
+++ b/vllm/model_executor/models/molmo.py
@@ -1488,6 +1488,9 @@ def _process_image_input(
             )
         ]
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py
index 845f77ac39ce..274163ac9c42 100644
--- a/vllm/model_executor/models/paligemma.py
+++ b/vllm/model_executor/models/paligemma.py
@@ -323,6 +323,9 @@ def _process_image_input(
 
         return self.multi_modal_projector(image_features)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py
index d3b0688f21c3..344f348cd3d9 100644
--- a/vllm/model_executor/models/phi3v.py
+++ b/vllm/model_executor/models/phi3v.py
@@ -674,6 +674,9 @@ def _process_image_input(
 
         return image_embeds
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/phi4mm.py b/vllm/model_executor/models/phi4mm.py
index cb75ee1ea2cc..ec19797f8875 100644
--- a/vllm/model_executor/models/phi4mm.py
+++ b/vllm/model_executor/models/phi4mm.py
@@ -1802,3 +1802,6 @@ def get_mm_mapping(self) -> MultiModelKeys:
             connector=["audio_projection_for_vision", "audio_projection"],
             tower_model=["vision_encoder", "embed_tokens_extend"],
         )
+
+    def get_language_model(self) -> torch.nn.Module:
+        return self.model
diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py
index e07c6516aef2..328d52711b5e 100644
--- a/vllm/model_executor/models/pixtral.py
+++ b/vllm/model_executor/models/pixtral.py
@@ -396,6 +396,9 @@ def _process_image_input(
             image_embeds = torch.split(image_embeds, feature_sizes)
         return image_embeds
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index 1e6ff1fec6d5..84b7e59c8a0a 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -967,6 +967,9 @@ def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
                     **kwargs)
         return modalities
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
 
diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py
index 54220037d253..9f2593fc94f4 100644
--- a/vllm/model_executor/models/qwen2_audio.py
+++ b/vllm/model_executor/models/qwen2_audio.py
@@ -355,6 +355,9 @@ def _process_audio_input(self,
         return torch.split(masked_audio_features,
                            audio_output_lengths.flatten().tolist())
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         audio_input = self._parse_and_validate_audio_input(**kwargs)
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index a7800d415366..f93654d0fcb3 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -1276,6 +1276,9 @@ def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
 
         return modalities
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
 
diff --git a/vllm/model_executor/models/qwen_vl.py b/vllm/model_executor/models/qwen_vl.py
index a2ec9a9a4d17..2e941f3b7a31 100644
--- a/vllm/model_executor/models/qwen_vl.py
+++ b/vllm/model_executor/models/qwen_vl.py
@@ -740,6 +740,9 @@ def _process_image_input(self,
 
         return self.transformer.visual(image_input["data"])
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.transformer
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/skyworkr1v.py b/vllm/model_executor/models/skyworkr1v.py
index e3deae828a33..a8460a2e1043 100644
--- a/vllm/model_executor/models/skyworkr1v.py
+++ b/vllm/model_executor/models/skyworkr1v.py
@@ -889,6 +889,9 @@ def _set_visual_token_mask(self, input_ids: torch.Tensor) -> None:
         else:
             self.visual_token_mask = None
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         image_input = self._parse_and_validate_image_input(**kwargs)
diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py
index 6e73a2ae656c..6e9d15261b79 100644
--- a/vllm/model_executor/models/ultravox.py
+++ b/vllm/model_executor/models/ultravox.py
@@ -563,6 +563,9 @@ def _process_audio_input(
         ]
         return flattened_embeddings.split(embed_lens)
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.language_model
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         audio_input = self._parse_and_validate_audio_input(**kwargs)
diff --git a/vllm/model_executor/models/whisper.py b/vllm/model_executor/models/whisper.py
index e83abbe8b252..7751f96da6ae 100644
--- a/vllm/model_executor/models/whisper.py
+++ b/vllm/model_executor/models/whisper.py
@@ -692,6 +692,9 @@ def forward(
         )
         return decoder_outputs
 
+    def get_language_model(self) -> torch.nn.Module:
+        return self.model.decoder
+
     def get_multimodal_embeddings(
             self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
         # TODO: This method does not obey the interface for SupportsMultiModal.
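For context, a minimal sketch (not part of the diff above) of what the new getter looks like from a caller's point of view. `ToyImage2Seq` and its `vision_tower`/`language_model` submodules are hypothetical stand-ins; only the `get_language_model()` name and signature come from the change itself.

```python
# Illustrative only: a toy module with the shape of the new interface method.
import torch
import torch.nn as nn


class ToyImage2Seq(nn.Module):

    def __init__(self) -> None:
        super().__init__()
        self.vision_tower = nn.Linear(16, 32)    # stand-in vision encoder
        self.language_model = nn.Linear(32, 32)  # stand-in text backbone

    def get_language_model(self) -> torch.nn.Module:
        # Return the module that consumes the merged multimodal embeddings.
        return self.language_model


model = ToyImage2Seq()
# Callers can reach the text backbone without knowing the attribute name
# each architecture happens to use (language_model, model, llm, transformer, ...).
lm = model.get_language_model()
print(type(lm).__name__)  # Linear
```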