diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py
index c0ed473103ab..9497f15984b7 100644
--- a/vllm/model_executor/models/qwen2_5_omni_thinker.py
+++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py
@@ -146,11 +146,11 @@ def get_hf_processor(
             kwargs["fps"] = fps
         processor = self.ctx.get_hf_processor(
             Qwen2_5OmniProcessor,
-            image_processor=self.get_image_processor(
-                min_pixels=min_pixels,
-                max_pixels=max_pixels,
-                size=size,
-                use_fast=kwargs.get("use_fast", True)),
+            image_processor=self.get_image_processor(min_pixels=min_pixels,
+                                                     max_pixels=max_pixels,
+                                                     size=size,
+                                                     use_fast=kwargs.get(
+                                                         "use_fast", True)),
             **kwargs,
         )
         if not hasattr(processor, "audio_token"):
diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index 4faa0d2c366e..ff53a2775e3d 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -794,11 +794,11 @@ def get_hf_processor(
 
         return self.ctx.get_hf_processor(
             Qwen2_5_VLProcessor,
-            image_processor=self.get_image_processor(
-                min_pixels=min_pixels,
-                max_pixels=max_pixels,
-                size=size,
-                use_fast=kwargs.get("use_fast", True)),
+            image_processor=self.get_image_processor(min_pixels=min_pixels,
+                                                     max_pixels=max_pixels,
+                                                     size=size,
+                                                     use_fast=kwargs.get(
+                                                         "use_fast", True)),
             **kwargs,
         )
 
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 3b939a43e924..690b8e02c2fd 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -759,11 +759,11 @@ def get_hf_processor(
     ) -> Qwen2VLProcessor:
         return self.ctx.get_hf_processor(
             Qwen2VLProcessor,
-            image_processor=self.get_image_processor(
-                min_pixels=min_pixels,
-                max_pixels=max_pixels,
-                size=size,
-                use_fast=kwargs.get("use_fast", True)),
+            image_processor=self.get_image_processor(min_pixels=min_pixels,
+                                                     max_pixels=max_pixels,
+                                                     size=size,
+                                                     use_fast=kwargs.get(
+                                                         "use_fast", True)),
             **kwargs,
         )
 