From d435c0f5ba43f075591bcf7220b4ad37995f86ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Fri, 20 Jun 2025 09:29:53 +0800 Subject: [PATCH 1/5] add tarsier2 support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 汪志鹏 --- docs/models/supported_models.md | 1 + examples/offline_inference/vision_language.py | 32 +++++++ .../vision_language_multi_image.py | 22 +++++ .../multimodal/processing/test_common.py | 1 + vllm/model_executor/models/qwen2_vl.py | 85 ++++++++++++++++++- vllm/model_executor/models/registry.py | 1 + 6 files changed, 141 insertions(+), 1 deletion(-) diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 60f7dacebfa..1198fabfcad 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -562,6 +562,7 @@ Specified using `--task generate`. | `SkyworkR1VChatModel` | Skywork-R1V-38B | T + I | `Skywork/Skywork-R1V-38B` | | ✅︎ | ✅︎ | | `SmolVLMForConditionalGeneration` | SmolVLM2 | T + I | `SmolVLM2-2.2B-Instruct` | ✅︎ | | ✅︎ | | `TarsierForConditionalGeneration` | Tarsier | T + IE+ | `omni-search/Tarsier-7b`,`omni-search/Tarsier-34b` | | ✅︎ | ✅︎ | +| `Tarsier2ForConditionalGeneration` | Tarsier2 | T + IE+ + VE+ | `omni-research/Tarsier2-Recap-7b`,`omni-research/Tarsier2-7b-0115` | | ✅︎ | ✅︎ | ^ You need to set the architecture name via `--hf-overrides` to match the one in vLLM.     • For example, to use DeepSeek-VL2 series models: diff --git a/examples/offline_inference/vision_language.py b/examples/offline_inference/vision_language.py index 15dbd9f4412..57b042ed013 100644 --- a/examples/offline_inference/vision_language.py +++ b/examples/offline_inference/vision_language.py @@ -1040,6 +1040,37 @@ def run_qwen2_5_omni(questions: list[str], modality: str): ) +def run_tarsier2(questions: list[str], modality: str) -> ModelRequestData: + model_name = "omni-research/Tarsier2-Recap-7b" + + engine_args = EngineArgs( + model=model_name, + max_model_len=4096, + hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}, + limit_mm_per_prompt={modality: 1}, + ) + + if modality == "image": + placeholder = "<|image_pad|>" + elif modality == "video": + placeholder = "<|video_pad|>" + + prompts = [ + ( + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>" + f"{question}<|im_end|>\n" + "<|im_start|>assistant\n" + ) + for question in questions + ] + + return ModelRequestData( + engine_args=engine_args, + prompts=prompts, + ) + + # SkyworkR1V def run_skyworkr1v(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" @@ -1112,6 +1143,7 @@ def run_skyworkr1v(questions: list[str], modality: str) -> ModelRequestData: "skywork_chat": run_skyworkr1v, "smolvlm": run_smolvlm, "tarsier": run_tarsier, + "tarsier2": run_tarsier2, } diff --git a/examples/offline_inference/vision_language_multi_image.py b/examples/offline_inference/vision_language_multi_image.py index e55181e4f49..b90c9e568eb 100644 --- a/examples/offline_inference/vision_language_multi_image.py +++ b/examples/offline_inference/vision_language_multi_image.py @@ -828,6 +828,27 @@ def load_tarsier(question: str, image_urls: list[str]) -> ModelRequestData: ) +def load_tarsier2(question: str, image_urls: list[str]) -> ModelRequestData: + model_name = "omni-research/Tarsier2-Recap-7b" + + engine_args = EngineArgs( + model=model_name, + trust_remote_code=True, + max_model_len=32768, + 
limit_mm_per_prompt={"image": len(image_urls)}, + hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}, + ) + + prompt = f"USER: {'<|image_pad|>' * len(image_urls)}\n{question}\n ASSISTANT:" + image_data = [fetch_image(url) for url in image_urls] + + return ModelRequestData( + engine_args=engine_args, + prompt=prompt, + image_data=image_data, + ) + + model_example_map = { "aria": load_aria, "aya_vision": load_aya_vision, @@ -853,6 +874,7 @@ def load_tarsier(question: str, image_urls: list[str]) -> ModelRequestData: "qwen2_5_vl": load_qwen2_5_vl, "smolvlm": load_smolvlm, "tarsier": load_tarsier, + "tarsier2": load_tarsier2, } diff --git a/tests/models/multimodal/processing/test_common.py b/tests/models/multimodal/processing/test_common.py index 1e6608955b3..1ba60178c13 100644 --- a/tests/models/multimodal/processing/test_common.py +++ b/tests/models/multimodal/processing/test_common.py @@ -284,6 +284,7 @@ def _test_processing_correctness_one( "fixie-ai/ultravox-v0_5-llama-3_2-1b", "openai/whisper-large-v3", "omni-research/Tarsier-7b", + "omni-research/Tarsier2-Recap-7b" ]) @pytest.mark.parametrize("hit_rate", [0.3, 0.5, 1.0]) @pytest.mark.parametrize("num_batches", [32]) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 49b709069cd..5c250f07b8f 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -32,12 +32,14 @@ import torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat -from transformers import BatchFeature +from transformers import AutoConfig, BatchFeature from transformers.models.qwen2_vl import (Qwen2VLImageProcessor, Qwen2VLProcessor) from transformers.models.qwen2_vl.configuration_qwen2_vl import ( Qwen2VLConfig, Qwen2VLVisionConfig) from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize +from transformers.models.qwen2_vl.video_processing_qwen2_vl import ( + Qwen2VLVideoProcessor) from vllm.config import VllmConfig from vllm.distributed import parallel_state, tensor_model_parallel_all_gather @@ -69,6 +71,7 @@ from vllm.transformers_utils.config import uses_mrope from vllm.transformers_utils.processor import ( cached_image_processor_from_config) +from vllm.transformers_utils.tokenizer import AnyTokenizer from .interfaces import (MultiModalEmbeddings, SupportsLoRA, SupportsMultiModal, SupportsPP) @@ -1403,3 +1406,83 @@ def get_mm_mapping(self) -> MultiModelKeys: connector="visual.merger.", tower_model="visual.", ) + + +class Tarsier2MultiModalProcessor(Qwen2VLMultiModalProcessor): + pass + + +class Tarsier2ImageProcessor(Qwen2VLImageProcessor): + + def __init__( + self, + size: Optional[dict[str, int]] = None, + **kwargs, + ) -> None: + size = { + "shortest_edge": size["min_pixels"], + "longest_edge": size["max_pixels"] + } + super().__init__(size=size, **kwargs) + + +class Tarsier2Processor(Qwen2VLProcessor): + + def __init__( + self, + vision_config: dict, + tokenizer: AnyTokenizer, + **kwargs, + ): + self.image_processor = Tarsier2ImageProcessor(**vision_config) + super().__init__(image_processor=self.image_processor, + tokenizer=tokenizer, + video_processor=Qwen2VLVideoProcessor(), + chat_template=None, + **kwargs) + + +class Tarsier2ProcessingInfo(Qwen2VLProcessingInfo): + + def get_hf_config(self) -> Qwen2VLConfig: + model_path = self.ctx.model_config.model + original_config = AutoConfig.from_pretrained(model_path) + config_dict = original_config.to_dict() + correct_config = Qwen2VLConfig.from_dict(config_dict) + 
+ return correct_config + + def get_hf_processor(self, **kwargs: object) -> Tarsier2Processor: + return Tarsier2Processor( + vision_config=self.ctx.get_hf_image_processor_config(), + tokenizer=self.get_tokenizer(), + **kwargs, + ) + + def get_image_processor(self) -> Tarsier2ImageProcessor: + return Tarsier2ImageProcessor( + **self.ctx.get_hf_image_processor_config()) + + +@MULTIMODAL_REGISTRY.register_processor(Tarsier2MultiModalProcessor, + info=Tarsier2ProcessingInfo, + dummy_inputs=Qwen2VLDummyInputsBuilder) +class Tarsier2ForConditionalGeneration(Qwen2VLForConditionalGeneration): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={ + "vision_tower.": "visual.", + }) + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + source = vllm_config.model_config.hf_config.text_config + destination = vllm_config.model_config.hf_config + vars(destination).update(vars(source)) + vllm_config.model_config.hf_config.architectures = [ + "Tarsier2ForConditionalGeneration" + ] + super().__init__(vllm_config=vllm_config, prefix=prefix) + + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + + loader = AutoWeightsLoader(self) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 83f7cc6eee0..a5074774ea4 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -216,6 +216,7 @@ "UltravoxModel": ("ultravox", "UltravoxModel"), "Phi4MMForCausalLM": ("phi4mm", "Phi4MMForCausalLM"), "TarsierForConditionalGeneration": ("tarsier", "TarsierForConditionalGeneration"), # noqa: E501 + "Tarsier2ForConditionalGeneration": ("qwen2_vl", "Tarsier2ForConditionalGeneration"), # noqa: E501 # [Encoder-decoder] "Florence2ForConditionalGeneration": ("florence2", "Florence2ForConditionalGeneration"), # noqa: E501 "MllamaForConditionalGeneration": ("mllama", "MllamaForConditionalGeneration"), # noqa: E501 From fa5cd81fd53860e2cd985a898803c403958ffcd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Fri, 20 Jun 2025 10:00:11 +0800 Subject: [PATCH 2/5] xx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 汪志鹏 --- tests/models/registry.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/models/registry.py b/tests/models/registry.py index fb93ba60c2e..d59dd5c8665 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -397,6 +397,8 @@ def check_available_online( trust_remote_code=True), "TarsierForConditionalGeneration": _HfExamplesInfo("omni-research/Tarsier-7b", # noqa: E501 hf_overrides={"architectures": ["TarsierForConditionalGeneration"]}), # noqa: E501 + "Tarsier2ForConditionalGeneration": _HfExamplesInfo("omni-research/Tarsier2-Recap-7b", # noqa: E501 + hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}), # noqa: E501 # [Encoder-decoder] # Florence-2 uses BartFastTokenizer which can't be loaded from AutoTokenizer # Therefore, we borrow the BartTokenizer from the original Bart model From 8ce194d441a9dd7272ce02426e82fc01032c879d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Fri, 20 Jun 2025 10:33:30 +0800 Subject: [PATCH 3/5] xx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 汪志鹏 --- vllm/model_executor/models/qwen2_vl.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git 
a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 5c250f07b8f..c63e0f9fd75 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -1419,11 +1419,15 @@ def __init__( size: Optional[dict[str, int]] = None, **kwargs, ) -> None: - size = { - "shortest_edge": size["min_pixels"], - "longest_edge": size["max_pixels"] - } - super().__init__(size=size, **kwargs) + if size is not None and "min_pixels" in size and "max_pixels" in size: + # Remap if Tarsier2-specific format is provided + remapped_size = { + "shortest_edge": size["min_pixels"], + "longest_edge": size["max_pixels"] + } + super().__init__(size=remapped_size, **kwargs) + else: + super().__init__(size=size, **kwargs) class Tarsier2Processor(Qwen2VLProcessor): From 95af1f786c2b7658e7ace02f50ba48ed9b8cff58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Fri, 20 Jun 2025 11:32:19 +0800 Subject: [PATCH 4/5] xx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 汪志鹏 --- docs/models/supported_models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 1198fabfcad..803d2938d2b 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -562,7 +562,7 @@ Specified using `--task generate`. | `SkyworkR1VChatModel` | Skywork-R1V-38B | T + I | `Skywork/Skywork-R1V-38B` | | ✅︎ | ✅︎ | | `SmolVLMForConditionalGeneration` | SmolVLM2 | T + I | `SmolVLM2-2.2B-Instruct` | ✅︎ | | ✅︎ | | `TarsierForConditionalGeneration` | Tarsier | T + IE+ | `omni-search/Tarsier-7b`,`omni-search/Tarsier-34b` | | ✅︎ | ✅︎ | -| `Tarsier2ForConditionalGeneration` | Tarsier2 | T + IE+ + VE+ | `omni-research/Tarsier2-Recap-7b`,`omni-research/Tarsier2-7b-0115` | | ✅︎ | ✅︎ | +| `Tarsier2ForConditionalGeneration`^ | Tarsier2 | T + IE+ + VE+ | `omni-research/Tarsier2-Recap-7b`,`omni-research/Tarsier2-7b-0115` | | ✅︎ | ✅︎ | ^ You need to set the architecture name via `--hf-overrides` to match the one in vLLM.     
• For example, to use DeepSeek-VL2 series models: From 9aa2b33db61d5b867b61979a57f1396f8e500eb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Fri, 20 Jun 2025 20:56:39 +0800 Subject: [PATCH 5/5] xx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 汪志鹏 --- .../offline_inference/vision_language_multi_image.py | 7 ++++++- vllm/model_executor/models/qwen2_vl.py | 12 ++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/examples/offline_inference/vision_language_multi_image.py b/examples/offline_inference/vision_language_multi_image.py index b90c9e568eb..edddd429364 100644 --- a/examples/offline_inference/vision_language_multi_image.py +++ b/examples/offline_inference/vision_language_multi_image.py @@ -839,7 +839,12 @@ def load_tarsier2(question: str, image_urls: list[str]) -> ModelRequestData: hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}, ) - prompt = f"USER: {'<|image_pad|>' * len(image_urls)}\n{question}\n ASSISTANT:" + prompt = ( + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + f"<|im_start|>user\n<|vision_start|>{'<|image_pad|>' * len(image_urls)}" + f"<|vision_end|>{question}<|im_end|>\n" + "<|im_start|>assistant\n" + ) image_data = [fetch_image(url) for url in image_urls] return ModelRequestData( diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index c63e0f9fd75..a72abd6c47c 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -1477,12 +1477,12 @@ class Tarsier2ForConditionalGeneration(Qwen2VLForConditionalGeneration): }) def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - source = vllm_config.model_config.hf_config.text_config - destination = vllm_config.model_config.hf_config - vars(destination).update(vars(source)) - vllm_config.model_config.hf_config.architectures = [ - "Tarsier2ForConditionalGeneration" - ] + # Tarsier2 uses llava as model_type, which will create a Qwen2VLConfig + # as text_config, we need to reconstruct Qwen2VLConfig from LlavaConfig. + config = vllm_config.model_config.hf_config + qwen2vl_config = config.text_config + qwen2vl_config.architectures = config.architectures + vllm_config.model_config.hf_config = qwen2vl_config super().__init__(vllm_config=vllm_config, prefix=prefix) def load_weights(self, weights: Iterable[tuple[str,
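
The config handling above follows from the checkpoint layout: Tarsier2 repos ship a Llava-style top-level config whose text_config carries the Qwen2-VL fields, so both Tarsier2ProcessingInfo.get_hf_config and the model constructor promote a Qwen2VLConfig before delegating to the Qwen2-VL code paths. A rough standalone illustration of that reconstruction, assuming the checkpoint's config loads via AutoConfig exactly as in the hunk above (this is a sketch, not part of the patch):

    from transformers import AutoConfig
    from transformers.models.qwen2_vl.configuration_qwen2_vl import Qwen2VLConfig

    # Load the checkpoint's top-level (Llava-style) config, then rebuild a
    # Qwen2VLConfig from the same dict so downstream Qwen2-VL code sees the
    # fields it expects, mirroring Tarsier2ProcessingInfo.get_hf_config.
    original = AutoConfig.from_pretrained("omni-research/Tarsier2-Recap-7b")
    qwen2vl_config = Qwen2VLConfig.from_dict(original.to_dict())

    # Carry the vLLM architecture name over, as the constructor hunk does.
    qwen2vl_config.architectures = ["Tarsier2ForConditionalGeneration"]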
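
Tying the pieces together, a minimal offline-inference sketch of the documented usage: the hf_overrides entry is what routes the checkpoint to the new Tarsier2ForConditionalGeneration class, and the prompt mirrors the template added in examples/offline_inference/vision_language.py. The image path is a placeholder, not part of the patch:

    from PIL import Image
    from vllm import LLM, SamplingParams

    llm = LLM(
        model="omni-research/Tarsier2-Recap-7b",
        max_model_len=4096,
        hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]},
        limit_mm_per_prompt={"image": 1},
    )

    prompt = (
        "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
        "<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>"
        "Describe this image.<|im_end|>\n"
        "<|im_start|>assistant\n"
    )

    image = Image.open("example.jpg")  # placeholder input image
    outputs = llm.generate(
        {"prompt": prompt, "multi_modal_data": {"image": image}},
        SamplingParams(temperature=0.0, max_tokens=128),
    )
    print(outputs[0].outputs[0].text)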