-
-
Notifications
You must be signed in to change notification settings - Fork 11.3k
[Misc] Clean up MiniCPM-V/O code #15337
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
DarkLight1337
merged 8 commits into
vllm-project:main
from
DarkLight1337:minicpm-cleanup
Mar 25, 2025
Merged
Changes from all commits
Commits
Show all changes
8 commits
Select commit
Hold shift + click to select a range
1dfc902
[Misc] Clean up MiniCPM-V/O code
DarkLight1337 352c3bc
Fixes
DarkLight1337 aabed82
Merge branch 'main' into minicpm-cleanup
DarkLight1337 7f0307a
Fix embeds
DarkLight1337 1c45aa8
Update
DarkLight1337 dd999b3
Clean
DarkLight1337 289619c
Update tests
DarkLight1337 4789620
Fix OOM
DarkLight1337 File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,6 +1,5 @@ | ||
| # SPDX-License-Identifier: Apache-2.0 | ||
|
|
||
| import copy | ||
| from functools import partial | ||
| from typing import Optional, Union | ||
|
|
||
|
|
@@ -29,7 +28,7 @@ def _test_processing_correctness( | |
| hit_rate: float, | ||
| num_batches: int, | ||
| simplify_rate: float, | ||
| ignore_mm_keys: Optional[list[str]] = None, | ||
| ignore_mm_keys: Optional[set[str]] = None, | ||
| ): | ||
| model_info = HF_EXAMPLE_MODELS.find_hf_info(model_id) | ||
| model_info.check_available_online(on_fail="skip") | ||
|
|
@@ -145,7 +144,7 @@ def _test_processing_correctness_hf( | |
| baseline_processor: BaseMultiModalProcessor, | ||
| cached_processor: BaseMultiModalProcessor, | ||
| batch_idx: int, | ||
| ignore_mm_keys: Optional[list[str]] = None, | ||
| ignore_mm_keys: Optional[set[str]] = None, | ||
| ): | ||
| if model_config.hf_config.model_type in ("mllama", "whisper", "ultravox"): | ||
| # For some multimodal models, tokenizer will always add bos_token | ||
|
|
@@ -167,35 +166,38 @@ def _test_processing_correctness_hf( | |
| hf_processor_mm_kwargs={}, | ||
| ) | ||
|
|
||
| assert _inputs_equal( | ||
| _assert_inputs_equal( | ||
| baseline_result, | ||
| cached_result, | ||
| ignore_mm_keys, | ||
| ), f"Failed ({batch_idx=}, {prompt=}, {mm_data=})" | ||
| ignore_mm_keys=ignore_mm_keys, | ||
| msg=f"Failed ({batch_idx=}, {prompt=}, {mm_data=})", | ||
| ) | ||
|
|
||
| baseline_tokenized_result = baseline_processor.apply( | ||
| token_prompt, | ||
| mm_data=mm_data, | ||
| hf_processor_mm_kwargs={}, | ||
| ) | ||
|
|
||
| assert _inputs_equal( | ||
| _assert_inputs_equal( | ||
| baseline_result, | ||
| baseline_tokenized_result, | ||
| ignore_mm_keys, | ||
| ), f"Failed ({batch_idx=}, {prompt=}, {mm_data=})" | ||
| ignore_mm_keys=ignore_mm_keys, | ||
| msg=f"Failed ({batch_idx=}, {prompt=}, {mm_data=})", | ||
| ) | ||
|
|
||
| cached_tokenized_result = cached_processor.apply( | ||
| token_prompt, | ||
| mm_data=mm_data, | ||
| hf_processor_mm_kwargs={}, | ||
| ) | ||
|
|
||
| assert _inputs_equal( | ||
| _assert_inputs_equal( | ||
| cached_result, | ||
| cached_tokenized_result, | ||
| ignore_mm_keys, | ||
| ), f"Failed ({batch_idx=}, {prompt=}, {mm_data=})" | ||
| ignore_mm_keys=ignore_mm_keys, | ||
| msg=f"Failed ({batch_idx=}, {prompt=}, {mm_data=})", | ||
| ) | ||
|
|
||
|
|
||
| def _test_processing_correctness_mistral( | ||
|
|
@@ -206,7 +208,7 @@ def _test_processing_correctness_mistral( | |
| baseline_processor: BaseMultiModalProcessor, | ||
| cached_processor: BaseMultiModalProcessor, | ||
| batch_idx: int, | ||
| ignore_mm_keys: Optional[list[str]] = None, | ||
| ignore_mm_keys: Optional[set[str]] = None, | ||
| ): | ||
| images = mm_data.get("image", []) | ||
| if not isinstance(images, list): | ||
|
|
@@ -233,11 +235,12 @@ def _test_processing_correctness_mistral( | |
| hf_processor_mm_kwargs={}, | ||
| ) | ||
|
|
||
| assert _inputs_equal( | ||
| _assert_inputs_equal( | ||
| baseline_tokenized_result, | ||
| cached_tokenized_result, | ||
| ignore_mm_keys, | ||
| ), f"Failed ({batch_idx=}, {prompt=}, {mm_data=})" | ||
| ignore_mm_keys=ignore_mm_keys, | ||
| msg=f"Failed ({batch_idx=}, {prompt=}, {mm_data=})", | ||
| ) | ||
|
|
||
|
|
||
| # yapf: disable | ||
|
|
@@ -261,6 +264,7 @@ def _test_processing_correctness_mistral( | |
| "TIGER-Lab/Mantis-8B-siglip-llama3", | ||
| "mistralai/Pixtral-12B-2409", | ||
| "mistral-community/pixtral-12b", | ||
| "openbmb/MiniCPM-Llama3-V-2_5", | ||
| "openbmb/MiniCPM-o-2_6", | ||
| "openbmb/MiniCPM-V-2_6", | ||
| "allenai/Molmo-7B-D-0924", | ||
|
|
@@ -290,7 +294,7 @@ def test_processing_correctness( | |
| # In Ultravox, the audio_features can be different depending on padding | ||
| # The slight difference should not be a problem though, since | ||
| # attention_mask lets us ignore the difference. | ||
| ignore_mm_keys = ['audio_features'] | ||
| ignore_mm_keys = {"audio_features"} | ||
|
|
||
| _test_processing_correctness( | ||
| model_id, | ||
|
|
@@ -328,38 +332,26 @@ def test_processing_correctness_phi3v( | |
| ) | ||
|
|
||
|
|
||
| def _inputs_equal( | ||
| def _assert_inputs_equal( | ||
| a: MultiModalInputs, | ||
| b: MultiModalInputs, | ||
| ignore_mm_keys: Optional[list[str]] = None, | ||
| *, | ||
| ignore_mm_keys: Optional[set[str]] = None, | ||
| msg: str = "", | ||
| ): | ||
| return _drop_mm_kwargs_keys(a, ignore_mm_keys) == _drop_mm_kwargs_keys( | ||
| b, ignore_mm_keys) | ||
|
|
||
|
|
||
| def _drop_mm_kwargs_keys( | ||
| result: MultiModalInputs, | ||
| ignore_mm_keys: Optional[list[str]] = None, | ||
| ) -> MultiModalInputs: | ||
| """Drop specified keys from result['mm_kwargs']. | ||
|
|
||
| This is mainly to avoid doing exact match of audio_features in ultravox. | ||
|
|
||
| Args: | ||
| result: Result to drop keys from | ||
| ignore_mm_keys: List of keys to ignore, e.g. ['audio_features'] | ||
| """ | ||
| if not ignore_mm_keys: | ||
| return result | ||
|
|
||
| if 'mm_kwargs' in result: | ||
| result = copy.deepcopy(result) | ||
| mm_kwargs = result['mm_kwargs'] | ||
| for key in ignore_mm_keys: | ||
| mm_kwargs.pop(key, None) | ||
| for items in mm_kwargs._items_by_modality.values(): | ||
| for item in items: | ||
| for key in ignore_mm_keys: | ||
| item.pop(key, None) | ||
|
|
||
| return result | ||
| if ignore_mm_keys is None: | ||
| ignore_mm_keys = set() | ||
|
|
||
| if msg is None: | ||
| assert "mm_kwargs" in a and "mm_kwargs" in b | ||
| else: | ||
| assert "mm_kwargs" in a and "mm_kwargs" in b, msg | ||
|
|
||
| for key in ignore_mm_keys: | ||
| a["mm_kwargs"].pop(key, None) | ||
| b["mm_kwargs"].pop(key, None) | ||
|
|
||
| if msg is None: | ||
| assert a == b | ||
| else: | ||
| assert a == b, msg | ||
|
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. These changes are to let pytest show the non-matching items in more detail |
||
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -295,8 +295,6 @@ def _call_hf_processor( | |
|
|
||
| # HF processor pops the `num_crops` kwarg, which is needed by vLLM | ||
| if (images := mm_data.get("images")) is not None: | ||
| assert isinstance(images, list) | ||
|
|
||
|
Comment on lines
-298
to
-299
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Unnecessary check since the data is parsed in the next line |
||
| parsed_images = (self._get_data_parser().parse_mm_data({ | ||
| "image": | ||
| images | ||
|
|
||
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Need this to test the `else` branch in `_base_call_hf_processor`