Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Core][Frontend] Support Passing Multimodal Processor Kwargs #8657

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
550378b
Allow for processor kwarg overrides
alex-jw-brooks Sep 16, 2024
190606f
Pass processor through to partial
alex-jw-brooks Sep 17, 2024
b1ca041
Add default & processor kwarg override tests
alex-jw-brooks Sep 17, 2024
195e31c
Don't allow ctx or inputs as kwargs
alex-jw-brooks Sep 17, 2024
1472d04
Add kwarg override for processor to dummy data factories
alex-jw-brooks Sep 17, 2024
f10601f
Add kwarg override for processor to max token calc
alex-jw-brooks Sep 19, 2024
429097a
Move kwarg only override func to utils
alex-jw-brooks Sep 19, 2024
159cfc2
Force processor kwargs to be keyword-only
alex-jw-brooks Sep 19, 2024
af91930
Pass unfiltered processor kwargs to default mapper
alex-jw-brooks Sep 19, 2024
9adad10
Add hack for mapper preprocessor kwargs
alex-jw-brooks Sep 19, 2024
9f7aed8
Simplify dummy data processor kwarg & add tests
alex-jw-brooks Sep 19, 2024
ff59e44
Add tests for max multimodal token kwarg overrides
alex-jw-brooks Sep 19, 2024
6b26454
Format registry
alex-jw-brooks Sep 20, 2024
0e2d53d
Fix default mapper comparison
alex-jw-brooks Sep 20, 2024
5a3341b
Move kwarg filtering into hf processor getter
alex-jw-brooks Sep 20, 2024
3e1fe54
Enable processor_kwargs in video processor
alex-jw-brooks Sep 20, 2024
feccfd7
Add tests for mapper processor_kwargs
alex-jw-brooks Sep 20, 2024
3ada64d
Update mapper not on multimodal processor kwargs
alex-jw-brooks Sep 20, 2024
58dcc63
processor kwarg test cleanup
alex-jw-brooks Sep 20, 2024
1cee215
Move context builder to test utils
alex-jw-brooks Sep 19, 2024
d5f9efa
Use common context builder in processor kwarg tests
alex-jw-brooks Sep 20, 2024
b5d434b
Update vllm/entrypoints/llm.py
alex-jw-brooks Sep 22, 2024
a096301
Update vllm/inputs/registry.py
alex-jw-brooks Sep 22, 2024
79962e0
Update vllm/inputs/registry.py
alex-jw-brooks Sep 22, 2024
2cb1f72
Update vllm/inputs/registry.py
alex-jw-brooks Sep 22, 2024
37eb532
Update vllm/inputs/registry.py
alex-jw-brooks Sep 22, 2024
a4c7c3d
Update vllm/inputs/registry.py
alex-jw-brooks Sep 22, 2024
36dd2cb
Fix formatting
alex-jw-brooks Sep 22, 2024
f95c86f
Rename processor kwargs to mm processor kwargs
alex-jw-brooks Sep 22, 2024
229604f
Update docstring
DarkLight1337 Sep 22, 2024
2a48452
Merge branch 'main' into support_processor_kwargs
DarkLight1337 Sep 22, 2024
b732d72
Try to fix CUDA reinitialization error
DarkLight1337 Sep 22, 2024
844524a
Consolidate processor loading
DarkLight1337 Sep 23, 2024
2dd742b
Fix CUDA reinitialization error
DarkLight1337 Sep 23, 2024
ebc1c02
Simplify code
DarkLight1337 Sep 23, 2024
a7f32f5
Fix tests
DarkLight1337 Sep 23, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions tests/engine/test_arg_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,24 @@ def test_limit_mm_per_prompt_parser(arg, expected):
def test_bad_nullable_kvs(arg):
with pytest.raises(ArgumentTypeError):
nullable_kvs(arg)


@pytest.mark.parametrize(("arg", "expected"), [
    (None, None),
    ("{}", {}),
    ('{"num_crops": 4}', {"num_crops": 4}),
    ('{"foo": {"bar": "baz"}}', {"foo": {"bar": "baz"}}),
])
def test_mm_processor_kwargs_prompt_parser(arg, expected):
    """--mm-processor-kwargs is parsed as JSON; omitting it yields None."""
    parser = EngineArgs.add_cli_args(FlexibleArgumentParser())
    cli_args = [] if arg is None else ["--mm-processor-kwargs", arg]
    parsed = parser.parse_args(cli_args)
    assert parsed.mm_processor_kwargs == expected
29 changes: 1 addition & 28 deletions tests/models/decoder_only/vision_language/test_qwen.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,13 @@
import torch
from PIL.Image import Image

from vllm.config import ModelConfig
from vllm.inputs import InputContext, LLMInputs
from vllm.multimodal.base import MultiModalInputs
from vllm.multimodal.utils import cached_get_tokenizer, rescale_image_size

from ....conftest import (IMAGE_ASSETS, HfRunner, ImageAsset, PromptImageInput,
VllmRunner, _ImageAssets)
from ...utils import check_logprobs_close
from ...utils import build_model_context, check_logprobs_close

text_only_models = [
"Qwen/Qwen-7B-Chat" # Has no visual component
Expand Down Expand Up @@ -42,32 +41,6 @@
IMG_SIZE = 448


def build_model_context(model_name: str,
                        tokenizer_name: Optional[str] = None,
                        trust_remote_code: bool = False):
    """Build an InputContext wrapping a minimal ModelConfig.

    Args:
        model_name: Name of the model being considered.
        tokenizer_name: Name of the tokenizer; defaults to ``model_name``
            when not given.
        trust_remote_code: Whether or not to allow loading remote code.

    Returns:
        InputContext for the model being considered.
    """
    # Fall back to the model's own tokenizer unless one was supplied.
    resolved_tokenizer = model_name if tokenizer_name is None else tokenizer_name
    model_config = ModelConfig(
        model_name,
        resolved_tokenizer,
        tokenizer_mode="auto",
        trust_remote_code=trust_remote_code,
        dtype="float32",
        seed=0,
    )
    return InputContext(model_config)


@pytest.fixture()
def input_mapper_for_qwen():
# Lazy import to avoid initializing CUDA during test collection
Expand Down
35 changes: 35 additions & 0 deletions tests/models/utils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
import warnings
from typing import Dict, List, Optional, Sequence, Tuple, Union

from vllm.config import ModelConfig
from vllm.inputs import InputContext
from vllm.sequence import Logprob, PromptLogprobs, SampleLogprobs

TokensText = Tuple[List[int], str]
Expand Down Expand Up @@ -240,3 +242,36 @@ def check_logprobs_close(
warnings.simplefilter("always")

warnings.warn(fail_msg, stacklevel=2)


def build_model_context(model_name: str,
                        tokenizer_name: Optional[str] = None,
                        trust_remote_code: bool = False,
                        mm_processor_kwargs: Optional[Dict] = None,
                        limit_mm_per_prompt: Optional[Dict] = None):
    """Build an InputContext wrapping a minimal ModelConfig.

    Args:
        model_name: Name of the model being considered.
        tokenizer_name: Name of the tokenizer; defaults to ``model_name``
            when not given.
        trust_remote_code: Whether or not to allow loading remote code.
        mm_processor_kwargs: Optional processor kwargs to be leveraged in
            the input processor, mapper, dummy data creation, etc.
        limit_mm_per_prompt: Multimodal limits.

    Returns:
        InputContext for the model being considered.
    """
    # Fall back to the model's own tokenizer unless one was supplied.
    resolved_tokenizer = model_name if tokenizer_name is None else tokenizer_name
    model_config = ModelConfig(
        model_name,
        resolved_tokenizer,
        tokenizer_mode="auto",
        trust_remote_code=trust_remote_code,
        dtype="float32",
        seed=0,
        mm_processor_kwargs=mm_processor_kwargs,
        limit_mm_per_prompt=limit_mm_per_prompt,
    )
    return InputContext(model_config)
Loading
Loading