tests/models/test_registry.py (2 changes: 0 additions & 2 deletions)
```diff
@@ -13,7 +13,6 @@
 )
 from vllm.model_executor.models.adapters import (
     as_embedding_model,
-    as_reward_model,
     as_seq_cls_model,
 )
 from vllm.model_executor.models.registry import (
@@ -46,7 +45,6 @@ def test_registry_imports(model_arch):
     # All vLLM models should be convertible to a pooling model
     assert is_pooling_model(as_seq_cls_model(model_cls))
     assert is_pooling_model(as_embedding_model(model_cls))
-    assert is_pooling_model(as_reward_model(model_cls))
 
     if model_arch in _MULTIMODAL_MODELS:
         assert supports_multimodal(model_cls)
```
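For context, what the surviving assertions exercise can be run standalone. A minimal sketch, assuming `ModelRegistry.resolve_model_cls` and the import paths below match your vLLM version; the architecture name is just an example:

```python
# Hedged sketch of the conversion check the updated test still performs,
# for a single architecture. ModelRegistry.resolve_model_cls and the import
# paths are assumptions based on vLLM's registry/adapters modules and may
# differ across versions.
from vllm.model_executor.models.adapters import (
    as_embedding_model,
    as_seq_cls_model,
)
from vllm.model_executor.models.interfaces_base import is_pooling_model
from vllm.model_executor.models.registry import ModelRegistry

model_cls, _ = ModelRegistry.resolve_model_cls("LlamaForCausalLM")

# Both remaining converters should still yield pooling models; the
# as_reward_model assertion is gone along with the adapter itself.
assert is_pooling_model(as_seq_cls_model(model_cls))
assert is_pooling_model(as_embedding_model(model_cls))
```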
vllm/model_executor/model_loader/utils.py (4 changes: 0 additions & 4 deletions)
```diff
@@ -22,7 +22,6 @@
 )
 from vllm.model_executor.models.adapters import (
     as_embedding_model,
-    as_reward_model,
     as_seq_cls_model,
     try_create_mm_pooling_model_cls,
 )
@@ -209,9 +208,6 @@ def _get_model_architecture(model_config: ModelConfig) -> tuple[type[nn.Module],
     elif convert_type == "classify":
         logger.debug_once("Converting to sequence classification model.")
         model_cls = as_seq_cls_model(model_cls)
-    elif convert_type == "reward":
-        logger.debug_once("Converting to reward model.")
-        model_cls = as_reward_model(model_cls)
     else:
         assert_never(convert_type)
 
```
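After this change, the convert dispatch in `_get_model_architecture` has no "reward" arm. A hedged sketch of the resulting shape; the "none" and "embed" branches and their log messages are assumed from context not shown in this hunk:

```python
# Sketch only: the dispatch after removing the "reward" arm. The branches
# above "classify" are assumptions; only the "classify" arm and the else
# clause are visible in the diff.
if convert_type == "none":
    pass
elif convert_type == "embed":
    model_cls = as_embedding_model(model_cls)
elif convert_type == "classify":
    logger.debug_once("Converting to sequence classification model.")
    model_cls = as_seq_cls_model(model_cls)
else:
    # assert_never makes any unhandled convert_type a type-checking error,
    # so "reward" presumably also leaves the ConvertType literal elsewhere
    # in this PR; otherwise the checker would flag this branch.
    assert_never(convert_type)
```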
vllm/model_executor/models/adapters.py (34 changes: 0 additions & 34 deletions)
```diff
@@ -382,40 +382,6 @@ def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
     return ModelForSequenceClassification  # type: ignore
 
 
-def as_reward_model(cls: _T) -> _T:
-    """
-    Subclass an existing vLLM model to support reward modeling.
-
-    By default, we return the hidden states of each token directly.
-
-    Note:
-        We assume that no extra layers are added to the original model;
-        please implement your own model if this is not the case.
-    """
-    # Avoid modifying existing reward models
-    if is_pooling_model(cls):
-        return cls
-
-    # Lazy import
-    from vllm.model_executor.layers.pooler import DispatchPooler, Pooler
-
-    from .interfaces_base import default_pooling_type
-
-    @default_pooling_type("ALL")
-    class ModelForReward(_create_pooling_model_cls(cls)):
-        def _init_pooler(self, vllm_config: "VllmConfig", prefix: str = ""):
-            pooler_config = vllm_config.model_config.pooler_config
-            assert pooler_config is not None
-
-            self.pooler = DispatchPooler(
-                {"encode": Pooler.for_encode(pooler_config)},
-            )
-
-    ModelForReward.__name__ = _get_pooling_model_name(cls.__name__, "ForReward")
-
-    return ModelForReward  # type: ignore
-
-
 class SequenceClassificationConfig(VerifyAndUpdateConfig):
     @staticmethod
     def verify_and_update_config(vllm_config: "VllmConfig") -> None:
```
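Downstream code that still needs the deleted behavior can reconstruct it outside vLLM from the same building blocks the removed function used. A minimal sketch, copied from the deleted code above; note that `_create_pooling_model_cls` and `_get_pooling_model_name` are private helpers and may change without notice:

```python
# Best-effort reconstruction of the removed adapter for downstream use.
# Everything here mirrors the deleted code above; the private helpers
# imported from adapters are not a supported API.
from vllm.config import VllmConfig
from vllm.model_executor.layers.pooler import DispatchPooler, Pooler
from vllm.model_executor.models.adapters import (
    _create_pooling_model_cls,
    _get_pooling_model_name,
)
from vllm.model_executor.models.interfaces_base import default_pooling_type


def as_token_reward_model(cls):
    """Return per-token hidden states, as the removed as_reward_model did."""

    # "ALL" pooling keeps every token's hidden state rather than reducing
    # to a single vector, matching the removed adapter's default.
    @default_pooling_type("ALL")
    class ModelForReward(_create_pooling_model_cls(cls)):
        def _init_pooler(self, vllm_config: VllmConfig, prefix: str = ""):
            pooler_config = vllm_config.model_config.pooler_config
            assert pooler_config is not None

            self.pooler = DispatchPooler(
                {"encode": Pooler.for_encode(pooler_config)},
            )

    ModelForReward.__name__ = _get_pooling_model_name(cls.__name__, "ForReward")
    return ModelForReward
```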