Skip to content

Commit 7c2e91c

Browse files
[Misc] Remove unused executor.apply_model (#26215)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
1 parent 736fbf4 commit 7c2e91c

File tree

1 file changed

+1
-12
lines changed

1 file changed

+1
-12
lines changed

vllm/executor/executor_base.py

Lines changed: 1 addition & 12 deletions
@@ -7,8 +7,7 @@
 from functools import cached_property
 from typing import Any, Awaitable, Callable, List, Optional, Set, Union
 
-import torch.nn as nn
-from typing_extensions import TypeVar, deprecated
+from typing_extensions import TypeVar
 
 import vllm.platforms
 from vllm.config import VllmConfig
@@ -127,16 +126,6 @@ def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks) -> None:
         self.collective_rpc("initialize_cache",
                             args=(num_gpu_blocks, num_cpu_blocks))
 
-    @deprecated("`llm_engine.model_executor.apply_model` will no longer work "
-                "in V1 Engine. Please replace with `llm_engine.apply_model` "
-                "and set `VLLM_ALLOW_INSECURE_SERIALIZATION=1`.")
-    def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
-        """
-        Run a function directly on the model inside each worker,
-        returning the result for each of them.
-        """
-        return self.collective_rpc("apply_model", args=(func, ))
-
     @cached_property  # Avoid unnecessary RPC calls
     def supported_tasks(self) -> tuple[SupportedTask, ...]:
         output = self.collective_rpc("get_supported_tasks")

0 commit comments

Comments (0)