
Commit 9dbaf81

zhuohan123 and yeqcharlotte authored and committed
[Core] Enable command line logging for LLMEngine (vllm-project#25610)
Co-authored-by: Ye (Charlotte) Qi <yeq@meta.com>
Signed-off-by: Zhuohan Li <zhuohan123@gmail.com>
1 parent 9097407 commit 9dbaf81
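In practical terms, this commit means offline use of the V1 LLMEngine now emits periodic stats logging on the command line, gated by the VLLM_LOG_STATS_INTERVAL environment variable referenced in the diff below. A minimal usage sketch (the model name and the disable_log_stats flag are illustrative assumptions from general vLLM usage, not part of this commit):

```python
# Hypothetical offline run; assumes a small HF model is available locally and
# that stat logging is enabled (disable_log_stats is an existing EngineArgs
# flag, not something introduced by this commit).
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m", disable_log_stats=False)
outputs = llm.generate(["Hello, my name is"], SamplingParams(max_tokens=32))
for output in outputs:
    print(output.outputs[0].text)

# With this commit, the engine periodically logs stats to the console while
# generate() runs, on a VLLM_LOG_STATS_INTERVAL cadence.
```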

File tree

vllm/v1/engine/llm_engine.py
vllm/v1/metrics/loggers.py

2 files changed: +31 -9 lines changed


vllm/v1/engine/llm_engine.py

Lines changed: 31 additions & 8 deletions
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
+import time
 from collections.abc import Mapping
 from copy import copy
 from typing import Any, Callable, Optional, Union
@@ -31,8 +32,7 @@
 from vllm.v1.engine.parallel_sampling import ParentRequest
 from vllm.v1.engine.processor import Processor
 from vllm.v1.executor.abstract import Executor
-from vllm.v1.metrics.loggers import (PrometheusStatLogger, StatLoggerBase,
-                                     StatLoggerFactory)
+from vllm.v1.metrics.loggers import StatLoggerFactory, StatLoggerManager
 from vllm.v1.metrics.reader import Metric, get_metrics_snapshot
 from vllm.v1.metrics.stats import IterationStats
 from vllm.v1.worker.worker_base import WorkerBase
@@ -74,9 +74,6 @@ def __init__(
         self.cache_config = vllm_config.cache_config
 
         self.log_stats = log_stats
-        self.stat_logger: Optional[StatLoggerBase] = None
-        if self.log_stats:
-            self.stat_logger = PrometheusStatLogger(vllm_config)
 
         executor_backend = (
             self.vllm_config.parallel_config.distributed_executor_backend)
@@ -122,6 +119,15 @@ def __init__(
             log_stats=self.log_stats,
         )
 
+        self.logger_manager: Optional[StatLoggerManager] = None
+        if self.log_stats:
+            self.logger_manager = StatLoggerManager(
+                vllm_config=vllm_config,
+                custom_stat_loggers=stat_loggers,
+                enable_default_loggers=log_stats,
+            )
+            self.logger_manager.log_engine_initialized()
+
         if not multiprocess_mode:
             # for v0 compatibility
             self.model_executor = self.engine_core.engine_core.model_executor  # type: ignore
@@ -269,10 +275,13 @@ def step(self) -> Union[list[RequestOutput], list[PoolingRequestOutput]]:
         self.engine_core.abort_requests(processed_outputs.reqs_to_abort)
 
         # 4) Record stats
-        if self.stat_logger is not None:
+        if self.logger_manager is not None:
             assert outputs.scheduler_stats is not None
-            self.stat_logger.record(scheduler_stats=outputs.scheduler_stats,
-                                    iteration_stats=iteration_stats)
+            self.logger_manager.record(
+                scheduler_stats=outputs.scheduler_stats,
+                iteration_stats=iteration_stats,
+            )
+            self.do_log_stats_with_interval()
 
         return processed_outputs.request_outputs
 
@@ -315,6 +324,20 @@ def get_tokenizer(self) -> AnyTokenizer:
 
         return self.tokenizer
 
+    def do_log_stats(self) -> None:
+        """Log stats if logging is enabled."""
+        if self.logger_manager:
+            self.logger_manager.log()
+
+    def do_log_stats_with_interval(self) -> None:
+        """Log stats when the time interval has passed."""
+        now = time.time()
+        if not hasattr(self, "_last_log_time"):
+            self._last_log_time = now
+        if now - self._last_log_time >= envs.VLLM_LOG_STATS_INTERVAL:
+            self.do_log_stats()
+            self._last_log_time = now
+
     def add_lora(self, lora_request: LoRARequest) -> bool:
         """Load a new LoRA adapter into the engine for future requests."""
         return self.engine_core.add_lora(lora_request)
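The new do_log_stats_with_interval helper above is a wall-clock rate limiter: it lazily initializes _last_log_time on first use and only forwards to do_log_stats once envs.VLLM_LOG_STATS_INTERVAL seconds have elapsed. A standalone sketch of the same pattern (class and parameter names here are illustrative, not vLLM's):

```python
import time


class IntervalGate:
    """Invoke a callback at most once every `interval` seconds of wall time."""

    def __init__(self, callback, interval: float = 10.0):
        self._callback = callback
        self._interval = interval
        self._last_time: float | None = None

    def maybe_fire(self) -> None:
        now = time.time()
        if self._last_time is None:
            # First call only arms the timer, mirroring the hasattr() check
            # in do_log_stats_with_interval.
            self._last_time = now
        if now - self._last_time >= self._interval:
            self._callback()
            self._last_time = now


# Called from a tight loop, this prints at most once every 2 seconds.
gate = IntervalGate(lambda: print("stats snapshot"), interval=2.0)
for _ in range(6):
    time.sleep(0.5)
    gate.maybe_fire()
```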

vllm/v1/metrics/loggers.py

Lines changed: 0 additions & 1 deletion
@@ -90,7 +90,6 @@ def record(self,
                iteration_stats: Optional[IterationStats],
                engine_idx: int = 0):
         """Log Stats to standard output."""
-
         if iteration_stats:
             self._track_iteration_stats(iteration_stats)
 
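The loggers.py change only drops a stray blank line in the console logger's record(), but record() is also the hook that custom loggers plug into: the StatLoggerManager introduced above fans out to the default loggers plus any custom_stat_loggers factories. A rough sketch of a custom logger, assuming StatLoggerBase exposes the record()/log() shape visible in these diffs (the exact abstract-method set and constructor signature are assumptions, not a documented contract):

```python
# Hypothetical custom stat logger; the base-class interface here is inferred
# from the diff and may differ from the actual vLLM definition.
from vllm.v1.metrics.loggers import StatLoggerBase


class IterationCounter(StatLoggerBase):
    """Counts engine iterations and prints the running total on log()."""

    def __init__(self, vllm_config=None, engine_index: int = 0):
        self.iterations = 0

    def record(self, scheduler_stats, iteration_stats, engine_idx: int = 0):
        if iteration_stats is not None:
            self.iterations += 1

    def log(self):
        print(f"[IterationCounter] iterations so far: {self.iterations}")

    def log_engine_initialized(self):
        print("[IterationCounter] engine initialized")


# Passed as a factory in the engine's stat_loggers list, e.g.
#   LLMEngine.from_engine_args(engine_args, stat_loggers=[IterationCounter])
# assuming from_engine_args forwards stat_loggers to the constructor shown in
# the llm_engine.py diff above.
```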
0 commit comments