
Commit 51010a1

[Misc] set single whitespace between log sentences (#13771)
Signed-off-by: cjackal <44624812+cjackal@users.noreply.github.com>
1 parent 7196a3b commit 51010a1
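
The whole commit addresses a single Python pitfall: adjacent string literals are concatenated by the parser with no separator, so multi-line log and error messages end up with sentences glued together unless a space is written explicitly at the seam. Below is a minimal sketch of the before/after behaviour, reusing the message touched in vllm/config.py further down (the variable names are illustrative only, not part of the commit):

# Adjacent string literals are joined with no separator in between.
before = ("Fusion enabled but reshape elimination disabled."
          "RMSNorm + quant (fp8) fusion might not work")
after = ("Fusion enabled but reshape elimination disabled. "
         "RMSNorm + quant (fp8) fusion might not work")

print(before)  # "...disabled.RMSNorm + quant..."  -- sentences run together
print(after)   # "...disabled. RMSNorm + quant..." -- single whitespace between sentences
assert "disabled.RMSNorm" in before and "disabled. RMSNorm" in after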

File tree

36 files changed: +54 / -54 lines changed


vllm/attention/backends/flashinfer.py

Lines changed: 1 addition & 1 deletion
@@ -438,7 +438,7 @@ def __post_init__(self):
                 not in supported_head_sizes:
             raise ValueError(
                 f"Only {supported_head_sizes} are supported for head_dim,",
-                f"received {self.head_dim}.")
+                f" received {self.head_dim}.")
 
     def begin_forward(self):
         if self.num_prefill_tokens > 0:

vllm/attention/backends/mla/common.py

Lines changed: 1 addition & 1 deletion
@@ -533,7 +533,7 @@ def __post_init__(self):
                 not in supported_head_sizes:
             raise ValueError(
                 f"Only {supported_head_sizes} are supported for head_dim,",
-                f"received {self.head_dim}.")
+                f" received {self.head_dim}.")
 
     @property
     def prefill_metadata(self) -> Optional["MLACommonMetadata"]:

vllm/attention/backends/rocm_flash_attn.py

Lines changed: 2 additions & 2 deletions
@@ -497,7 +497,7 @@ def __init__(
         if logits_soft_cap is not None:
             raise ValueError(
                 "ROCm Triton FlashAttention does not support attention"
-                "logits soft capping."
+                " logits soft capping."
                 " please try using the ROCm CK "
                 "FA backend instead by setting the env var "
                 "`VLLM_USE_TRITON_FLASH_ATTN=0`")
@@ -528,7 +528,7 @@ def __init__(
         if self.use_naive_attn:
             if logits_soft_cap is not None:
                 raise ValueError(
-                    "ROCm Naive FlashAttention does not support"
+                    "ROCm Naive FlashAttention does not support "
                     "attention logits soft capping.")
 
             self.attn_func = _sdpa_attention

vllm/config.py

Lines changed: 6 additions & 6 deletions
@@ -924,8 +924,8 @@ def get_num_layers_by_block_type(
             layers_block_type_value = getattr(self.hf_config,
                                               "layers_block_type", None)
             if layers_block_type_value is None:
-                raise ValueError("The model is an hybrid without a"
-                                 "layers_block_type in the hf_config,"
+                raise ValueError("The model is an hybrid without a "
+                                 "layers_block_type in the hf_config, "
                                  "cannot determine the num of "
                                  f"{block_type.value} layers")
 
@@ -2516,7 +2516,7 @@ def _get_and_verify_dtype(
 
     if current_platform.is_hpu() and config_dtype == torch.float16:
         logger.info(
-            "For HPU, we cast models to bfloat16 instead of"
+            "For HPU, we cast models to bfloat16 instead of "
             "using float16 by default. Please specify `dtype` if you "
             "want to use float16.")
         torch_dtype = torch.bfloat16
@@ -2732,7 +2732,7 @@ def __post_init__(self):
             backend=self.guided_decoding_backend).backend_name
         if backend not in valid_guided_backends:
             raise ValueError(f"Invalid guided_decoding_backend '{backend},"
-                             f"must be one of {valid_guided_backends}")
+                             f" must be one of {valid_guided_backends}")
 
 
 @dataclass
@@ -3008,7 +3008,7 @@ def uuid(self):
         def model_post_init(self, __context: Any) -> None:
             if not self.enable_reshape and self.enable_fusion:
                 logger.warning_once(
-                    "Fusion enabled but reshape elimination disabled."
+                    "Fusion enabled but reshape elimination disabled. "
                     "RMSNorm + quant (fp8) fusion might not work")
 
     pass_config: PassConfig = Field(default_factory=PassConfig)
@@ -3563,7 +3563,7 @@ def set_current_vllm_config(vllm_config: VllmConfig, check_compile=False):
             logger.warning(
                 "`torch.compile` is turned on, but the model %s"
                 " does not support it. Please open an issue on GitHub"
-                "if you want it to be supported.",
+                " if you want it to be supported.",
                 vllm_config.model_config.model)
         _current_vllm_config = old_vllm_config
 

vllm/distributed/device_communicators/pynccl_wrapper.py

Lines changed: 2 additions & 2 deletions
@@ -227,10 +227,10 @@ def __init__(self, so_file: Optional[str] = None):
             self.lib = NCCLLibrary.path_to_library_cache[so_file]
         except Exception as e:
             logger.error(
-                "Failed to load NCCL library from %s ."
+                "Failed to load NCCL library from %s. "
                 "It is expected if you are not running on NVIDIA/AMD GPUs."
                 "Otherwise, the nccl library might not exist, be corrupted "
-                "or it does not support the current platform %s."
+                "or it does not support the current platform %s. "
                 "If you already have the library, please set the "
                 "environment variable VLLM_NCCL_SO_PATH"
                 " to point to the correct nccl library path.", so_file,

vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ def initialize(self, local_hostname: str, metadata_server: str,
137137
if metadata_backend not in supported_backend:
138138
raise ValueError(
139139
"Mooncake Configuration error. `metadata_backend`"
140-
f"should be one of {supported_backend}.")
140+
f" should be one of {supported_backend}.")
141141

142142
self.engine.initializeExt(local_hostname, metadata_server,
143143
protocol, device_name, metadata_backend)

vllm/entrypoints/chat_utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -823,7 +823,7 @@ def _parse_chat_message_content_part(
823823
# content is empty, log a warning and skip
824824
if part_type in VALID_MESSAGE_CONTENT_MM_PART_TYPES and not content:
825825
logger.warning(
826-
"Skipping multimodal part (type: '%s')"
826+
"Skipping multimodal part (type: '%s') "
827827
"with empty / unparsable content.", part_type)
828828
return None
829829

vllm/entrypoints/llm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1342,7 +1342,7 @@ def _add_guided_params(
13421342
return params
13431343

13441344
if params.guided_decoding is not None:
1345-
raise ValueError("Cannot set both guided_options_request and"
1345+
raise ValueError("Cannot set both guided_options_request and "
13461346
"params.guided_decoding.")
13471347

13481348
params.guided_decoding = GuidedDecodingParams(

vllm/entrypoints/openai/api_server.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -575,7 +575,7 @@ async def do_rerank(request: RerankRequest, raw_request: Request):
575575
async def do_rerank_v1(request: RerankRequest, raw_request: Request):
576576
logger.warning_once(
577577
"To indicate that the rerank API is not part of the standard OpenAI"
578-
" API, we have located it at `/rerank`. Please update your client"
578+
" API, we have located it at `/rerank`. Please update your client "
579579
"accordingly. (Note: Conforms to JinaAI rerank API)")
580580

581581
return await do_rerank(request, raw_request)

vllm/executor/ray_distributed_executor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -513,7 +513,7 @@ def _check_ray_adag_installation(self):
513513
if cupy_spec is None and envs.VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL:
514514
raise ValueError(
515515
"cupy is not installed but required since "
516-
"VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL is set."
516+
"VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL is set. "
517517
"Run `pip install ray[adag]` and check cupy installation.")
518518

519519
def _compiled_ray_dag(self, enable_asyncio: bool):
