Skip to content

Commit 2015223

Browse files
didier-durand authored and FeiDaLI committed
[Doc]: fixing doc typos (vllm-project#24635)
Signed-off-by: Didier Durand <durand.didier@gmail.com>
1 parent ff1ba49 commit 2015223

File tree

9 files changed

+10
-10
lines changed

9 files changed

+10
-10
lines changed

vllm/config/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3881,7 +3881,7 @@ def contains_object_print(text):
38813881
Check if the text looks like a printed Python object, e.g.
38823882
contains any substring matching the pattern: "at 0xFFFFFFF>"
38833883
We match against 0x followed by 2-16 hex chars (there's
3884-
a max of 16 on a 64 bit system).
3884+
a max of 16 on a 64-bit system).
38853885
38863886
Args:
38873887
text (str): The text to check

vllm/entrypoints/openai/tool_parsers/internlm2_tool_parser.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ def extract_tool_calls_streaming(
6060
if '<|action_start|>' not in current_text:
6161
self.position = len(current_text)
6262
return DeltaMessage(content=delta_text)
63-
# if the tool call is sended, return an empty delta message
63+
# if the tool call is sent, return an empty delta message
6464
# to make sure the finish_reason will be sent correctly.
6565
if self.current_tool_id > 0:
6666
return DeltaMessage(content='')

vllm/model_executor/layers/mamba/ops/ssd_chunk_state.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -502,7 +502,7 @@ def _chunk_state_varlen_kernel(
502502
dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize
503503

504504
# If the sequence starts after the last chunk idx, we don't need to add the contribution from the last chunk
505-
# If HAS_INITSTATES==True need to consider two possiblties
505+
# If HAS_INITSTATES==True need to consider two possibilities
506506
# - if start_idx < pid_c * chunk_size, then we need to take the past_states_ptrs
507507
# - if state_idx >= pid * chunk_size, then we need to insert initstates
508508
if ((start_idx < pid_c * chunk_size) # first chunk

vllm/model_executor/models/arcee.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -342,7 +342,7 @@ def load_weights(self, weights: Iterable[tuple[str,
342342
class ArceeForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
343343
"""Arcee Model for causal language modeling, integrated with vLLM
344344
runtime."""
345-
# Map fused module names to their sub-module components
345+
# Map fused module names to their submodule components
346346
# (for quantization and LoRA)
347347
packed_modules_mapping = {
348348
"qkv_proj": ["q_proj", "k_proj", "v_proj"],

vllm/model_executor/models/llava_onevision.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -835,7 +835,7 @@ def get_multimodal_embeddings(self,
835835
return None
836836

837837
# The result multimodal_embeddings is tuple of tensors, with each
838-
# tensor correspoending to a multimodal data item (image or video).
838+
# tensor corresponding to a multimodal data item (image or video).
839839
multimodal_embeddings: tuple[torch.Tensor, ...] = ()
840840

841841
# NOTE: It is important to iterate over the keys in this dictionary

vllm/model_executor/models/phi4_multimodal.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1350,7 +1350,7 @@ def get_multimodal_embeddings(
13501350
return None
13511351

13521352
# The result multimodal_embeddings is tuple of tensors, with each
1353-
# tensor correspoending to a multimodal data item (image or video).
1353+
# tensor corresponding to a multimodal data item (image or video).
13541354
multimodal_embeddings: tuple[torch.Tensor, ...] = ()
13551355

13561356
# NOTE: It is important to iterate over the keys in this dictionary

vllm/model_executor/models/phi4mm_audio.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ class ConformerEncoderLayer(nn.Module):
100100
activation function for glu used in the multihead attention,
101101
default "swish".
102102
activation_checkpointing: str, optional
103-
a dictionarry of {"module","interval","offload"}, where
103+
a dictionary of {"module","interval","offload"}, where
104104
"module": str
105105
accept ["transformer", "attention"] to select
106106
which module should do activation checkpointing.

vllm/model_executor/models/qwen2_5_omni_thinker.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -846,7 +846,7 @@ def get_multimodal_embeddings(self,
846846
return []
847847

848848
# The result multimodal_embeddings is tuple of tensors, with each
849-
# tensor correspoending to a multimodal data item (image or video).
849+
# tensor corresponding to a multimodal data item (image or video).
850850
multimodal_embeddings: tuple[torch.Tensor, ...] = ()
851851

852852
# NOTE: It is important to iterate over the keys in this dictionary
@@ -873,7 +873,7 @@ def get_input_embeddings(
873873
if multimodal_embeddings is not None \
874874
and len(multimodal_embeddings) != 0:
875875

876-
# TODO (ywang96): support overlapping modalitiy embeddings so that
876+
# TODO (ywang96): support overlapping modality embeddings so that
877877
# `use_audio_in_video` will work on V1.
878878
inputs_embeds = merge_multimodal_embeddings(
879879
input_ids, inputs_embeds, multimodal_embeddings, [

vllm/v1/attention/backends/mla/common.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -463,7 +463,7 @@ def __init__(self,
463463
self.dcp_world_size = 1
464464
self.dcp_rank = 0
465465

466-
# Dont try to access the runner on AMD
466+
# Don't try to access the runner on AMD
467467
if self.aot_schedule:
468468
self.page_size = self.kv_cache_spec.block_size
469469

0 commit comments

Comments (0)