Skip to content

Commit 8055f98

Browse files
Zerohertz authored and xuebwang-amd committed
[Docs] Fix warnings in mkdocs build (continued) (vllm-project#25163)
Signed-off-by: Zerohertz <ohg3417@gmail.com>
Signed-off-by: xuebwang-amd <xuebwang@amd.com>
1 parent da9a745 commit 8055f98

File tree

2 files changed

+3
-7
lines changed

2 files changed

+3
-7
lines changed

vllm/distributed/device_communicators/shm_object_storage.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -253,7 +253,7 @@ def free_buf(self,
253253
254254
Args:
255255
nbytes (int, optional): The size of the buffer to free. If None,
256-
frees the maximum size of the ring buffer.
256+
frees the maximum size of the ring buffer.
257257
'''
258258

259259
assert self.is_writer, "Only the writer can free buffers."

vllm/entrypoints/openai/serving_engine.py

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -697,9 +697,7 @@ async def _tokenize_prompt_input_async(
697697
add_special_tokens: bool = True,
698698
) -> TextTokensPrompt:
699699
"""
700-
A simpler implementation of
701-
[`_tokenize_prompt_input_or_inputs`][vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs]
702-
that assumes single input.
700+
A simpler implementation that tokenizes a single prompt input.
703701
"""
704702
async for result in self._tokenize_prompt_inputs_async(
705703
request,
@@ -718,9 +716,7 @@ async def _tokenize_prompt_inputs_async(
718716
add_special_tokens: bool = True,
719717
) -> AsyncGenerator[TextTokensPrompt, None]:
720718
"""
721-
A simpler implementation of
722-
[`_tokenize_prompt_input_or_inputs`][vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs]
723-
that assumes multiple inputs.
719+
A simpler implementation that tokenizes multiple prompt inputs.
724720
"""
725721
for prompt in prompt_inputs:
726722
if isinstance(prompt, str):

0 commit comments

Comments (0)