Skip to content

Commit 9365625

Browse files
committed
docs: class migration from sphinx to mkdocs (vllm)
Signed-off-by: Zerohertz <ohg3417@gmail.com>
1 parent be58f38 commit 9365625

File tree

5 files changed: +42 additions, −32 deletions

vllm/config.py

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -2980,7 +2980,7 @@ class PoolerConfig:
29802980
pooling_type: Optional[str] = None
29812981
"""
29822982
The pooling method of the pooling model. This should be a key in
2983-
{class}`vllm.model_executor.layers.pooler.PoolingType`.
2983+
[`vllm.model_executor.layers.pooler.PoolingType`][].
29842984
"""
29852985

29862986
normalize: Optional[bool] = None
@@ -3691,23 +3691,27 @@ class CompilationConfig:
36913691
"""Configuration for compilation. It has three parts:
36923692
36933693
- Top-level Compilation control:
3694-
- {attr}`level`
3695-
- {attr}`debug_dump_path`
3696-
- {attr}`cache_dir`
3697-
- {attr}`backend`
3698-
- {attr}`custom_ops`
3699-
- {attr}`splitting_ops`
3694+
- [`level`][vllm.config.CompilationConfig.level]
3695+
- [`debug_dump_path`][vllm.config.CompilationConfig.debug_dump_path]
3696+
- [`cache_dir`][vllm.config.CompilationConfig.cache_dir]
3697+
- [`backend`][vllm.config.CompilationConfig.backend]
3698+
- [`custom_ops`][vllm.config.CompilationConfig.custom_ops]
3699+
- [`splitting_ops`][vllm.config.CompilationConfig.splitting_ops]
37003700
- CudaGraph capture:
3701-
- {attr}`use_cudagraph`
3702-
- {attr}`cudagraph_capture_sizes`
3703-
- {attr}`cudagraph_num_of_warmups`
3704-
- {attr}`cudagraph_copy_inputs`
3705-
- {attr}`full_cuda_graph`
3701+
- [`use_cudagraph`][vllm.config.CompilationConfig.use_cudagraph]
3702+
- [`cudagraph_capture_sizes`]
3703+
[vllm.config.CompilationConfig.cudagraph_capture_sizes]
3704+
- [`cudagraph_num_of_warmups`]
3705+
[vllm.config.CompilationConfig.cudagraph_num_of_warmups]
3706+
- [`cudagraph_copy_inputs`]
3707+
[vllm.config.CompilationConfig.cudagraph_copy_inputs]
3708+
- [`full_cuda_graph`][vllm.config.CompilationConfig.full_cuda_graph]
37063709
- Inductor compilation:
3707-
- {attr}`use_inductor`
3708-
- {attr}`compile_sizes`
3709-
- {attr}`inductor_compile_config`
3710-
- {attr}`inductor_passes`
3710+
- [`use_inductor`][vllm.config.CompilationConfig.use_inductor]
3711+
- [`compile_sizes`][vllm.config.CompilationConfig.compile_sizes]
3712+
- [`inductor_compile_config`]
3713+
[vllm.config.CompilationConfig.inductor_compile_config]
3714+
- [`inductor_passes`][vllm.config.CompilationConfig.inductor_passes]
37113715
- custom inductor passes
37123716
37133717
Why we have different sizes for cudagraph and inductor:

vllm/connections.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -167,4 +167,7 @@ async def async_download_file(
167167

168168

169169
global_http_connection = HTTPConnection()
170-
"""The global {class}`HTTPConnection` instance used by vLLM."""
170+
"""
171+
The global [`HTTPConnection`][vllm.connections.HTTPConnection] instance used
172+
by vLLM.
173+
"""

vllm/logger.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -68,22 +68,22 @@ class _VllmLogger(Logger):
6868
"""
6969
Note:
7070
This class is just to provide type information.
71-
We actually patch the methods directly on the {class}`logging.Logger`
71+
We actually patch the methods directly on the [`logging.Logger`][]
7272
instance to avoid conflicting with other libraries such as
7373
`intel_extension_for_pytorch.utils._logger`.
7474
"""
7575

7676
def info_once(self, msg: str, *args: Hashable) -> None:
7777
"""
78-
As {meth}`info`, but subsequent calls with the same message
79-
are silently dropped.
78+
As [`info`][logging.Logger.info], but subsequent calls with
79+
the same message are silently dropped.
8080
"""
8181
_print_info_once(self, msg, *args)
8282

8383
def warning_once(self, msg: str, *args: Hashable) -> None:
8484
"""
85-
As {meth}`warning`, but subsequent calls with the same message
86-
are silently dropped.
85+
As [`warning`][logging.Logger.warning], but subsequent calls with
86+
the same message are silently dropped.
8787
"""
8888
_print_warning_once(self, msg, *args)
8989

vllm/sequence.py

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727

2828

2929
def array_full(token_id: int, count: int):
30-
"""{class}`array` equivalent of [numpy.full][]."""
30+
"""[`array`][] equivalent of [numpy.full][]."""
3131
return array(VLLM_TOKEN_ID_ARRAY_TYPE, [token_id]) * count
3232

3333

@@ -192,8 +192,8 @@ class SequenceData(msgspec.Struct,
192192
def from_prompt_token_counts(
193193
*token_counts: tuple[int, int]) -> "SequenceData":
194194
"""
195-
Construct a {class}`SequenceData` instance by concatenating
196-
prompt token sequences.
195+
Construct a [`SequenceData`][vllm.sequence.SequenceData] instance
196+
by concatenating prompt token sequences.
197197
198198
Each tuple represents one token sequence, expressed in the form
199199
`(token_id, count)`.
@@ -216,8 +216,8 @@ def from_seqs(
216216
prompt_embeds: Optional[torch.Tensor] = None,
217217
) -> "SequenceData":
218218
"""
219-
Construct a {class}`SequenceData` instance from prompt and output
220-
token sequences.
219+
Construct a [`SequenceData`][vllm.sequence.SequenceData] instance
220+
from prompt and output token sequences.
221221
"""
222222
prompt_token_ids_arr = array(VLLM_TOKEN_ID_ARRAY_TYPE,
223223
prompt_token_ids)
@@ -452,9 +452,11 @@ def __repr__(self) -> str:
452452
class Sequence:
453453
"""Stores the data, status, and block information of a sequence.
454454
455-
The sequence is constructed from the {data}`DecoderOnlyInputs`
456-
(for decoder-only) or {data}`EncoderDecoderInputs` (for encoder-decoder)
457-
instance passed in through the `inputs` constructor argument.
455+
The sequence is constructed from the
456+
[`DecoderOnlyInputs`][vllm.inputs.data.DecoderOnlyInputs] (for decoder-only)
457+
or [`EncoderDecoderInputs`][vllm.inputs.data.EncoderDecoderInputs]
458+
(for encoder-decoder) instance passed in through the `inputs`
459+
constructor argument.
458460
459461
Args:
460462
seq_id: The ID of the sequence.

vllm/utils.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1004,7 +1004,7 @@ def flatten_2d_lists(lists: Iterable[Iterable[T]]) -> list[T]:
10041004

10051005
def full_groupby(values: Iterable[_V], *, key: Callable[[_V], _K]):
10061006
"""
1007-
Unlike {class}`itertools.groupby`, groups are not broken by
1007+
Unlike [`itertools.groupby`][], groups are not broken by
10081008
non-contiguous data.
10091009
"""
10101010
groups = defaultdict[_K, list[_V]](list)
@@ -1924,7 +1924,8 @@ class _PlaceholderBase:
19241924
Disallows downstream usage of placeholder modules.
19251925
19261926
We need to explicitly override each dunder method because
1927-
{meth}`__getattr__` is not called when they are accessed.
1927+
[`__getattr__`][vllm.utils._PlaceholderBase.__getattr__]
1928+
is not called when they are accessed.
19281929
19291930
Info:
19301931
[Special method lookup](https://docs.python.org/3/reference/datamodel.html#special-lookup)

0 commit comments

Comments (0)