1 parent 79455cf · commit 656fd72
vllm/config.py
@@ -2359,12 +2359,10 @@ def num_lookahead_slots(self) -> int:
         return self.num_speculative_tokens
 
     def __repr__(self) -> str:
-        if self.prompt_lookup_max is not None and self.prompt_lookup_max > 0:
-            draft_model = "ngram"
-        else:
-            draft_model = self.draft_model_config.model
+        method = self.method
+        model = None if method == "ngram" else self.draft_model_config.model
         num_spec_tokens = self.num_speculative_tokens
-        return f"SpeculativeConfig({draft_model=}, {num_spec_tokens=})"
+        return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"
 
 
 @dataclass
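
For context, here is a minimal standalone sketch of how the updated __repr__ behaves. This is not vLLM's actual SpeculativeConfig: the dataclass fields, the "draft_model" method string, and the model name below are placeholder assumptions used only to show the resulting repr output.

from dataclasses import dataclass
from typing import Optional


@dataclass
class DraftModelConfig:
    # Stand-in for the draft model config; only the field read by __repr__.
    model: str


@dataclass
class SpeculativeConfig:
    # Minimal sketch holding just the fields the updated __repr__ touches.
    method: str
    num_speculative_tokens: int
    draft_model_config: Optional[DraftModelConfig] = None

    def __repr__(self) -> str:
        # Report the speculative method directly; only resolve a draft model
        # name when the method actually uses one (i.e. not "ngram").
        method = self.method
        model = None if method == "ngram" else self.draft_model_config.model
        num_spec_tokens = self.num_speculative_tokens
        return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"


print(SpeculativeConfig(method="ngram", num_speculative_tokens=5))
# SpeculativeConfig(method='ngram', model=None, num_spec_tokens=5)

print(SpeculativeConfig(
    method="draft_model",  # placeholder method name for a model-based drafter
    num_speculative_tokens=3,
    draft_model_config=DraftModelConfig(model="example-org/draft-model"),
))
# SpeculativeConfig(method='draft_model', model='example-org/draft-model', num_spec_tokens=3)

Compared with the old repr, the key difference is that an ngram drafter is now reported through the method field rather than by overloading the model name, and the draft model is shown as None when no model is involved.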