# SPDX-License-Identifier: Apache-2.0
import os

import pytest

from vllm import LLM, SamplingParams

# These tests exercise the V1 engine only; skip the whole module otherwise.
if os.getenv("VLLM_USE_V1", "0") != "1":
    pytest.skip("Test package requires V1", allow_module_level=True)

MODEL = "meta-llama/Llama-3.2-1B"
PROMPT = "Hello my name is Robert and I"


@pytest.fixture(scope="module")
def model() -> LLM:
    # enforce_eager=True skips CUDA graph capture, keeping the
    # module-scoped engine cheap to construct.
    return LLM(MODEL, enforce_eager=True)


def test_n_gt_1(model):
    """Parallel sampling (n > 1) is supported."""

    params = SamplingParams(n=3)
    outputs = model.generate(PROMPT, params)
    assert len(outputs[0].outputs) == 3
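
# With n=3, the single RequestOutput above carries three
# CompletionOutputs; candidate i is available as outputs[0].outputs[i].text.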


def test_best_of(model):
    """best_of is deprecated, so requesting it should raise a ValueError."""

    params = SamplingParams(n=2, best_of=3)
    with pytest.raises(ValueError):
        _ = model.generate(PROMPT, params)


def test_penalties(model):
    """Check that penalty and sampling parameters are applied without error."""

    params = SamplingParams(
        temperature=1.2,
        presence_penalty=1.2,
        frequency_penalty=1.2,
        repetition_penalty=1.2,
        min_p=0.5,
        top_p=0.5,
        top_k=3,
    )
    _ = model.generate(PROMPT, params)


def test_stop(model):
    """Check that we respect the stop words."""

    output = model.generate(PROMPT, SamplingParams(temperature=0))
    split_text = output[0].outputs[0].text.split()

    STOP_IDX = 5
    params = SamplingParams(temperature=0, stop=split_text[STOP_IDX])
    output = model.generate(PROMPT, params)
    new_split_text = output[0].outputs[0].text.split()

    # Output should not contain the stop word.
    assert len(new_split_text) == STOP_IDX

    params = SamplingParams(temperature=0,
                            stop=split_text[STOP_IDX],
                            include_stop_str_in_output=True)
    output = model.generate(PROMPT, params)
    new_split_text = output[0].outputs[0].text.split()

    # Output should contain the stop word.
    assert len(new_split_text) == STOP_IDX + 1
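
# `stop` also accepts a list of strings; matching happens on the
# detokenized text, and the matched stop string is only included in the
# output when include_stop_str_in_output=True (as exercised above).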


def test_stop_token_ids(model):
    """Check that we respect the stop token ids."""

    output = model.generate(PROMPT, SamplingParams(temperature=0))

    stop_token_id_0 = output[0].outputs[0].token_ids[5]
    stop_token_id_1 = output[0].outputs[0].token_ids[6]

    # Generation should stop at whichever stop token comes first in the
    # stream, regardless of its position in the stop_token_ids list.
    stop_token_ids = [stop_token_id_1, stop_token_id_0]
    params = SamplingParams(temperature=0, stop_token_ids=stop_token_ids)
    output = model.generate(PROMPT, params)
    assert output[0].outputs[0].token_ids[-1] == stop_token_id_0

    stop_token_ids = [stop_token_id_0, stop_token_id_1]
    params = SamplingParams(temperature=0, stop_token_ids=stop_token_ids)
    output = model.generate(PROMPT, params)
    assert output[0].outputs[0].token_ids[-1] == stop_token_id_0
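
# Unlike stop strings, the matched stop token id is retained in the
# output token_ids, which is what the last-token asserts above rely on.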


def test_bad_words(model):
    """Check that we reject bad_words (not yet supported)."""

    with pytest.raises(ValueError):
        _ = model.generate(PROMPT, SamplingParams(bad_words=["Hello"]))


def test_logits_processor(model):
    """Check that we reject logits processors."""

    # This sample logits processor gives an infinite score to the i-th
    # token, where i is the length of the input sequence; if it were
    # supported, the output token sequence would be [0, 1, 2, ...].
    # V1 rejects per-request logits processors, so we expect a ValueError.
    def pick_ith(token_ids, logits):
        logits[len(token_ids)] = float("inf")
        return logits

    with pytest.raises(ValueError):
        _ = model.generate(PROMPT,
                           SamplingParams(logits_processors=[pick_ith]))


def test_allowed_token_ids(model):
    """Check that we can use allowed_token_ids."""

    TOKEN_ID = 10
    allowed_token_ids = [TOKEN_ID]
    output = model.generate(
        PROMPT, SamplingParams(allowed_token_ids=allowed_token_ids))
    assert output[0].outputs[0].token_ids[-1] == TOKEN_ID

    # Reject negative token ids.
    with pytest.raises(ValueError):
        _ = model.generate(PROMPT, SamplingParams(allowed_token_ids=[-1]))

    # Reject out-of-vocabulary token ids.
    with pytest.raises(ValueError):
        _ = model.generate(PROMPT,
                           SamplingParams(allowed_token_ids=[10000000]))
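
# allowed_token_ids masks the logits so only the listed ids can be
# sampled; with a single allowed id every generated token is that id,
# so checking the last token above suffices.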


def test_priority(model):
    """Check that we reject requests with priority."""

    # Per-request priority is not supported here, so passing one should
    # raise.
    with pytest.raises(ValueError):
        _ = model.generate(PROMPT, priority=[1])
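
# Note: per-request priority only takes effect under a priority
# scheduling policy (vLLM's --scheduling-policy option); this engine
# rejects it outright.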


def test_seed(model):
    """Check that seed impacts randomness."""

    out_1 = model.generate(PROMPT, SamplingParams(seed=42))
    out_2 = model.generate(PROMPT, SamplingParams(seed=42))
    out_3 = model.generate(PROMPT, SamplingParams(seed=43))
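
    # SamplingParams defaults to temperature=1.0 (random sampling), so
    # the seed is what makes out_1 and out_2 match; greedy decoding
    # would make all three runs identical.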
    assert out_1[0].outputs[0].text == out_2[0].outputs[0].text
    assert out_1[0].outputs[0].text != out_3[0].outputs[0].text