Commit 182adec
def test_text_completion_with_echo(stream): (#6401)
test
ishaan-jaff authored Oct 23, 2024
1 parent d063086 commit 182adec
Showing 3 changed files with 87 additions and 2 deletions.
4 changes: 2 additions & 2 deletions litellm/types/utils.py
@@ -970,9 +970,9 @@ def json(self, **kwargs):  # type: ignore

 class Logprobs(OpenAIObject):
     text_offset: List[int]
-    token_logprobs: List[float]
+    token_logprobs: List[Union[float, None]]
     tokens: List[str]
-    top_logprobs: List[Dict[str, float]]
+    top_logprobs: List[Union[Dict[str, float], None]]


 class TextChoices(OpenAIObject):
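
For context on the type change above: when echo=True is set on a text completion, the first echoed prompt token has no preceding context, so the provider returns null for its token_logprobs and top_logprobs entries. A minimal sketch of what the widened annotations now accept (illustrative, not part of this commit); under the old List[float] and List[Dict[str, float]] annotations, pydantic validation would reject the None entries:

from litellm.types.utils import Logprobs

# First entries are None: the echoed prompt token has no preceding
# context, so no logprob can be reported for it.
lp = Logprobs(
    text_offset=[0, 5],
    token_logprobs=[None, -12.203847],
    tokens=["hello", " crisp"],
    top_logprobs=[None, {",": -2.1568563}],
)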
64 changes: 64 additions & 0 deletions tests/llm_translation/test_text_completion_unit_tests.py
@@ -0,0 +1,64 @@
import json
import os
import sys
from datetime import datetime
from unittest.mock import AsyncMock

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

from litellm.types.utils import TextCompletionResponse


def test_convert_dict_to_text_completion_response():
    input_dict = {
        "id": "cmpl-ALVLPJgRkqpTomotoOMi3j0cAaL4L",
        "choices": [
            {
                "finish_reason": "length",
                "index": 0,
                "logprobs": {
                    "text_offset": [0, 5],
                    "token_logprobs": [None, -12.203847],
                    "tokens": ["hello", " crisp"],
                    "top_logprobs": [None, {",": -2.1568563}],
                },
                "text": "hello crisp",
            }
        ],
        "created": 1729688739,
        "model": "davinci-002",
        "object": "text_completion",
        "system_fingerprint": None,
        "usage": {
            "completion_tokens": 1,
            "prompt_tokens": 1,
            "total_tokens": 2,
            "completion_tokens_details": None,
            "prompt_tokens_details": None,
        },
    }

    response = TextCompletionResponse(**input_dict)

    assert response.id == "cmpl-ALVLPJgRkqpTomotoOMi3j0cAaL4L"
    assert len(response.choices) == 1
    assert response.choices[0].finish_reason == "length"
    assert response.choices[0].index == 0
    assert response.choices[0].text == "hello crisp"
    assert response.created == 1729688739
    assert response.model == "davinci-002"
    assert response.object == "text_completion"
    assert response.system_fingerprint is None
    assert response.usage.completion_tokens == 1
    assert response.usage.prompt_tokens == 1
    assert response.usage.total_tokens == 2
    assert response.usage.completion_tokens_details is None
    assert response.usage.prompt_tokens_details is None

    # Test logprobs
    assert response.choices[0].logprobs.text_offset == [0, 5]
    assert response.choices[0].logprobs.token_logprobs == [None, -12.203847]
    assert response.choices[0].logprobs.tokens == ["hello", " crisp"]
    assert response.choices[0].logprobs.top_logprobs == [None, {",": -2.1568563}]
21 changes: 21 additions & 0 deletions tests/local_testing/test_text_completion.py
@@ -4259,3 +4259,24 @@ def test_completion_fireworks_ai_multiple_choices():
    print(response.choices)

    assert len(response.choices) == 4


@pytest.mark.parametrize("stream", [True, False])
def test_text_completion_with_echo(stream):
    litellm.set_verbose = True
    response = litellm.text_completion(
        model="davinci-002",
        prompt="hello",
        max_tokens=1,  # only see the first token
        stop="\n",  # stop at the first newline
        logprobs=1,  # return log prob
        echo=True,  # if True, return the prompt as well
        stream=stream,
    )
    print(response)

    if stream:
        for chunk in response:
            print(chunk)
    else:
        assert isinstance(response, TextCompletionResponse)
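
A hedged sketch of consuming the streamed variant (illustrative, not part of this commit; it assumes each streamed chunk follows the OpenAI text-completion chunk shape with choices[0].text, and mirrors the davinci-002 call in the test above):

import litellm

response = litellm.text_completion(
    model="davinci-002",
    prompt="hello",
    max_tokens=1,
    logprobs=1,
    echo=True,
    stream=True,
)

full_text = ""
for chunk in response:
    # With echo=True, the echoed prompt text arrives in the stream as well.
    full_text += chunk.choices[0].text or ""
print(full_text)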
