From 182adec7d020fc01ea7ee504e7eee9b50b6f1a7d Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Wed, 23 Oct 2024 23:27:19 +0530
Subject: [PATCH] def test_text_completion_with_echo(stream): (#6401)

test
---
 litellm/types/utils.py                       |  4 +-
 .../test_text_completion_unit_tests.py       | 64 +++++++++++++++++++
 tests/local_testing/test_text_completion.py  | 21 ++++++
 3 files changed, 87 insertions(+), 2 deletions(-)
 create mode 100644 tests/llm_translation/test_text_completion_unit_tests.py

diff --git a/litellm/types/utils.py b/litellm/types/utils.py
index 28a37e88d1e8..341c9fc8b852 100644
--- a/litellm/types/utils.py
+++ b/litellm/types/utils.py
@@ -970,9 +970,9 @@ def json(self, **kwargs):  # type: ignore
 
 class Logprobs(OpenAIObject):
     text_offset: List[int]
-    token_logprobs: List[float]
+    token_logprobs: List[Union[float, None]]
     tokens: List[str]
-    top_logprobs: List[Dict[str, float]]
+    top_logprobs: List[Union[Dict[str, float], None]]
 
 
 class TextChoices(OpenAIObject):
diff --git a/tests/llm_translation/test_text_completion_unit_tests.py b/tests/llm_translation/test_text_completion_unit_tests.py
new file mode 100644
index 000000000000..2012ae11b2e8
--- /dev/null
+++ b/tests/llm_translation/test_text_completion_unit_tests.py
@@ -0,0 +1,64 @@
+import json
+import os
+import sys
+from datetime import datetime
+from unittest.mock import AsyncMock
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+
+from litellm.types.utils import TextCompletionResponse
+
+
+def test_convert_dict_to_text_completion_response():
+    input_dict = {
+        "id": "cmpl-ALVLPJgRkqpTomotoOMi3j0cAaL4L",
+        "choices": [
+            {
+                "finish_reason": "length",
+                "index": 0,
+                "logprobs": {
+                    "text_offset": [0, 5],
+                    "token_logprobs": [None, -12.203847],
+                    "tokens": ["hello", " crisp"],
+                    "top_logprobs": [None, {",": -2.1568563}],
+                },
+                "text": "hello crisp",
+            }
+        ],
+        "created": 1729688739,
+        "model": "davinci-002",
+        "object": "text_completion",
+        "system_fingerprint": None,
+        "usage": {
+            "completion_tokens": 1,
+            "prompt_tokens": 1,
+            "total_tokens": 2,
+            "completion_tokens_details": None,
+            "prompt_tokens_details": None,
+        },
+    }
+
+    response = TextCompletionResponse(**input_dict)
+
+    assert response.id == "cmpl-ALVLPJgRkqpTomotoOMi3j0cAaL4L"
+    assert len(response.choices) == 1
+    assert response.choices[0].finish_reason == "length"
+    assert response.choices[0].index == 0
+    assert response.choices[0].text == "hello crisp"
+    assert response.created == 1729688739
+    assert response.model == "davinci-002"
+    assert response.object == "text_completion"
+    assert response.system_fingerprint is None
+    assert response.usage.completion_tokens == 1
+    assert response.usage.prompt_tokens == 1
+    assert response.usage.total_tokens == 2
+    assert response.usage.completion_tokens_details is None
+    assert response.usage.prompt_tokens_details is None
+
+    # Test logprobs
+    assert response.choices[0].logprobs.text_offset == [0, 5]
+    assert response.choices[0].logprobs.token_logprobs == [None, -12.203847]
+    assert response.choices[0].logprobs.tokens == ["hello", " crisp"]
+    assert response.choices[0].logprobs.top_logprobs == [None, {",": -2.1568563}]
diff --git a/tests/local_testing/test_text_completion.py b/tests/local_testing/test_text_completion.py
index 76d1dbb19ed2..6059d60bc2ad 100644
--- a/tests/local_testing/test_text_completion.py
+++ b/tests/local_testing/test_text_completion.py
@@ -4259,3 +4259,24 @@ def test_completion_fireworks_ai_multiple_choices():
     )
     print(response.choices)
     assert len(response.choices) == 4
+
+
+@pytest.mark.parametrize("stream", [True, False])
+def test_text_completion_with_echo(stream):
+    litellm.set_verbose = True
+    response = litellm.text_completion(
+        model="davinci-002",
+        prompt="hello",
+        max_tokens=1,  # only see the first token
+        stop="\n",  # stop at the first newline
+        logprobs=1,  # return log prob
+        echo=True,  # if True, return the prompt as well
+        stream=stream,
+    )
+    print(response)
+
+    if stream:
+        for chunk in response:
+            print(chunk)
+    else:
+        assert isinstance(response, TextCompletionResponse)
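
Reviewer note (not part of the patch): the Logprobs typing change exists
because, with echo=True, the completions API returns null for the first
prompt token's logprob (there is no preceding context to score it against),
so the strict List[float] / List[Dict[str, float]] annotations failed
pydantic validation on echoed responses. Below is a minimal sketch of what
the relaxed model now accepts, reusing the fixture values from the unit test
above; it assumes a litellm checkout is importable.

    from litellm.types.utils import Logprobs

    # With echo=True, the first entry of token_logprobs / top_logprobs is
    # None for the echoed prompt token; the old annotations rejected this.
    logprobs = Logprobs(
        text_offset=[0, 5],
        token_logprobs=[None, -12.203847],
        tokens=["hello", " crisp"],
        top_logprobs=[None, {",": -2.1568563}],
    )
    assert logprobs.token_logprobs[0] is None
    assert logprobs.top_logprobs[0] is None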