convert_to_openai_object in new API #715
Comments
How were you using this function? In v1 you can simply construct the object yourself without the need for a helper function, e.g.

```python
from datetime import datetime

from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import ChatCompletion, Choice

completion = ChatCompletion(
    id="foo",
    model="gpt-4",
    object="chat.completion",
    choices=[
        Choice(
            finish_reason="stop",
            index=0,
            message=ChatCompletionMessage(
                content="Hello world!",
                role="assistant",
            ),
        )
    ],
    created=int(datetime.now().timestamp()),
)
```

More details: #398 (comment)
Thanks! That's a much cleaner interface. I started using `ChatCompletion` directly, however I'm still having issues mocking the request; I still get an auth error when mocking the `/v1/chat/completions` endpoint.
Here's the unit test:

```python
import datetime
import unittest
from unittest.mock import patch, MagicMock
import os

import httpx
from respx import MockRouter

from openai import OpenAI
from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import ChatCompletion, Choice

from x_lib import gpt_lib
from x_lib.model_config import ModelConfig

os.environ["OPENAI_API_KEY"] = "OPENAI_API_KEY"
client = OpenAI()


class TestRunGptPrompt(unittest.IsolatedAsyncioTestCase):
    @patch('x_lib.gpt_lib.retry')
    async def test_run_gpt_prompt(self, _):
        mock_model_config = MagicMock(spec=ModelConfig)
        mock_model_config.api_env_var = "OPENAI_API_KEY"
        mock_model_config.model_version = "gpt-version"
        mock_model_config.model_temperature = 0.5
        mock_model_config.output_token_limit = None
        mock_model_config.seed = None
        mock_model_config.supports_json_mode = False

        mocked_content = "mocked content"
        completion = ChatCompletion(
            id="foo",
            model=mock_model_config.model_version,
            object="chat.completion",
            choices=[
                Choice(
                    finish_reason="stop",
                    index=0,
                    message=ChatCompletionMessage(
                        content=mocked_content,
                        role="assistant",
                    ),
                )
            ],
            created=int(datetime.datetime.now().timestamp()),
        )

        mock_router = MockRouter()
        mock_router.post("/v1/chat/completions").mock(
            return_value=httpx.Response(200, json=completion.model_dump_json())
        )

        output = await gpt_lib.run_gpt_prompt(
            mock_model_config, messages=[{"role": "system", "content": "test"}])
        self.assertEqual(output, mocked_content)


if __name__ == "__main__":
    unittest.main()
```

For reference, here is `gpt_lib.run_gpt_prompt`:

```python
import os
from typing import Dict, List

from tenacity import (
    retry,
    stop_after_attempt,
    wait_random_exponential,
)  # for exponential backoff
import openai

from x_lib.model_config import ModelConfig
from x_lib.logging import LogSeverity, log_exception, log_status

DEFAULT_MAX_TOKENS = 3000


@retry(
    wait=wait_random_exponential(min=1, max=60),
    stop=stop_after_attempt(1),
)
async def run_gpt_prompt(
        model_config: ModelConfig,
        messages: List[Dict[str, str]],
        max_tokens: int = DEFAULT_MAX_TOKENS,
        **kwargs) -> str:
    try:
        client = openai.OpenAI()
        openai.api_key = os.getenv(model_config.api_env_var)
        if not openai.api_key:
            raise ValueError(
                f"OpenAI API key not set. Please set the {model_config.api_env_var} environment variable.")
        if model_config.output_token_limit:
            max_tokens = min(max_tokens, model_config.output_token_limit)
        response = client.chat.completions.create(
            model=model_config.model_version,
            messages=messages,
            temperature=model_config.model_temperature,
            seed=model_config.seed,
            max_tokens=max_tokens,
            response_format={"type": "json_object"} if model_config.supports_json_mode else None,
        )
        choice = response.choices[0]
        if choice.finish_reason == 'length':
            log_status(LogSeverity.WARNING, "OpenAI GPT response exceeded token limits", **kwargs)
        return choice.message.content
    except openai.APITimeoutError as exc:
        log_exception(LogSeverity.WARNING, "OpenAI Timeout error running GPT", exception=exc, **kwargs)
        raise exc
    except openai.RateLimitError as exc:
        log_exception(LogSeverity.WARNING, "OpenAI RateLimitError exceeded running GPT", exception=exc, **kwargs)
        raise exc
    except openai.APIStatusError as exc:
        log_exception(LogSeverity.WARNING, "OpenAI APIStatusError running GPT", exception=exc, **kwargs)
        raise exc
    except openai.APIError as exc:
        log_exception(LogSeverity.ERROR, "OpenAI APIError running GPT", exception=exc, **kwargs)
        raise exc
    except openai.OpenAIError as exc:
        log_exception(LogSeverity.ERROR, "OpenAI OpenAIError running GPT", exception=exc, **kwargs)
        raise exc
    except Exception as exc:  # pylint: disable=broad-except
        log_exception(LogSeverity.ERROR, "Error running GPT", exception=exc, **kwargs)
        raise exc
```
It looks like the `MockRouter` is never actually activated, so the request still goes out to the real API. I'm not sure that's the intended way to use respx; please ask for any further help using respx in their repo! https://github.com/lundberg/respx
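In case it helps, a minimal sketch of the two usual ways to activate the router (decorator or context manager), using respx's default global router and placeholder routes/responses:

```python
import httpx
import respx


@respx.mock  # activates the default router for the duration of the test
def test_with_decorator():
    respx.post("https://api.openai.com/v1/chat/completions").mock(
        return_value=httpx.Response(200, json={"mocked": True})
    )
    r = httpx.post("https://api.openai.com/v1/chat/completions", json={})
    assert r.json() == {"mocked": True}


def test_with_context_manager():
    # Same effect, scoped to the with-block.
    with respx.mock:
        respx.get("https://api.openai.com/v1/models").mock(
            return_value=httpx.Response(200, json={"data": []})
        )
        assert httpx.get("https://api.openai.com/v1/models").json() == {"data": []}
```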
For those reading, this is the working version of the test:

```python
import datetime
import unittest
from unittest.mock import patch, MagicMock
import os

import httpx
import respx

from openai import OpenAI
from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import ChatCompletion, Choice

from x_lib import gpt_lib
from x_lib.model_config import ModelConfig

os.environ["OPENAI_API_KEY"] = "OPENAI_API_KEY"
client = OpenAI()


class TestRunGptPrompt(unittest.IsolatedAsyncioTestCase):
    @respx.mock
    @patch('x_lib.gpt_lib.retry')
    async def test_run_gpt_prompt(self, _):
        mock_model_config = MagicMock(spec=ModelConfig)
        mock_model_config.api_env_var = "OPENAI_API_KEY"
        mock_model_config.model_version = "gpt-version"
        mock_model_config.model_temperature = 0.5
        mock_model_config.output_token_limit = None
        mock_model_config.seed = None
        mock_model_config.supports_json_mode = False

        mocked_content = "mocked content"
        completion = ChatCompletion(
            id="foo",
            model=mock_model_config.model_version,
            object="chat.completion",
            choices=[
                Choice(
                    finish_reason="stop",
                    index=0,
                    message=ChatCompletionMessage(
                        content=mocked_content,
                        role="assistant",
                    ),
                )
            ],
            created=int(datetime.datetime.now().timestamp()),
        )

        respx.post("https://api.openai.com/v1/chat/completions").mock(
            return_value=httpx.Response(200, json=completion.dict())
        )

        output = await gpt_lib.run_gpt_prompt(
            mock_model_config, messages=[{"role": "system", "content": "test"}])
        self.assertEqual(output, mocked_content)


if __name__ == "__main__":
    unittest.main()
```
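If you'd rather not repeat the full URL in every route, respx can also scope a router to a base URL. A minimal sketch, assuming respx's decorator-with-arguments form (which injects the router into the test as an extra `respx_mock` argument); the route and response here are placeholders:

```python
import httpx
import respx


# Relative paths resolve against base_url on a scoped router.
@respx.mock(base_url="https://api.openai.com")
def test_scoped_router(respx_mock):
    respx_mock.post("/v1/chat/completions").mock(
        return_value=httpx.Response(200, json={"id": "foo"})
    )
    r = httpx.post("https://api.openai.com/v1/chat/completions", json={})
    assert r.json() == {"id": "foo"}
```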
A little more intuitive version of the unit test with v2 for the `OpenAIAssistant` class.
Here's an example without any helper stuff, just using the raw API. Includes both basic response and streamed response mocks.

```python
import datetime
from unittest.mock import patch

from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import ChatCompletion, Choice
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta
from openai.types.chat.chat_completion_chunk import Choice as StreamChoice


def create_chat_completion(response: str, role: str = "assistant") -> ChatCompletion:
    return ChatCompletion(
        id="foo",
        model="gpt-3.5-turbo",
        object="chat.completion",
        choices=[
            Choice(
                finish_reason="stop",
                index=0,
                message=ChatCompletionMessage(
                    content=response,
                    role=role,
                ),
            )
        ],
        created=int(datetime.datetime.now().timestamp()),
    )


@patch("openai.resources.chat.Completions.create")
def test_chat_completion(openai_create):
    from openai import OpenAI

    EXPECTED_RESPONSE = "The mock is working! ;)"
    openai_create.return_value = create_chat_completion(EXPECTED_RESPONSE)

    client = OpenAI(api_key="sk-...")
    r = client.chat.completions.create(
        messages=[{"role": "user", "content": "Do you know any jokes?"}],
        model="gpt-3.5-turbo",
    )
    response = r.choices[0].message.content
    assert response == EXPECTED_RESPONSE


def create_stream_chat_completion(response: str, role: str = "assistant"):
    for token in response:
        yield ChatCompletionChunk(
            id="foo",
            model="gpt-3.5-turbo",
            object="chat.completion.chunk",
            choices=[
                StreamChoice(
                    index=0,
                    finish_reason=None,
                    delta=ChoiceDelta(
                        content=token,
                        role=role,
                    )
                ),
            ],
            created=int(datetime.datetime.now().timestamp()),
        )


@patch("openai.resources.chat.Completions.create")
def test_stream_chat_completion(openai_create):
    from openai import OpenAI

    EXPECTED_RESPONSE = "The mock is STILL working! ;)"
    openai_create.return_value = create_stream_chat_completion(EXPECTED_RESPONSE)

    client = OpenAI(api_key="sk-...")
    stream = client.chat.completions.create(
        messages=[{"role": "user", "content": "Do you know any jokes?"}],
        model="gpt-3.5-turbo",
        stream=True,
    )

    response = ""
    chunk_count = 0
    for chunk in stream:
        response += (chunk.choices[0].delta.content or "")
        chunk_count += 1

    assert response == EXPECTED_RESPONSE
    assert chunk_count == len(EXPECTED_RESPONSE)
```
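If you're on the async client, the same approach seems to carry over by patching the async resource instead. A minimal sketch reusing the `create_chat_completion` helper above, and assuming `AsyncCompletions` is exposed at `openai.resources.chat` (on Python 3.8+, `patch()` detects the async `create` and installs an `AsyncMock` automatically):

```python
import asyncio
from unittest.mock import patch


@patch("openai.resources.chat.AsyncCompletions.create")
def test_async_chat_completion(openai_create):
    from openai import AsyncOpenAI

    EXPECTED_RESPONSE = "The async mock is working! ;)"
    # create() is an async def, so the awaited call resolves to return_value.
    openai_create.return_value = create_chat_completion(EXPECTED_RESPONSE)

    async def run() -> str:
        client = AsyncOpenAI(api_key="sk-...")
        r = await client.chat.completions.create(
            messages=[{"role": "user", "content": "Do you know any jokes?"}],
            model="gpt-3.5-turbo",
        )
        return r.choices[0].message.content

    assert asyncio.run(run()) == EXPECTED_RESPONSE
```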
Dude, thanks, I spent hours yesterday trying to mock one of the endpoints through `client.chat` and couldn't figure out why it wasn't working, until I saw you do this through `openai.resources.chat`. Thank you so much!
If it's helpful for anyone else, I just made mocks for the Async Azure versions, using pytest monkeypatch instead of mock.patch: https://github.com/pamelafox/chatgpt-backend-fastapi/blob/main/tests/conftest.py
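For a rough picture of the monkeypatch approach, here is a minimal sketch of my own (not the linked conftest; the fixture name and canned values are made up). It swaps the async `create` method on the shared resource class, so both `AsyncOpenAI` and `AsyncAzureOpenAI` clients pick up the fake:

```python
import datetime

import pytest
from openai.resources.chat import AsyncCompletions
from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import ChatCompletion, Choice


@pytest.fixture
def mock_chat_completion(monkeypatch):
    async def fake_create(*args, **kwargs) -> ChatCompletion:
        # Canned response instead of a real API call.
        return ChatCompletion(
            id="foo",
            model="gpt-35-turbo",
            object="chat.completion",
            choices=[
                Choice(
                    finish_reason="stop",
                    index=0,
                    message=ChatCompletionMessage(
                        content="mocked content",
                        role="assistant",
                    ),
                )
            ],
            created=int(datetime.datetime.now().timestamp()),
        )

    # Patch the method on the class, so any async client created in the test
    # (OpenAI- or Azure-flavoured) uses the fake.
    monkeypatch.setattr(AsyncCompletions, "create", fake_create)
```

A test then just requests the fixture (`async def test_answer(mock_chat_completion): ...`) and calls the client normally.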
# Fix Tests Marked as xfail

These were marked as `xfail` in #999 as part of the parser refactor. I noticed in openai/openai-python#715 (comment) that applying the patch to `openai.resources.chat.Completions.create` seems to fix the tests.

Test Plan:

```
(aiconfig) ryanholinshead@Ryans-MBP python % pytest tests/parsers/test_openai_util.py
========================================================== test session starts ===========================================================
platform darwin -- Python 3.12.1, pytest-7.4.3, pluggy-1.4.0
rootdir: /Users/ryanholinshead/Projects/aiconfig/python
plugins: asyncio-0.23.5, hypothesis-6.91.0, cov-4.1.0, mock-3.12.0, anyio-4.2.0
asyncio: mode=Mode.STRICT
collected 3 items

tests/parsers/test_openai_util.py ...                                                                                              [100%]

===================================================== 3 passed, 10 warnings in 0.50s =====================================================
```

```
(aiconfig) ryanholinshead@Ryans-MBP python % pytest tests/test_run_config.py
========================================================== test session starts ===========================================================
platform darwin -- Python 3.12.1, pytest-7.4.3, pluggy-1.4.0
rootdir: /Users/ryanholinshead/Projects/aiconfig/python
plugins: asyncio-0.23.5, hypothesis-6.91.0, cov-4.1.0, mock-3.12.0, anyio-4.2.0
asyncio: mode=Mode.STRICT
collected 1 item

tests/test_run_config.py .

===================================================== 1 passed, 10 warnings in 0.50s =====================================================
```
This function was quite useful for unit testing - was this moved to another location / under a different name? I cannot find similar functionality in the new API.
Thanks in advance.