diff --git a/.gitignore b/.gitignore
index 4704a41..0672d5e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,10 +1,12 @@
+__pycache__/
.mypy_cache/
.pytest_cache/
.tox/
.venv/
.vscode/
-__pycache__/
dist/
nbs/
+site/
*.lock
+.plan
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 08ac7ef..75a42d3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,38 @@
# Changelog
+## 0.3.0
+
+> [!IMPORTANT]
+> **Breaking change**: Completely redesigned API
+
+Introducing an all-new API that is both simpler to use and much more flexible. See [docs](https://mharrisb1.github.io/openai-responses-python) for more.
+
+In addition to a new API, this release closed these issues:
+
+- [#1: feat: ability to raise exceptions](https://github.com/mharrisb1/openai-responses-python/issues/1)
+- [#9: feat: base url override](https://github.com/mharrisb1/openai-responses-python/issues/9)
+- [#28: feat: automatically share state between chained mocks](https://github.com/mharrisb1/openai-responses-python/issues/28)
+
+Additional notes:
+
+- Removes token estimation. It is now the user's responsibility to provide mock token counts
+- Adds more example files
+- Still not completely happy with the current state of mocking run steps. It will likely change in the near future.
+
+## 0.2.1
+
+> [!WARNING]
+> Deprecated
+
+Fixes an issue where messages included in run create params (using `additional_messages`) were ignored.
+
+- [#10: fix: create run ignores additional_messages](https://github.com/mharrisb1/openai-responses-python/issues/10)
+
## 0.2.0
+> [!CAUTION]
+> Yanked
+
Migrates assistant endpoints to Assistants V2
- [#8: feat: assistants v2](https://github.com/mharrisb1/openai-responses-python/issues/8)
@@ -9,10 +40,16 @@ Migrates assistant endpoints to Assistants V2
## 0.1.1
+> [!CAUTION]
+> Yanked
+
Fixes some issues with chat completions and other stateless mocks
- [#7: fix(endpoints): fix issues with chat completions endpoint](https://github.com/mharrisb1/openai-responses-python/issues/7)
## 0.1.0
+> [!CAUTION]
+> Yanked
+
Initial release with minimally useful support for what I needed.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 885022c..8cddca1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -33,18 +33,18 @@ I (Michael) am the BDFL of the project and can and will arbitrarily rank issues
- _Effort_ is a rough estimate of whether something will take a lot of work, some work, or little work to implement
- _Impact_ is a rough estimate of how important something is
-Both are unscientific flawed but they allow me to try to focus on maximizing impact while minimizing effort which is important for an open source project.
+Both are unscientific and flawed, but they help me decide what to work on and when.
-If you look at the [project's issues](https://github.com/mharrisb1/openai-responses-python/issues), you'll see they are labeled with some non-standard labels.
+If you look at the [project's issues](https://github.com/mharrisb1/openai-responses-python/issues), you'll see they are tagged with some non-standard labels.
-| Label | Description |
-| -------------------------------------------------------------------------------------- | ------------- |
-| e0 🔵 | Low effort |
+| Label | Description |
+| ------------------------------------------------------------------------------------ | ------------- |
+| e0 🔵 | Low effort |
| e1 ⚡️ | Medium effort |
-| e2 🔥 | High effort |
-| i0 🔵 | Low impact |
+| e2 🔥 | High effort |
+| i0 🔵 | Low impact |
| i1 ⚡️ | Medium impact |
-| i2 🔥 | High impact |
+| i2 🔥 | High impact |
Ranking is evaluated according to this quadrant where the priority is ordered from top to bottom and left to right.
@@ -75,33 +75,3 @@ poetry install --with dev # install deps including development
poetry shell # activate venv
tox run # run lint, static analysis, unit tests, and examples
```
-
-## Design Overview
-
-### Mocks
-
-Mocks are classes that are responsible for encapsulating all request patching of a given endpoint.
-
-Endpoints are classified as either _stateless_ or _stateful_ mocks. Right now, the only difference between `StatelessMock` and `StatefulMock` is the injection of `used_state` (see [state store](#state-store) below for more).
-
-```mermaid
-classDiagram
- Mock <-- StatelessMock
- Mock <-- StatefulMock
-
- StatelessMock <-- ChatCompletionMock
- StatelessMock <-- EmbeddingsMock
-
- StatefulMock <-- FilesMock
- StatefulMock <-- AssistantsMock
- StatefulMock <-- ThreadsMock
- StatefulMock <-- MessagesMock
- StatefulMock <-- RunsMock
- StatefulMock <-- RunStepsMock
-```
-
-### State store
-
-The state store is responsible for managing the state of all mocked objects throughout the lifetime of the test scope (function, module, session).
-
-The current implementation of the state store is just a naive dictionary-based KV stores with support for common CRUD operations.
diff --git a/README.md b/README.md
index 7e27c52..aba09b2 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,10 @@
# 🧪🤖 openai-responses
-Pytest plugin for automatically mocking OpenAI requests. Built on top of [RESPX](https://github.com/lundberg/respx).
+Pytest plugin for automatically mocking OpenAI requests. Powered by [RESPX](https://github.com/lundberg/respx).
-## Supported endpoints
+[](https://github.com/openai/openai-python)
+
+## Supported Routes
- `/v1/chat/completions`
- `/v1/embeddings`
@@ -10,39 +12,33 @@ Pytest plugin for automatically mocking OpenAI requests. Built on top of [RESPX]
- `/v1/assistants`
- `/v1/threads` (+ messages, runs, and steps)
-View full support coverage [here](https://mharrisb1.github.io/openai-responses-python/endpoints/).
+View full support coverage [here](https://mharrisb1.github.io/openai-responses-python/routes).
## Usage
-Simply decorate any test function that contains code that makes a call to an OpenAI endpoint. See [docs](https://mharrisb1.github.io/openai-responses-python) for more.
+Just decorate any test function that makes a call to the OpenAI API (either using the [official Python SDK](https://github.com/openai/openai-python) or with [HTTPX](https://www.python-httpx.org/)).
```python
+import openai
+
import openai_responses
-from openai import OpenAI
-
-
-@openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ],
-)
-def test_create_completion_with_multiple_choices():
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- n=3,
+
+
+@openai_responses.mock()
+def test_create_assistant():
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
)
- assert len(completion.choices) == 3
+
+ assert assistant.name == "Math Tutor"
```
-> [!IMPORTANT]
-> This project does not try to generate fake responses from the models. Any part of a response that would be generated by a model will need to be defined by the user or will fallback to a default value.
+See [examples](https://github.com/mharrisb1/openai-responses-python/tree/main/examples) or [docs](https://mharrisb1.github.io/openai-responses-python) for more.
## Installation
diff --git a/docs/endpoints/assistants/assistants.md b/docs/endpoints/assistants/assistants.md
deleted file mode 100644
index 285d89b..0000000
--- a/docs/endpoints/assistants/assistants.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Assistants
-
-!!! warning
-
- There is currently no support for attaching files. Subscribe to [#5: feat: add support for attached files for all assistants APIs](https://github.com/mharrisb1/openai-responses-python/issues/5) to be notified when this is added.
-
-!!! note
-
- Only Assistants V2 is supported
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_assistants_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `state_store` - Optional [state store](../../user_guide/state.md) override for custom and shared states.
-
-## Create assistant
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## List assistants
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Retrieve assistant
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Modify assistant
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Delete assistant
-
-!!! info
-
- Docs are incomplete but feature is supported
diff --git a/docs/endpoints/assistants/messages.md b/docs/endpoints/assistants/messages.md
deleted file mode 100644
index 1ffe006..0000000
--- a/docs/endpoints/assistants/messages.md
+++ /dev/null
@@ -1,44 +0,0 @@
-# Messages
-
-!!! warning
-
- There is currently no support for attaching files. Subscribe to [#5: feat: add support for attached files for all assistants APIs](https://github.com/mharrisb1/openai-responses-python/issues/5) to be notified when this is added.
-
-!!! note
-
- Only Assistants V2 is supported
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_threads_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `state_store` - Optional [state store](../../user_guide/state.md) override for custom and shared states.
-- `validate_thread_exists` - Optional flag for asserting that thread exists. Defaults to `False`.
-
-## Create message
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## List messages
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Retrieve message
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Modify message
-
-!!! info
-
- Docs are incomplete but feature is supported
diff --git a/docs/endpoints/assistants/run_steps.md b/docs/endpoints/assistants/run_steps.md
deleted file mode 100644
index 1a5928e..0000000
--- a/docs/endpoints/assistants/run_steps.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Run Steps
-
-!!! note
-
- Only Assistants V2 is supported
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_threads_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `state_store` - Optional [state store](../../user_guide/state.md) override for custom and shared states.
-- `steps` - Optional steps list. This is needed since there is no create steps route. Defaults to `[]`.
-- `validate_thread_exists` - Optional flag for asserting that thread exists. Defaults to `False`.
-- `validate_run_exists` - Optional flag for asserting that run exists. Defaults to `False`.
-
-## List run steps
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Retrieve run step
-
-!!! info
-
- Docs are incomplete but feature is supported
diff --git a/docs/endpoints/assistants/runs.md b/docs/endpoints/assistants/runs.md
deleted file mode 100644
index b5ddff1..0000000
--- a/docs/endpoints/assistants/runs.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Runs
-
-!!! note
-
- Only Assistants V2 is supported
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_threads_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `state_store` - Optional [state store](../../user_guide/state.md) override for custom and shared states.
-- `sequence` - Optional state sequences for create and/or retrieve. Will use sequence `n` for call `n - failure`. Defaults to `{}`
-- `validate_thread_exists` - Optional flag for asserting that thread exists. Defaults to `False`.
-- `validate_assistant_exists` - Optional flag for asserting that assistant exists. Defaults to `False`.
-
-## Create run
-
-!!! warning
-
- Messages created with `additional_messages` request params are currently ignored. Subscribe to [#10: fix: create run ignores `additional_messages`](https://github.com/mharrisb1/openai-responses-python/issues/10) to be notified when this is fixed
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Create thread and run
-
-!!! warning
-
- Not implemented. Subscribe to [#11: feat: support create thread and run route](https://github.com/mharrisb1/openai-responses-python/issues/11) to be notified when support is added.
-
-## Retrieve run
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Modify run
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Submit tool outputs to run
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Cancel run
-
-!!! info
-
- Docs are incomplete but feature is supported
diff --git a/docs/endpoints/assistants/threads.md b/docs/endpoints/assistants/threads.md
deleted file mode 100644
index ca2ecb4..0000000
--- a/docs/endpoints/assistants/threads.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Threads
-
-!!! note
-
- Only Assistants V2 is supported
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_threads_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `state_store` - Optional [state store](../../user_guide/state.md) override for custom and shared states.
-
-## Create thread
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Retrive thread
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Modify thread
-
-!!! info
-
- Docs are incomplete but feature is supported
-
-## Delete thread
-
-!!! info
-
- Docs are incomplete but feature is supported
diff --git a/docs/endpoints/chat.md b/docs/endpoints/chat.md
deleted file mode 100644
index dd96aa0..0000000
--- a/docs/endpoints/chat.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Chat
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_chat_completion_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `choices` - list of model choice responses. Defaults to `[]`.
-
-## Create chat completion
-
-=== "Sync"
-
- ```python linenums="1"
- from openai import OpenAI
-
- import openai_responses
-
-
- @openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- ]
- )
- def test_create_completion():
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- )
- assert len(completion.choices) == 1
- assert completion.choices[0].message.content == "Hello, how can I help?"
- ```
-
-=== "Async"
-
- ```python linenums="1"
- import pytest
- from openai import AsyncOpenAI
-
- import openai_responses
-
- @pytest.mark.asyncio
- @openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- ]
- )
- async def test_create_completion():
- client = AsyncOpenAI(api_key="fakeKey")
- completion = await client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- )
- assert len(completion.choices) == 1
- assert completion.choices[0].message.content == "Hello, how can I help?"
- ```
-
-=== "With Mocker Class"
-
- ```python linenums="1" hl_lines="4 12 23"
- from openai import OpenAI
-
- import openai_responses
- from openai_responses import ChatCompletionMock # (1)
-
-
- @openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- ]
- )
- def test_create_completion(chat_completion_mock: ChatCompletionMock):
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- )
- assert len(completion.choices) == 1
- assert completion.choices[0].message.content == "Hello, how can I help?"
- assert chat_completion_mock.create.route.calls.call_count == 1
- ```
-
- 1. See [mockers guide](../user_guide/mocks.md) for more
diff --git a/docs/endpoints/embeddings.md b/docs/endpoints/embeddings.md
deleted file mode 100644
index ed1f780..0000000
--- a/docs/endpoints/embeddings.md
+++ /dev/null
@@ -1,94 +0,0 @@
-# Embeddings
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_embeddings_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `embedding` - array of floats to mock a returned embedding. Default `[]`.
-
-## Create embeddings
-
-=== "Sync"
-
- ```python linenums="1"
- import random
-
- from openai import OpenAI
-
- import openai_responses
-
- EMBEDDING = [random.uniform(0.01, -0.01) for _ in range(100)]
-
-
- @openai_responses.mock.embeddings(embedding=EMBEDDING)
- def test_create_embeddings():
- client = OpenAI(api_key="fakeKey")
- embeddings = client.embeddings.create(
- model="text-embedding-ada-002",
- input="The food was delicious and the waiter...",
- encoding_format="float",
- )
- assert len(embeddings.data) == 1
- assert embeddings.data[0].embedding == EMBEDDING
- assert embeddings.model == "text-embedding-ada-002"
- ```
-
-=== "Async"
-
- ```python linenums="1"
- import random
-
- import pytest
- from openai import AsyncOpenAI
-
- import openai_responses
-
- EMBEDDING = [random.uniform(0.01, -0.01) for _ in range(100)]
-
-
- @pytest.mark.asyncio
- @openai_responses.mock.embeddings(embedding=EMBEDDING)
- async def test_async_create_embeddings():
- client = AsyncOpenAI(api_key="fakeKey")
- embeddings = await client.embeddings.create(
- model="text-embedding-ada-002",
- input="The food was delicious and the waiter...",
- encoding_format="float",
- )
- assert len(embeddings.data) == 1
- assert embeddings.data[0].embedding == EMBEDDING
- assert embeddings.model == "text-embedding-ada-002"
- ```
-
-=== "With Mocker Class"
-
- ```python linenums="1" hl_lines="6 12 22"
- import random
-
- from openai import OpenAI
-
- import openai_responses
- from openai_responses import EmbeddingsMock # (1)
-
- EMBEDDING = [random.uniform(0.01, -0.01) for _ in range(100)]
-
-
- @openai_responses.mock.embeddings(embedding=EMBEDDING)
- def test_create_embeddings(embeddings_mock: EmbeddingsMock):
- client = OpenAI(api_key="fakeKey")
- embeddings = client.embeddings.create(
- model="text-embedding-ada-002",
- input="The food was delicious and the waiter...",
- encoding_format="float",
- )
- assert len(embeddings.data) == 1
- assert embeddings.data[0].embedding == EMBEDDING
- assert embeddings.model == "text-embedding-ada-002"
- assert embeddings_mock.create.route.calls.call_count == 1
- ```
-
- 1. See [mockers guide](../user_guide/mocks.md) for more
diff --git a/docs/endpoints/files.md b/docs/endpoints/files.md
deleted file mode 100644
index 2423f31..0000000
--- a/docs/endpoints/files.md
+++ /dev/null
@@ -1,300 +0,0 @@
-# Files
-
-!!! warning
-
- There is currently no support for retrieving file content. Subscribe to [#4: feat: add support for retrieving file content](https://github.com/mharrisb1/openai-responses-python/issues/4) to be updated when this is available.
-
-!!! tip
-
- See [examples](https://github.com/mharrisb1/openai-responses-python/blob/main/examples/test_files_api.py) for more
-
-## Decorator Arguments
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-- `state_store` - Optional [state store](../user_guide/state.md) override for custom and shared states.
-
-## Upload file
-
-=== "Sync"
-
- ```python linenums="1"
- from openai import OpenAI
-
- import openai_responses
-
-
- @openai_responses.mock.files()
- def test_upload_file():
- client = OpenAI(api_key="fakeKey")
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
- ```
-
-=== "Async"
-
- ```python linenums="1"
- import pytest
-
- from openai import AsyncOpenAI
-
- import openai_responses
-
-
- @pytest.mark.asyncio
- @openai_responses.mock.files()
- async def test_upload_file():
- client = AsyncOpenAI(api_key="fakeKey")
- file = await client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
- ```
-
-=== "With Mocker Class"
-
- ```python linenums="1" hl_lines="4 8 16"
- from openai import OpenAI
-
- import openai_responses
- from openai_responses import FilesMock # (1)
-
-
- @openai_responses.mock.files()
- def test_upload_file(files_mock: FilesMock):
- client = OpenAI(api_key="fakeKey")
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
- assert files_mock.create.route.calls.call_count == 1
- ```
-
- 1. See [mockers guide](../user_guide/mocks.md) for more
-
-## List files
-
-=== "Sync"
-
- ```python linenums="1"
- from openai import OpenAI
-
- import openai_responses
-
-
- @openai_responses.mock.files()
- def test_list_uploaded_files():
- client = OpenAI(api_key="fakeKey")
-
- files = client.files.list()
- assert len(files.data) == 0
-
- client.files.create(file=open("file_1.json", "rb"))
- client.files.create(file=open("file_2.json", "rb"))
-
- files = client.files.list()
- assert len(files.data) == 2
- ```
-
-=== "Async"
-
- ```python linenums="1"
- import pytest
-
- from openai import AsyncOpenAI
-
- import openai_responses
-
-
- @pytest.mark.asyncio
- @openai_responses.mock.files()
- async def test_list_uploaded_files():
- client = AsyncOpenAI(api_key="fakeKey")
-
- files = await client.files.list()
- assert len(files.data) == 0
-
- await client.files.create(file=open("file_1.json", "rb"))
- await client.files.create(file=open("file_2.json", "rb"))
-
- files = await client.files.list()
- assert len(files.data) == 2
- ```
-
-=== "With Mocker Class"
-
- ```python linenums="1" hl_lines="6 11 23 24"
- import pytest
-
- from openai import AsyncOpenAI
-
- import openai_responses
- from openai_responses import FilesMock # (1)
-
-
- @pytest.mark.asyncio
- @openai_responses.mock.files()
- async def test_list_uploaded_files(files_mock: FilesMock):
- client = AsyncOpenAI(api_key="fakeKey")
-
- files = await client.files.list()
- assert len(files.data) == 0
-
- await client.files.create(file=open("file_1.json", "rb"))
- await client.files.create(file=open("file_2.json", "rb"))
-
- files = await client.files.list()
- assert len(files.data) == 2
-
- assert files_mock.create.route.calls.call_count == 2
- assert files_mock.list.route.calls.call_count == 2
- ```
-
- 1. See [mockers guide](../user_guide/mocks.md) for more
-
-## Retrieve file
-
-=== "Sync"
-
- ```python linenums="1"
- from openai import OpenAI
-
- import openai_responses
-
-
- @openai_responses.mock.files()
- def test_retrieve_file():
- client = OpenAI(api_key="fakeKey")
-
- file = client.files.create(file=open("my_file.json", "rb"))
-
- found = client.files.retrieve(file.id)
-
- assert found
- assert found.id == file.id
- ```
-
-=== "Async"
-
- ```python linenums="1"
- import pytest
-
- from openai import AsyncOpenAI
-
- import openai_responses
-
-
- @pytest.mark.asyncio
- @openai_responses.mock.files()
- async def test_retrieve_file():
- client = AsyncOpenAI(api_key="fakeKey")
-
- file = await client.files.create(file=open("my_file.json", "rb"))
-
- found = await client.files.retrieve(file.id)
-
- assert found
- assert found.id == file.id
- ```
-
-=== "With Mocker Class"
-
- ```python linenums="1" hl_lines="4 8 18 19"
- from openai import OpenAI
-
- import openai_responses
- from openai_responses import FilesMock # (1)
-
-
- @openai_responses.mock.files()
- def test_retrieve_file(files_mock: FilesMock):
- client = OpenAI(api_key="fakeKey")
-
- file = client.files.create(file=open("my_file.json", "rb"))
-
- found = client.files.retrieve(file.id)
-
- assert found
- assert found.id == file.id
-
- assert files_mock.create.route.calls.call_count == 1
- assert files_mock.retrieve.route.calls.call_count == 1
- ```
-
- 1. See [mockers guide](../user_guide/mocks.md) for more
-
-## Delete file
-
-=== "Sync"
-
- ```python linenums="1"
- from openai import OpenAI
-
- import openai_responses
-
-
- @openai_responses.mock.files()
- def test_delete_file():
- client = OpenAI(api_key="fakeKey")
-
- file = client.files.create(file=open("my_file.json", "rb"))
-
- deleted = client.files.delete(file.id)
-
- assert deleted.deleted
- ```
-
-=== "Async"
-
- ```python linenums="1"
- import pytest
-
- from openai import AsyncOpenAI
-
- import openai_responses
-
-
- @pytest.mark.asyncio
- @openai_responses.mock.files()
- def test_delete_file():
- client = AsyncOpenAI(api_key="fakeKey")
-
- file = await client.files.create(file=open("my_file.json", "rb"))
-
- deleted = await client.files.delete(file.id)
-
- assert deleted.deleted
- ```
-
-=== "With Mocker Class"
-
- ```python linenums="1" hl_lines="4 8 17 18"
- from openai import OpenAI
-
- import openai_responses
- from openai_responses import FilesMock # (1)
-
-
- @openai_responses.mock.files()
- def test_delete_file(files_mock: FilesMock):
- client = OpenAI(api_key="fakeKey")
-
- file = client.files.create(file=open("my_file.json", "rb"))
-
- deleted = client.files.delete(file.id)
-
- assert deleted.deleted
-
- assert files_mock.create.route.calls.call_count = 1
- assert files_mock.delete.route.calls.call_count = 1
- ```
-
- 1. See [mockers guide](../user_guide/mocks.md) for more
diff --git a/docs/endpoints/index.md b/docs/endpoints/index.md
deleted file mode 100644
index 7ac79a3..0000000
--- a/docs/endpoints/index.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Coverage
-
-??? warning "Streaming Support"
-
- Currently, there is no support for streaming. This is a top feature request so once I have time to tackle it I will.
- Subscribe to [#3: feat: streaming support](https://github.com/mharrisb1/openai-responses-python/issues/3) to be notified when it is added.
-
-Table is assembled according to APIs listed in the [OpenAI API Reference](https://platform.openai.com/docs/api-reference)
-
-| Endpoint | Supported | Streaming Supported | Mock Type |
-| -------------------------------------- | :----------------------: | :-----------------: | ------------ |
-| Audio | :material-close: | - | Stateless |
-| [Chat](chat.md) | :material-check-all: | :material-close: | Stateless |
-| [Embeddings](embeddings.md) | :material-check-all: | - | Stateless |
-| Fine-tuning | :material-close: | - | Stateful |
-| Batch | :material-close: | - | Stateful |
-| [Files](files.md) | :material-check:[^1] | - | Stateful |
-| Images | :material-close: | - | Stateless |
-| Models | :material-close: | - | Stateful[^2] |
-| Moderations | :material-close: | - | Stateless |
-| [Assistants](assistants/assistants.md) | :material-check:[^3] | - | Stateful |
-| [Threads](assistants/threads.md) | :material-check-all: | - | Stateful |
-| [Messages](assistants/messages.md) | :material-check:[^3] | - | Stateful |
-| [Runs](assistants/runs.md) | :material-check:[^4][^5] | :material-close: | Stateful |
-| [Run Steps](assistants/run_steps.md) | :material-check:[^6] | - | Stateful |
-| Vector Stores | :material-close: | - | Stateful |
-| Vector Store Files | :material-close: | - | Stateful |
-| Vector Store File Batches | :material-close: | - | Stateful |
-| Completions (Legacy) | :material-close: | :material-close: | Stateless |
-
-:material-close: = Not implemented
-
-:material-check: = Partially implemented
-
-:material-check-all: = Fully implemented
-
-[^1]: Need to add support for retrieving file content
-[^2]: Blocked by fine-tuning support
-[^3]: Need to add support for attached files
-[^4]: Need to add support for create thread and run
-[^5]: No state changes on submit tool call
-[^6]: Fragile API for run steps
diff --git a/docs/index.md b/docs/index.md
index e6e2f95..1c430a8 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -1,17 +1,17 @@
# Introduction
-Pytest plugin for automatically mocking OpenAI requests.
+🧪🤖 Pytest plugin for automatically mocking OpenAI requests. Powered by [RESPX](https://github.com/lundberg/respx).
## Why mock?
-Mocking is a common practice in software testing. Instead of sending _actual_ requests over the wire, you can mock the behavior of API and avoid that external call entirely.
+Mocking is a common practice in software testing. Instead of sending actual requests over the network, you can patch the requests to return predefined responses.
This has many benefits:
+- 💰 **Cost**: Avoiding actual API calls ensures you're not paying for usage during testing
+- ⚡️ **Speed**: No round-trip latencies since you completely avoid the network
- 🔒 **Security**: No physical requests to separate servers helps ensure no private information is leaked from your system
-- 💰 **Cost**: Calling an API often incurs a cost. If you've set up a robust testing infrastructure then you'd be calling APIs on every PR check, deployment smoke test, local unit test, etc. Simulating the call avoids the actual cost of calling the API
-- 👩‍🔬**Reproducibility**: Mocks give you control and predictability of responses allowing you to test more scenarios
-- ⚡️**Speed**: Calling an API also often incurs some latency cost. Sending data over the network from machine to machine is one of the most expensive actions a machine can take. Add on top of that fact the latency _within_ the system that is serving the API and one of your test cases can easily take 5+ seconds. Multiply that by every test run and you'll quickly find yourself waiting longer and longer for deployments to succeed.
+- 👩‍🔬 **Reproducibility**: Mocks give you control and predictability of responses allowing you to test more scenarios
## Installation
@@ -31,29 +31,25 @@ Available on [PyPi](https://pypi.org/project/openai-responses/)
## Quickstart
-Simply decorate any test function that makes a call to the OpenAI API (either using the [official library](https://github.com/openai/openai-python) or with [HTTPX](https://www.python-httpx.org/)).
+Simply decorate any test function that makes a call to the OpenAI API (either using the [official Python SDK](https://github.com/openai/openai-python) or with [HTTPX](https://www.python-httpx.org/)).
+
+See [examples](https://github.com/mharrisb1/openai-responses-python/tree/main/examples) for more.
```python
+import openai
import openai_responses
-from openai import OpenAI
-
-
-@openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ],
-)
-def test_create_completion_with_multiple_choices():
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- n=3,
+
+
+@openai_responses.mock()
+def test_create_assistant():
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
)
- assert len(completion.choices) == 3
+
+ assert assistant.name == "Math Tutor"
```
diff --git a/docs/routes/index.md b/docs/routes/index.md
new file mode 100644
index 0000000..1ec1c5d
--- /dev/null
+++ b/docs/routes/index.md
@@ -0,0 +1,103 @@
+# Routes
+
+See example usage [here](https://github.com/mharrisb1/openai-responses-python/tree/main/examples).
+
+??? warning "Streaming Support"
+
+    Currently, there is no support for streaming. This is a top feature request, so I will tackle it once I have time. Subscribe to [#3: feat: streaming support](https://github.com/mharrisb1/openai-responses-python/issues/3) to be notified when it is added.
+
+## Coverage
+
+The end goal of this library is to support all OpenAI API routes, but currently only a subset is supported. See [Roadmap](https://github.com/mharrisb1/openai-responses-python/blob/main/CONTRIBUTING.md#roadmap) for more information on support priority.
+
+
+| Route | Supported | Streaming Supported | Route Type |
+| ---------------------------------- | :------------------------: | :----------------------: | ---------- |
+| **Audio** |
+| Create speech | :material-close:{ .red } | - | - |
+| Create transcription | :material-close:{ .red } | - | - |
+| Create translation | :material-close:{ .red } | - | - |
+| **Chat** |
+| Create chat completion | :material-check:{ .green } | :material-close:{ .red } | Stateless |
+| **Embeddings** |
+| Create embedding | :material-check:{ .green } | - | Stateless |
+| **Fine-tuning** |
+| Create fine-tuning job | :material-close:{ .red } | - | - |
+| List fine-tuning jobs | :material-close:{ .red } | - | - |
+| List fine-tuning events | :material-close:{ .red } | - | - |
+| List fine-tuning checkpoints | :material-close:{ .red } | - | - |
+| Retrieve fine-tuning job | :material-close:{ .red } | - | - |
+| Cancel fine-tuning | :material-close:{ .red } | - | - |
+| **Batch** |
+| Create batch | :material-close:{ .red } | - | - |
+| Retrieve batch | :material-close:{ .red } | - | - |
+| Cancel batch | :material-close:{ .red } | - | - |
+| List batch | :material-close:{ .red } | - | - |
+| **Files** |
+| Upload file | :material-check:{ .green } | - | Stateful |
+| List files | :material-check:{ .green } | - | Stateful |
+| Retrieve file | :material-check:{ .green } | - | Stateful |
+| Delete file | :material-check:{ .green } | - | Stateful |
+| Retrieve file content | :material-close:{ .red } | - | Stateful |
+| **Images** |
+| Create image | :material-close:{ .red } | - | - |
+| Create image edit | :material-close:{ .red } | - | - |
+| Create image variation | :material-close:{ .red } | - | - |
+| **Models** |
+| List models | :material-close:{ .red } | - | - |
+| Retrieve model | :material-close:{ .red } | - | - |
+| Delete a fine-tuned model | :material-close:{ .red } | - | - |
+| **Moderations** |
+| Create moderation | :material-close:{ .red } | - | - |
+| **Assistants** |
+| Create assistant | :material-check:{ .green } | - | Stateful |
+| List assistants | :material-check:{ .green } | - | Stateful |
+| Retrieve assistant | :material-check:{ .green } | - | Stateful |
+| Modify assistant | :material-check:{ .green } | - | Stateful |
+| Delete assistant | :material-check:{ .green } | - | Stateful |
+| **Threads** |
+| Create thread | :material-check:{ .green } | - | Stateful |
+| Retrieve thread | :material-check:{ .green } | - | Stateful |
+| Modify thread | :material-check:{ .green } | - | Stateful |
+| Delete thread | :material-check:{ .green } | - | Stateful |
+| **Messages** |
+| Create message | :material-check:{ .green } | - | Stateful |
+| List messages | :material-check:{ .green } | - | Stateful |
+| Retrieve message | :material-check:{ .green } | - | Stateful |
+| Modify message | :material-check:{ .green } | - | Stateful |
+| Delete message | :material-check:{ .green } | - | Stateful |
+| **Runs** |
+| Create run | :material-check:{ .green } | :material-close:{ .red } | Stateful |
+| Create thread and run | :material-check:{ .green } | :material-close:{ .red } | Stateful |
+| List runs | :material-check:{ .green } | - | Stateful |
+| Retrieve run | :material-check:{ .green } | - | Stateful |
+| Modify run | :material-check:{ .green } | - | Stateful |
+| Submit tool outputs to run | :material-check:{ .green } | :material-close:{ .red } | Stateful |
+| Cancel run | :material-check:{ .green } | - | Stateful |
+| **Run Steps** |
+| List run steps | :material-check:{ .green } | - | Stateful |
+| Retrieve run step | :material-check:{ .green } | - | Stateful |
+| **Vector Stores** |
+| Create vector store | :material-close:{ .red } | - | - |
+| List vector stores | :material-close:{ .red } | - | - |
+| Retrieve vector store | :material-close:{ .red } | - | - |
+| Modify vector store | :material-close:{ .red } | - | - |
+| Delete vector store | :material-close:{ .red } | - | - |
+| **Vector Store Files** |
+| Create vector store file | :material-close:{ .red } | - | - |
+| List vector store files | :material-close:{ .red } | - | - |
+| Retrieve vector store file | :material-close:{ .red } | - | - |
+| Delete vector store file | :material-close:{ .red } | - | - |
+| **Vector Store File Batches** |
+| Create vector store file batch | :material-close:{ .red } | - | - |
+| Retrieve vector store file batch | :material-close:{ .red } | - | - |
+| Cancel vector store file batch | :material-close:{ .red } | - | - |
+| List vector store files in a batch | :material-close:{ .red } | - | - |
+
+!!! warning
+
+ There is currently no support for actually attaching files to assistant resources. Subscribe to [#5: feat: add support for attached files for all assistants APIs](https://github.com/mharrisb1/openai-responses-python/issues/5) to be notified when it is added.
+
+!!! note
+
+ [Legacy endpoints](https://platform.openai.com/docs/api-reference/completions) are not supported and are not on the roadmap.
diff --git a/docs/stylesheets/icons.css b/docs/stylesheets/icons.css
new file mode 100644
index 0000000..df37c0d
--- /dev/null
+++ b/docs/stylesheets/icons.css
@@ -0,0 +1,7 @@
+.green {
+ color: rgb(68, 208, 68);
+}
+
+.red {
+ color: rgb(201, 76, 76);
+}
\ No newline at end of file
diff --git a/docs/user_guide/async.md b/docs/user_guide/async.md
index 1eaac33..d2da567 100644
--- a/docs/user_guide/async.md
+++ b/docs/user_guide/async.md
@@ -1,64 +1,76 @@
# Async
-Async is supported out of the box and you shouldn't have to do anything different from defining synchronous tests. The only additional bit of setup needed is you need to also install the [pytest-asyncio](https://pypi.org/project/pytest-asyncio/) plugin and mark the test as async.
+Async is supported out of the box, and you don't have to do anything different from defining synchronous tests. The only additional setup is installing the [pytest-asyncio](https://pypi.org/project/pytest-asyncio/) plugin and marking the test as async.
-View the below examples to see the difference between using async and not.
-
-=== "Sync"
+=== "Async"
```python linenums="1"
- from openai import OpenAI
+ import pytest
+
+ import openai
import openai_responses
+ from openai_responses import OpenAIMock
- @openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ],
- )
- def test_create_completion_with_multiple_choices():
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
+ @pytest.mark.asyncio
+ @openai_responses.mock()
+ async def test_async_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = {
+ "choices": [
+ {
+ "index": 0,
+ "finish_reason": "stop",
+ "message": {"content": "Hello! How can I help?", "role": "assistant"},
+ }
+ ]
+ }
+
+ client = openai.AsyncClient(api_key="sk-fake123")
+ completion = await client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
],
- n=3,
)
- assert len(completion.choices) == 3
+
+ assert len(completion.choices) == 1
+ assert completion.choices[0].message.content == "Hello! How can I help?"
+ assert openai_mock.chat.completions.create.calls.call_count == 1
```
-=== "Async"
+=== "Sync"
```python linenums="1"
- import pytest
-
- from openai import AsyncOpenAI
+ import openai
import openai_responses
+ from openai_responses import OpenAIMock
- @pytest.mark.asyncio
- @openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ],
- )
- async def test_create_completion_with_multiple_choices():
- client = AsyncOpenAI(api_key="fakeKey")
- completion = await client.chat.completions.create(
+ @openai_responses.mock()
+ def test_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = {
+ "choices": [
+ {
+ "index": 0,
+ "finish_reason": "stop",
+ "message": {"content": "Hello! How can I help?", "role": "assistant"},
+ }
+ ]
+ }
+
+ client = openai.Client(api_key="sk-fake123")
+ completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
],
- n=3,
)
- assert len(completion.choices) == 3
+
+ assert len(completion.choices) == 1
+ assert completion.choices[0].message.content == "Hello! How can I help?"
+ assert openai_mock.chat.completions.create.calls.call_count == 1
```
diff --git a/docs/user_guide/chaining.md b/docs/user_guide/chaining.md
deleted file mode 100644
index b3b8856..0000000
--- a/docs/user_guide/chaining.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Chaining
-
-To mock more than one API endpoint, you can chain decorators as much as you'd like.
-
-```python linenums="1"
-@openai_responses.mock.beta.threads()
-@openai_responses.mock.beta.threads.runs()
-def test_list_runs(threads_mock: ThreadsMock, runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
- thread = client.beta.threads.create()
-
- for _ in range(20):
- client.beta.threads.runs.create(thread.id, assistant_id="asst_abc123")
-
- runs = client.beta.threads.runs.list(thread.id)
-
- assert len(runs.data) == 20
-
- assert threads_mock.create.route.calls.call_count == 1
- assert runs_mock.create.route.calls.call_count == 20
- assert runs_mock.list.route.calls.call_count == 1
-```
-
-!!! tip
-
- To share state between mocks, see the [state](state.md) page in the user guide.
diff --git a/docs/user_guide/decorator.md b/docs/user_guide/decorator.md
new file mode 100644
index 0000000..9d50390
--- /dev/null
+++ b/docs/user_guide/decorator.md
@@ -0,0 +1,14 @@
+# Decorator
+
+The `mock` decorator is currently the only official way to use this library.
+
+```python linenums="1"
+@openai_responses.mock()
+def test_my_code():
+ pass
+```
+
+The decorator accepts two optional arguments (with more coming soon):
+
+1. `base_url`: override the base URL, which defaults to [https://api.openai.com](https://api.openai.com) (see the sketch below)
+2. `state`: override the default empty state used for [stateful routes](routes.md#stateful)
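+
+For example, a minimal sketch of the `base_url` override. Here `http://localhost:8000` is an assumed stand-in, and note that, like the default, it omits the `/v1` suffix that the client's own `base_url` carries:
+
+```python linenums="1"
+import openai
+
+import openai_responses
+
+
+@openai_responses.mock(base_url="http://localhost:8000")
+def test_my_code_behind_proxy():
+    # the client must target the same host for its requests to be matched
+    client = openai.Client(api_key="sk-fake123", base_url="http://localhost:8000/v1")
+```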
diff --git a/docs/user_guide/decorators.md b/docs/user_guide/decorators.md
deleted file mode 100644
index 90b71c1..0000000
--- a/docs/user_guide/decorators.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Decorators
-
-Each mock decorator will have the following options:
-
-- `latency` - synthetic latency in seconds to introduce to the call(s). Defaults to `0.0`.
-- `failures` - number of failures to simulate. Defaults to `0`.
-
-All stateful mocks will have these additional arguments:
-
-- `state_store` - Optional [state store](state.md) override for custom and shared states.
-
-Some decorators will have additional arguments which are listed on the respective [endpoints](../endpoints/index.md) page.
-
-All decorator arguments are optional but if you want to mock a response from a model you'll need to provide. This library does not automatically generate responses from LLMs.
-
-Arguments have all been well-defined so autocompletion and type hints are available.
diff --git a/docs/user_guide/helpers.md b/docs/user_guide/helpers.md
new file mode 100644
index 0000000..df4359e
--- /dev/null
+++ b/docs/user_guide/helpers.md
@@ -0,0 +1,3 @@
+# Helpers
+
+🏗️ Docs are under construction
diff --git a/docs/user_guide/index.md b/docs/user_guide/index.md
index b8bab20..1757ea6 100644
--- a/docs/user_guide/index.md
+++ b/docs/user_guide/index.md
@@ -6,60 +6,54 @@ Before getting started, make sure that you've installed [pytest](https://pytest.
## Overview
-This library will automatically mock any call to the OpenAI API by just decorating the test function.
+This library will automatically mock any call to the OpenAI API by just decorating the test function. The API call(s) can come from the [official Python library](https://github.com/openai/openai-python), [HTTPX](https://www.python-httpx.org), or [HTTP Core](https://www.encode.io/httpcore). All documented examples use the official Python library but can easily be ported.
-!!! info
+Let's look at an example:
- This project does not try to generate fake responses from the models. Any part of a response that would be generated by a model will need to
- be defined by the user or will fallback to a default value.
-
-Each endpoint has it's own [decorator](decorators.md) that can be accessed under `openai_responses.mock`. The decorator API tries to stay aligned with the OpenAI Python client API. So for chat completions, with the client you would access the endpoint with `client.chat.completions`, similarly you would access the chat completions mock with `openai_responses.mock.chat.completions`.
-
-See all of the suppored endpoints [here](../endpoints/index.md).
-
-For tests where you need to access multiple endpoints, you can stack or "chain" the decorators one on top of the otherand everything will still work as expected. See more in the [chaining](chaining.md) overview.
-
-For each decorator used, a [pytest fixture](https://docs.pytest.org/en/6.2.x/fixture.html) is provided in order to access additional information about the API calls. Each fixture is an instance of a given [mock class](mocks.md). Mocks fall under 1 of 2 buckets:
-
-1. **Stateless** for endpoints that are fully stateless
-2. **Stateful** for endpoints that require keeping track of some resource state
-
-For stateful mocks, a custom [state store](state.md) is used to keep track of the state over the course of a test or another scope.
-
-## Example
-
-Below is an example that covers many of the topics mentioned above.
-
-```python linenums="1" hl_lines="7 8 14 24 25"
-from openai import OpenAI
+```python linenums="1" hl_lines="4 7 8 9 20 30"
+import openai
import openai_responses
-from openai_responses import ChatCompletionMock
-
-
-@openai_responses.mock.chat.completions( # (1)
- choices=[ # (2)
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ],
-)
-def test_create_chat_completion(chat_completion_mock: ChatCompletionMock): # (3)
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
+from openai_responses import OpenAIMock # (1)
+
+
+@openai_responses.mock() # (2)
+def test_create_chat_completion(openai_mock: OpenAIMock): # (3)
+ openai_mock.chat.completions.create.response = { # (4)
+ "choices": [
+ {
+ "index": 0,
+ "finish_reason": "stop",
+ "message": {"content": "Hello! How can I help?", "role": "assistant"},
+ }
+ ]
+ }
+
+ client = openai.Client(api_key="sk-fake123")
+ completion = client.chat.completions.create( # (5)
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"},
],
- n=3,
)
- assert len(completion.choices) == 3 # (4)
- assert chat_completion_mock.create.route.calls.call_count == 1 # (5)
+
+ assert len(completion.choices) == 1
+ assert completion.choices[0].message.content == "Hello! How can I help?"
+ assert openai_mock.chat.completions.create.calls.call_count == 1 # (6)
```
-1. Wrap the test function with in the endpoint decorator
-2. Define the choices to be returned in the response
-3. Use the provided fixture for this endpoint
-4. Without calling out to the API, a response with 3 valid choices was provided
-5. Access route information from the mock class
+1. Optional import for type annotations
+2. Decorate the test function
+3. Request mock instance fixture from Pytest
+4. Define the response
+5. Make the API call as you normally would
+6. Use the route call history for assertions
+
+Now, walking through this example, let's focus on the highlighted lines.
+
+- **Lines 4 and 8**: For type inference and autocompletion support in your editor or IDE, it's recommended to import the [mock class](mock.md) and annotate the fixture `openai_mock`.
+- **Line 7**: The `mock()` [decorator](decorator.md) wraps a function and will patch any request made within that function.
+- **Line 9**: You can define the response of the API call. There are many options for defining a response. See [Responses](responses.md) for more.
+- **Line 20**: You can use the SDK or raw HTTP calls the way you normally would, including [async support](async.md).
+- **Line 30**: You can access each route's [call history](mock.md#call-history) for additional assertions, like ensuring a route was not called or was called only a certain number of times.
diff --git a/docs/user_guide/mock.md b/docs/user_guide/mock.md
new file mode 100644
index 0000000..5815239
--- /dev/null
+++ b/docs/user_guide/mock.md
@@ -0,0 +1,22 @@
+# Mock Class
+
+!!! tip
+
+ The mock class is not intended to be initialized manually by the user. An instance is provided to the test function by Pytest.
+
+The main mock object is the `OpenAIMock` class. This class contains all of the supported routes as well as a private state store for [stateful routes](routes.md#stateful).
+
+The class is initialized on a per-test basis and is accessible as a fixture in the Pytest test function.
+
+## Routes
+
+The routes try to match the routes of the client in the official Python library to make them easy and natural to navigate. So if the create chat completion route in the client is `client.chat.completions.create`, then the mock route is accessible as `openai_mock.chat.completions.create`. See [routes](routes.md) for more information.
+
+### Call History
+
+Each route has two main properties:
+
+1. `calls` which allows access to call history
+2. `response` which allows the user to [define the response](responses.md)
+
+The call history is provided by RESPX and you can see the full documentation [here](https://lundberg.github.io/respx/guide/#call-history).
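+
+For example, a short sketch of using the call history inside a test body (RESPX's `calls.last` holds the most recent request/response pair):
+
+```python linenums="1"
+assert openai_mock.chat.completions.create.calls.call_count == 1
+
+last_call = openai_mock.chat.completions.create.calls.last
+print(last_call.request.url)  # inspect the request the client actually sent
+```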
diff --git a/docs/user_guide/mocks.md b/docs/user_guide/mocks.md
deleted file mode 100644
index 43e5c2d..0000000
--- a/docs/user_guide/mocks.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Mocks
-
-Each mock decorator also has an accompanying mocker class. These classes are provided as pytest fixtures and are always available. To access them from your test function, just include the mocker class name as snake case (e.g. access `FilesMock` mocker with `files_mock`).
-
-**Mockers**
-
-- `ChatCompletionMock`
-- `EmbeddingsMock`
-- `FilesMock`
-- `AssistantsMock`
-- `ThreadsMock`
-- `MessagesMock`
-- `RunsMock`
-- `RunStepsMock`
-
-### Example Access
-
-```python linenums="1" hl_lines="8 19"
-from openai import OpenAI
-
-import openai_responses
-from openai_responses import FilesMock
-
-
-@openai_responses.mock.files(failures=2)
-def test_upload_files_with_retries(files_mock: FilesMock): # (1)
- client = OpenAI(api_key="fakeKey", max_retries=2, timeout=0)
-
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
-
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
-
- assert files_mock.create.route.calls.call_count == 3 # (2)
-```
-
-1. Pass in mocker class name (snake case) with optional type annotation for code completion
-2. Access attributes of the mocker class
diff --git a/docs/user_guide/responses.md b/docs/user_guide/responses.md
new file mode 100644
index 0000000..d9f827f
--- /dev/null
+++ b/docs/user_guide/responses.md
@@ -0,0 +1,136 @@
+# Responses
+
+There are many ways to define the response for your mocked API request. To define a response, just set the `response` property to either a [partial](#partial), a [model](#model), an [HTTPX response](#httpx-response), or a [function](#function) that returns an HTTPX response. You can also skip setting the response entirely, in which case a [default response](#default-response) will be used.
+
+## Default Response
+
+For all routes, but especially stateful routes, you can skip manually defining the response and a default response will be returned. The default response will have all required fields of the return object but will not include any meaningful values for fields that would have been generated by the LLM.
+
+!!! tip
+
+    For stateful routes that do not involve LLM-generated fields, it is recommended *not* to define the response; doing so might actually result in an error.
+
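+For example, this sketch (mirroring the quickstart) never assigns a response; the stateful assistants route echoes the request fields back in its default response:
+
+```python linenums="1"
+import openai
+
+import openai_responses
+
+
+@openai_responses.mock()
+def test_create_assistant_with_default_response():
+    client = openai.Client(api_key="sk-fake123")
+
+    # no response was defined for this route
+    assistant = client.beta.assistants.create(
+        instructions="You are a personal math tutor.",
+        name="Math Tutor",
+        tools=[{"type": "code_interpreter"}],
+        model="gpt-4-turbo",
+    )
+
+    assert assistant.name == "Math Tutor"
+```
+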
+## Partial
+
+All routes have an associated *partial* object. Partials are just typed-dictionary representations of the response objects in which no field is required. Any field not defined by the user will be given a default value by merging the partial object with the default response object.
+
+Let's look at an example:
+
+```python linenums="1"
+openai_mock.chat.completions.create.response = {
+ "choices": [
+ {
+ "index": 0,
+ "finish_reason": "stop",
+ "message": {"content": "Hello! How can I help?", "role": "assistant"},
+ }
+ ]
+}
+```
+
+In this example, we're explicitly defining what the completion choice field should look like in the response, but we're not explicitly defining any of the other fields.
+
+Thanks to Python's `TypedDict` type, autocompletion for field names is automatically supported in your text editor or IDE.
+
+## Model
+
+Along with partial objects, you can also define the response as a full Pydantic `BaseModel` object, which is what the official Python library uses for defining resource types.
+
+One use case for this is to manually set the `status` field on the run resource object for polling.
+
+```python linenums="1"
+# create run
+run = client.beta.threads.runs.create(thread.id, assistant_id=assistant.id)
+
+# manually change status and assign updated run as response for retrieve call
+run.status = "in_progress"
+openai_mock.beta.threads.runs.retrieve.response = run
+
+# retrieve run
+run = client.beta.threads.runs.retrieve(run.id, thread_id=thread.id)
+assert run.status == "in_progress"
+```
+
+## HTTPX Response
+
+You can also set the response to a raw HTTPX response object. This is more involved than using either a partial or model but can allow you to test things like server failures or other status codes.
+
+For convenience, `openai_responses` provides a re-import of `httpx.Response`.
+
+```python linenums="1"
+import pytest
+
+import openai
+from openai import APIStatusError
+
+import openai_responses
+from openai_responses import OpenAIMock, Response
+
+
+@openai_responses.mock()
+def test_create_chat_completion_failure(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = Response(500)
+
+ client = openai.Client(api_key="sk-fake123", max_retries=0)
+
+ with pytest.raises(APIStatusError):
+ client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
+```
+
+## Function
+
+For more complex scenarios or for taking advantage of RESPX [side effects](https://lundberg.github.io/respx/guide/#mock-with-a-side-effect), you can also define the response as a function as long as that function returns an HTTPX response object.
+
+The function's signature must match one of:
+
+```
+(request: httpx.Request) -> httpx.Response
+(request: httpx.Request, route: respx.Route) -> httpx.Response
+(request: httpx.Request, route: respx.Route, state: openai_responses.StateStore) -> httpx.Response
+```
+
+Again, for convenience, the necessary HTTPX and RESPX imports are re-imported and provided by this library.
+
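+The simplest form needs only the request (a minimal sketch; the error body is illustrative):
+
+```python linenums="1"
+from openai_responses import Request, Response
+
+
+def bad_request(request: Request) -> Response:
+    # always respond with a client error, regardless of the request
+    return Response(400, json={"error": {"message": "Invalid request"}})
+```
+
+Assign it like any other response, e.g. `openai_mock.chat.completions.create.response = bad_request`.
+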
+As a real-life example, the test below simulates two failed calls before finally succeeding on the third.
+
+```python linenums="1"
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock, Request, Response, Route
+from openai_responses.helpers.builders.chat import chat_completion_from_create_request
+
+
+def completion_with_failures(request: Request, route: Route) -> Response:
+ """Simulate 2 failures before sending successful response"""
+ if route.call_count < 2:
+ return Response(500)
+
+ completion = chat_completion_from_create_request(request, extra={"choices": []})
+
+ return Response(201, json=completion.model_dump())
+
+
+@openai_responses.mock()
+def test_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = completion_with_failures
+
+ client = openai.Client(api_key="sk-fake123", max_retries=3)
+ client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
+
+ assert openai_mock.chat.completions.create.calls.call_count == 3
+```
+
+This example also makes use of [helpers](helpers.md), which are convenient utilities for common operations like creating a model from a request or storing data in the state store.
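+
+For instance, inside a test that receives `openai_mock`, helpers can build a resource from a partial and seed the state store directly (a sketch; the exact partial fields shown are illustrative):
+
+```python linenums="1"
+from openai_responses.helpers.builders.messages import build_message
+from openai_responses.helpers.state_store import add_resource_to_state_store
+
+# build an assistant message from a partial and place it in the mock's state store
+message = build_message({"thread_id": "thread_abc123", "role": "assistant"})
+add_resource_to_state_store(message, mock=openai_mock)
+```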
diff --git a/docs/user_guide/routes.md b/docs/user_guide/routes.md
new file mode 100644
index 0000000..e52083a
--- /dev/null
+++ b/docs/user_guide/routes.md
@@ -0,0 +1,13 @@
+# Routes
+
+Each route is either *stateless*, meaning that each call with the same request returns the same response[^1], or *stateful*, meaning that identical requests may result in different responses.
+
+[^1]: Ignoring the fact that LLM-generated tokens are non-deterministic
+
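+As an illustration of the difference (a sketch based on the stateful file routes; the file path is illustrative):
+
+```python linenums="1"
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_stateful_file_routes(openai_mock: OpenAIMock):
+    client = openai.Client(api_key="sk-fake123")
+
+    # each create call mutates the mock's state...
+    for _ in range(2):
+        client.files.create(
+            file=open("examples/example.json", "rb"),
+            purpose="fine-tune",
+        )
+
+    # ...so the same list request now returns a different (larger) response
+    assert len(client.files.list().data) == 2
+```
+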
+## Stateless
+
+๐๏ธ Docs are under construction
+
+## Stateful
+
+๐๏ธ Docs are under construction
diff --git a/docs/user_guide/state.md b/docs/user_guide/state.md
deleted file mode 100644
index 11648d5..0000000
--- a/docs/user_guide/state.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# State
-
-State is managed in a custom state store. For stateful [mocks](mocks.md) you can define a custom state prior to a test run or allow a fresh empty state to be created. Additionally, you can share state between [chained mocks](chaining.md).
-
-!!! note
-
- State store is still a work in progress and the API may change at any time. For instance, sharing state between chained mocks should happen automatically. Subscribe to [#28: feat: automatically share state between chained mocks](https://github.com/mharrisb1/openai-responses-python/issues/28) to be notified of when that is added.
-
-## Defining custom state
-
-If you want to establish state prior to a test run you can pass a state store instance as an argument in the decorator. Every stateful mock can accept a custom state store instance.
-
-```python linenums="1"
-from openai import OpenAI
-from openai.types.beta.assistant import Assistant
-
-import openai_responses
-from openai_responses.state import StateStore
-
-custom_state_store = StateStore()
-
-asst = Assistant(id="asst_abc123"...) # create assistant
-custom_state_store.beta.assistants.put(asst) # put assistant in state store
-
-
-@openai_responses.mock.assistants(state_store=custom_state_store):
-def test_retrieve_assistant():
- client = OpenAI(api_key="fakeKey")
- found = client.beta.assistants.retrieve("asst_abc123")
-```
-
-## Sharing state
-
-If you're using more than one decorator and you want those mocks to be able to access a common state, you can do so like the example below.
-
-```python linenums="1"
-from openai import OpenAI
-
-import openai_responses
-from openai_responses.state import StateStore
-
-shared_state = StateStore()
-
-@openai_responses.mock.beta.assistants(state_store=shared_state)
-@openai_responses.mock.beta.threads(state_store=shared_state)
-@openai_responses.mock.beta.threads.runs(state_store=shared_state)
-def test_create_thread_run():
- ...
-```
-
-!!! tip
-
- For assistants mocks, make sure to flip on the validate exists flags so the mock will ensure that the resource actually exists in the state store.
diff --git a/examples/test_assistants.py b/examples/test_assistants.py
new file mode 100644
index 0000000..d1d0059
--- /dev/null
+++ b/examples/test_assistants.py
@@ -0,0 +1,99 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_assistant(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ assert assistant.name == "Math Tutor"
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_list_assistants(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ for _ in range(10):
+ client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ assistants = client.beta.assistants.list()
+
+ assert len(assistants.data) == 10
+ assert openai_mock.beta.assistants.create.calls.call_count == 10
+ assert openai_mock.beta.assistants.list.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_retrieve_assistant(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ found = client.beta.assistants.retrieve(assistant.id)
+
+ assert assistant.name == "Math Tutor"
+ assert found.name == assistant.name
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.assistants.retrieve.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_update_assistant(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ updated = client.beta.assistants.update(
+ assistant.id,
+ name="Math Tutor (Slim)",
+ model="gpt-3.5-turbo",
+ )
+
+ assert updated.id == assistant.id
+ assert assistant.name == "Math Tutor"
+ assert assistant.model == "gpt-4-turbo"
+ assert updated.name == "Math Tutor (Slim)"
+ assert updated.model == "gpt-3.5-turbo"
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.assistants.update.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_delete_assistant(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ assert client.beta.assistants.delete(assistant.id).deleted
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.assistants.delete.calls.call_count == 1
diff --git a/examples/test_assistants_api.py b/examples/test_assistants_api.py
deleted file mode 100644
index a0c0819..0000000
--- a/examples/test_assistants_api.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import pytest
-from openai import OpenAI, AsyncOpenAI, NotFoundError
-
-import openai_responses
-from openai_responses import AssistantsMock
-
-
-@openai_responses.mock.beta.assistants()
-def test_create_assistant(assistants_mock: AssistantsMock):
- client = OpenAI(api_key="fakeKey")
- my_assistant = client.beta.assistants.create(
- instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
- name="Math Tutor",
- tools=[{"type": "code_interpreter"}],
- model="gpt-4",
- )
- assert my_assistant.name == "Math Tutor"
- assert my_assistant.model == "gpt-4"
- assert assistants_mock.create.route.calls.call_count == 1
-
-
-@pytest.mark.asyncio
-@openai_responses.mock.beta.assistants()
-async def test_async_create_assistant(assistants_mock: AssistantsMock):
- client = AsyncOpenAI(api_key="fakeKey")
- my_assistant = await client.beta.assistants.create(
- instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
- name="Math Tutor",
- tools=[{"type": "code_interpreter"}],
- model="gpt-4",
- )
- assert my_assistant.name == "Math Tutor"
- assert my_assistant.model == "gpt-4"
- assert assistants_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.assistants()
-def test_list_assistants(assistants_mock: AssistantsMock):
- client = OpenAI(api_key="fakeKey")
- for _ in range(20):
- client.beta.assistants.create(model="gpt-4")
-
- assistants = client.beta.assistants.list()
- assert len(assistants.data) == 20
- assert assistants_mock.create.route.calls.call_count == 20
- assert assistants_mock.list.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.assistants()
-def test_retrieve_assistant(assistants_mock: AssistantsMock):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.assistants.retrieve("invalid-id")
-
- asst = client.beta.assistants.create(model="gpt-4")
- found = client.beta.assistants.retrieve(asst.id)
- assert found.id == asst.id
- assert assistants_mock.retrieve.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.assistants()
-def test_update_assistant(assistants_mock: AssistantsMock):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.assistants.update("invalid-id", model="gpt-4")
-
- asst = client.beta.assistants.create(model="gpt-4")
- updated = client.beta.assistants.update(asst.id, model="gpt-3.5-turbo")
-
- assert updated.id == asst.id
- assert updated.model == "gpt-3.5-turbo"
- assert assistants_mock.update.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.assistants()
-def test_delete_assistant(assistants_mock: AssistantsMock):
- client = OpenAI(api_key="fakeKey")
-
- assert not client.beta.assistants.delete("invalid-id").deleted
-
- asst = client.beta.assistants.create(model="gpt-4")
- assert client.beta.assistants.delete(asst.id).deleted
- assert assistants_mock.delete.route.calls.call_count == 2
diff --git a/examples/test_async_client.py b/examples/test_async_client.py
new file mode 100644
index 0000000..c66fd92
--- /dev/null
+++ b/examples/test_async_client.py
@@ -0,0 +1,33 @@
+import pytest
+
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@pytest.mark.asyncio
+@openai_responses.mock()
+async def test_async_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = {
+ "choices": [
+ {
+ "index": 0,
+ "finish_reason": "stop",
+ "message": {"content": "Hello! How can I help?", "role": "assistant"},
+ }
+ ]
+ }
+
+ client = openai.AsyncClient(api_key="sk-fake123")
+ completion = await client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
+
+ assert len(completion.choices) == 1
+ assert completion.choices[0].message.content == "Hello! How can I help?"
+ assert openai_mock.chat.completions.create.calls.call_count == 1
diff --git a/examples/test_chat_completion.py b/examples/test_chat_completion.py
new file mode 100644
index 0000000..071afe5
--- /dev/null
+++ b/examples/test_chat_completion.py
@@ -0,0 +1,30 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = {
+ "choices": [
+ {
+ "index": 0,
+ "finish_reason": "stop",
+ "message": {"content": "Hello! How can I help?", "role": "assistant"},
+ }
+ ]
+ }
+
+ client = openai.Client(api_key="sk-fake123")
+ completion = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
+
+ assert len(completion.choices) == 1
+ assert completion.choices[0].message.content == "Hello! How can I help?"
+ assert openai_mock.chat.completions.create.calls.call_count == 1
diff --git a/examples/test_chat_completion_api.py b/examples/test_chat_completion_api.py
deleted file mode 100644
index c4584cc..0000000
--- a/examples/test_chat_completion_api.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import pytest
-from openai import OpenAI, AsyncOpenAI
-
-import openai_responses
-from openai_responses import ChatCompletionMock
-
-
-@openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- ]
-)
-def test_create_completion(chat_completion_mock: ChatCompletionMock):
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- )
- assert len(completion.choices) == 1
- assert completion.choices[0].message.content == "Hello, how can I help?"
- assert chat_completion_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ]
-)
-def test_create_completion_with_multiple_choices(
- chat_completion_mock: ChatCompletionMock,
-):
- client = OpenAI(api_key="fakeKey")
- completion = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- n=3,
- )
- assert len(completion.choices) == 3
- assert chat_completion_mock.create.route.calls.call_count == 1
-
-
-@pytest.mark.asyncio
-@openai_responses.mock.chat.completions(
- choices=[
- {"message": {"content": "Hello, how can I help?"}},
- {"message": {"content": "Hi! I'm here to help!"}},
- {"message": {"content": "How can I help?"}},
- ]
-)
-async def test_async_create_completion_with_multiple_choices(
- chat_completion_mock: ChatCompletionMock,
-):
- client = AsyncOpenAI(api_key="fakeKey")
- completion = await client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "Hello!"},
- ],
- n=3,
- )
- assert len(completion.choices) == 3
- assert chat_completion_mock.create.route.calls.call_count == 1
diff --git a/examples/test_custom_response_handler.py b/examples/test_custom_response_handler.py
new file mode 100644
index 0000000..77f0cd8
--- /dev/null
+++ b/examples/test_custom_response_handler.py
@@ -0,0 +1,31 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock, Request, Response, Route
+from openai_responses.helpers.builders.chat import chat_completion_from_create_request
+
+
+def completion_with_failures(request: Request, route: Route) -> Response:
+ """Simulate 2 failures before sending successful response"""
+ if route.call_count < 2:
+ return Response(500)
+
+ completion = chat_completion_from_create_request(request, extra={"choices": []})
+
+ return Response(201, json=completion.model_dump())
+
+
+@openai_responses.mock()
+def test_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = completion_with_failures
+
+ client = openai.Client(api_key="sk-fake123", max_retries=3)
+ client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
+
+ assert openai_mock.chat.completions.create.calls.call_count == 3
diff --git a/examples/test_embeddings.py b/examples/test_embeddings.py
new file mode 100644
index 0000000..f2521f0
--- /dev/null
+++ b/examples/test_embeddings.py
@@ -0,0 +1,32 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+EMBEDDING = [0.0023064255, -0.009327292, -0.0028842222]
+
+
+@openai_responses.mock()
+def test_create_embedding(openai_mock: OpenAIMock):
+ openai_mock.embeddings.create.response = {
+ "data": [
+ {
+ "object": "embedding",
+ "embedding": EMBEDDING,
+ "index": 0,
+ },
+ ]
+ }
+
+ client = openai.Client(api_key="sk-fake123")
+ embeddings = client.embeddings.create(
+ model="text-embedding-ada-002",
+ input="The food was delicious and the waiter...",
+ encoding_format="float",
+ )
+
+ assert embeddings.model == "text-embedding-ada-002"
+ assert len(embeddings.data) == 1
+ assert embeddings.data[0].embedding == EMBEDDING
+    assert embeddings.data[0].index == 0
diff --git a/examples/test_embeddings_api.py b/examples/test_embeddings_api.py
deleted file mode 100644
index 91aeec7..0000000
--- a/examples/test_embeddings_api.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import random
-
-import pytest
-from openai import OpenAI, AsyncOpenAI
-
-import openai_responses
-from openai_responses import EmbeddingsMock
-
-EMBEDDING = [random.uniform(0.01, -0.01) for _ in range(100)]
-
-
-@openai_responses.mock.embeddings(embedding=EMBEDDING)
-def test_create_embeddings(embeddings_mock: EmbeddingsMock):
- client = OpenAI(api_key="fakeKey")
- embeddings = client.embeddings.create(
- model="text-embedding-ada-002",
- input="The food was delicious and the waiter...",
- encoding_format="float",
- )
- assert len(embeddings.data) == 1
- assert embeddings.data[0].embedding == EMBEDDING
- assert embeddings.model == "text-embedding-ada-002"
- assert embeddings_mock.create.route.calls.call_count == 1
-
-
-@pytest.mark.asyncio
-@openai_responses.mock.embeddings(embedding=EMBEDDING)
-async def test_async_create_embeddings(embeddings_mock: EmbeddingsMock):
- client = AsyncOpenAI(api_key="fakeKey")
- embeddings = await client.embeddings.create(
- model="text-embedding-ada-002",
- input="The food was delicious and the waiter...",
- encoding_format="float",
- )
- assert len(embeddings.data) == 1
- assert embeddings.data[0].embedding == EMBEDDING
- assert embeddings.model == "text-embedding-ada-002"
- assert embeddings_mock.create.route.calls.call_count == 1
diff --git a/examples/test_explicit_model.py b/examples/test_explicit_model.py
new file mode 100644
index 0000000..91976cb
--- /dev/null
+++ b/examples/test_explicit_model.py
@@ -0,0 +1,46 @@
+import openai
+from openai.types.chat import ChatCompletion
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = ChatCompletion.model_validate(
+ {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "gpt-3.5-turbo-0125",
+ "system_fingerprint": "fp_44709d6fcb",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": "Hello there, how may I assist you today?",
+ },
+ "logprobs": None,
+ "finish_reason": "stop",
+ }
+ ],
+ "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
+ }
+ )
+
+ client = openai.Client(api_key="sk-fake123", max_retries=3)
+ completion = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
+
+ assert len(completion.choices) == 1
+ assert (
+ completion.choices[0].message.content
+ == "Hello there, how may I assist you today?"
+ )
+ assert openai_mock.chat.completions.create.calls.call_count == 1
diff --git a/examples/test_files.py b/examples/test_files.py
new file mode 100644
index 0000000..8ba3e0c
--- /dev/null
+++ b/examples/test_files.py
@@ -0,0 +1,66 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_file(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ file = client.files.create(
+ file=open("examples/example.json", "rb"),
+ purpose="fine-tune",
+ )
+
+ assert file.filename == "example.json"
+ assert openai_mock.files.create.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_list_files(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ for _ in range(10):
+ client.files.create(
+ file=open("examples/example.json", "rb"),
+ purpose="fine-tune",
+ )
+
+ files = client.files.list()
+
+ assert len(files.data) == 10
+ assert openai_mock.files.create.calls.call_count == 10
+ assert openai_mock.files.list.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_retrieve_file(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ file = client.files.create(
+ file=open("examples/example.json", "rb"),
+ purpose="fine-tune",
+ )
+
+ found = client.files.retrieve(file.id)
+
+ assert found.id == file.id
+ assert file.filename == "example.json"
+ assert found.filename == file.filename
+ assert openai_mock.files.create.calls.call_count == 1
+ assert openai_mock.files.retrieve.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_delete_file(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ file = client.files.create(
+ file=open("examples/example.json", "rb"),
+ purpose="fine-tune",
+ )
+
+    assert client.files.delete(file.id).deleted
+ assert openai_mock.files.create.calls.call_count == 1
+ assert openai_mock.files.delete.calls.call_count == 1
diff --git a/examples/test_files_api.py b/examples/test_files_api.py
deleted file mode 100644
index c53e562..0000000
--- a/examples/test_files_api.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import pytest
-from openai import OpenAI, AsyncOpenAI
-from openai import NotFoundError
-
-import openai_responses
-from openai_responses import FilesMock
-
-
-@pytest.fixture(scope="module")
-def client():
- return OpenAI(api_key="fakeKey")
-
-
-@openai_responses.mock.files()
-def test_upload_file(client: OpenAI):
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
-
-
-@pytest.mark.asyncio
-@openai_responses.mock.files()
-async def test_async_upload_file():
- client = AsyncOpenAI(api_key="fakeKey")
- file = await client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
-
-
-@openai_responses.mock.files(failures=2)
-def test_upload_files_with_retries(client: OpenAI, files_mock: FilesMock):
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- assert file.filename == "example.json"
- assert file.purpose == "assistants"
- assert files_mock.create.route.calls.call_count == 3
-
-
-@openai_responses.mock.files()
-def test_list_uploaded_files(client: OpenAI):
- files = client.files.list()
- assert len(files.data) == 0
-
- client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
- client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
-
- files = client.files.list()
- assert len(files.data) == 2
-
- files = client.files.list(purpose="fine-tune")
- assert len(files.data) == 0
-
-
-@openai_responses.mock.files()
-def test_retrieve_file(client: OpenAI):
- with pytest.raises(NotFoundError):
- client.files.retrieve("invalid-id")
-
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
-
- found = client.files.retrieve(file.id)
-
- assert found.id == file.id
-
-
-@openai_responses.mock.files()
-def test_delete_file(client: OpenAI):
- assert not client.files.delete("invalid").deleted
-
- file = client.files.create(
- file=open("examples/example.json", "rb"),
- purpose="assistants",
- )
-
- assert client.files.delete(file.id).deleted
diff --git a/examples/test_messages.py b/examples/test_messages.py
new file mode 100644
index 0000000..c022fa5
--- /dev/null
+++ b/examples/test_messages.py
@@ -0,0 +1,101 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_message(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+ thread = client.beta.threads.create()
+ message = client.beta.threads.messages.create(
+ thread.id,
+ content="Hello!",
+ role="user",
+ )
+
+ assert message.id
+ assert message.thread_id == thread.id
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.create.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_list_messages(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+ thread = client.beta.threads.create()
+
+ for i in range(10):
+ client.beta.threads.messages.create(
+ thread.id,
+ content=f"Hello, {i}!",
+ role="user",
+ )
+
+ messages = client.beta.threads.messages.list(thread.id)
+
+ assert len(messages.data) == 10
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.create.calls.call_count == 10
+ assert openai_mock.beta.threads.messages.list.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_retrieve_message(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create()
+ message = client.beta.threads.messages.create(
+ thread.id,
+ content="Hello!",
+ role="user",
+ )
+ found = client.beta.threads.messages.retrieve(message.id, thread_id=thread.id)
+
+ assert found.id == message.id
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.retrieve.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_update_message(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create()
+ message = client.beta.threads.messages.create(
+ thread.id,
+ content="Hello!",
+ role="user",
+ metadata={"foo": "1"},
+ )
+ updated = client.beta.threads.messages.update(
+ message.id,
+ thread_id=thread.id,
+ metadata={"foo": "2"},
+ )
+
+ assert updated.id == message.id
+ assert message.metadata == {"foo": "1"}
+ assert updated.metadata == {"foo": "2"}
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.update.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_delete_message(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create()
+ message = client.beta.threads.messages.create(
+ thread.id,
+ content="Hello!",
+ role="user",
+ metadata={"foo": "1"},
+ )
+
+ assert client.beta.threads.messages.delete(message.id, thread_id=thread.id).deleted
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.create.calls.call_count == 1
+ assert openai_mock.beta.threads.messages.delete.calls.call_count == 1
diff --git a/examples/test_raw_httpx_response.py b/examples/test_raw_httpx_response.py
new file mode 100644
index 0000000..e6a4b35
--- /dev/null
+++ b/examples/test_raw_httpx_response.py
@@ -0,0 +1,23 @@
+import pytest
+
+import openai
+from openai import APIStatusError
+
+import openai_responses
+from openai_responses import OpenAIMock, Response
+
+
+@openai_responses.mock()
+def test_create_chat_completion(openai_mock: OpenAIMock):
+ openai_mock.chat.completions.create.response = Response(500)
+
+ client = openai.Client(api_key="sk-fake123", max_retries=0)
+
+ with pytest.raises(APIStatusError):
+ client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"},
+ ],
+ )
diff --git a/examples/test_run_steps.py b/examples/test_run_steps.py
new file mode 100644
index 0000000..67f5e07
--- /dev/null
+++ b/examples/test_run_steps.py
@@ -0,0 +1,68 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+from openai_responses.helpers.builders.messages import build_message
+from openai_responses.helpers.builders.run_steps import build_run_step
+from openai_responses.helpers.state_store import add_resource_to_state_store
+
+
+@openai_responses.mock()
+def test_list_run_steps(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ thread = client.beta.threads.create()
+
+ run = client.beta.threads.runs.create(
+ thread.id,
+ assistant_id=assistant.id,
+ )
+
+ # NOTE: need to manually construct assistant message and run step
+ assistant_message = build_message(
+ {
+ "assistant_id": assistant.id,
+ "content": [
+ {
+ "type": "text",
+ "text": {
+ "annotations": [],
+ "value": "Hello! Feel free to ask me any questions.",
+ },
+ }
+ ],
+ "role": "assistant",
+ "run_id": run.id,
+ "status": "completed",
+ "thread_id": thread.id,
+ }
+ )
+
+ run_step = build_run_step(
+ {
+ "assistant_id": assistant.id,
+ "thread_id": thread.id,
+ "run_id": run.id,
+ "status": "in_progress",
+ "type": "message_creation",
+ "step_details": {
+ "type": "message_creation",
+ "message_creation": {
+ "message_id": assistant_message.id,
+ },
+ },
+ }
+ )
+ add_resource_to_state_store(assistant_message, mock=openai_mock)
+ add_resource_to_state_store(run_step, mock=openai_mock)
+
+ steps = client.beta.threads.runs.steps.list(run.id, thread_id=thread.id)
+
+ assert len(steps.data) == 1
diff --git a/examples/test_runs.py b/examples/test_runs.py
new file mode 100644
index 0000000..8cb1eda
--- /dev/null
+++ b/examples/test_runs.py
@@ -0,0 +1,178 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_run(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ thread = client.beta.threads.create()
+
+ run = client.beta.threads.runs.create(
+ thread.id,
+ assistant_id=assistant.id,
+ )
+
+ assert run.id
+ assert run.thread_id == thread.id
+ assert run.assistant_id == assistant.id
+ assert run.instructions == assistant.instructions
+ assert run.tools == assistant.tools
+
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.create.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_create_thread_run(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ run = client.beta.threads.create_and_run(assistant_id=assistant.id)
+
+ assert run.id
+ assert run.assistant_id == assistant.id
+ assert run.instructions == assistant.instructions
+ assert run.tools == assistant.tools
+
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.threads.create_and_run.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_list_runs(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ thread = client.beta.threads.create()
+
+ for _ in range(10):
+ client.beta.threads.runs.create(
+ thread.id,
+ assistant_id=assistant.id,
+ )
+
+ runs = client.beta.threads.runs.list(thread.id)
+
+ assert len(runs.data) == 10
+
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.create.calls.call_count == 10
+ assert openai_mock.beta.threads.runs.list.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_retrieve_run(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ thread = client.beta.threads.create()
+
+ run = client.beta.threads.runs.create(
+ thread.id,
+ assistant_id=assistant.id,
+ )
+
+ found = client.beta.threads.runs.retrieve(run.id, thread_id=thread.id)
+
+ assert found.id == run.id
+
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.retrieve.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_update_run(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ thread = client.beta.threads.create()
+
+ run = client.beta.threads.runs.create(
+ thread.id,
+ assistant_id=assistant.id,
+ metadata={"foo": "1"},
+ )
+
+ updated = client.beta.threads.runs.update(
+ run.id,
+ thread_id=thread.id,
+ metadata={"foo": "2"},
+ )
+
+ assert updated.id == run.id
+ assert run.metadata == {"foo": "1"}
+ assert updated.metadata == {"foo": "2"}
+
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.update.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_cancel_run(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ assistant = client.beta.assistants.create(
+ instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.",
+ name="Math Tutor",
+ tools=[{"type": "code_interpreter"}],
+ model="gpt-4-turbo",
+ )
+
+ thread = client.beta.threads.create()
+
+ run = client.beta.threads.runs.create(thread.id, assistant_id=assistant.id)
+
+ cancelled = client.beta.threads.runs.cancel(run.id, thread_id=thread.id)
+
+ found = client.beta.threads.runs.retrieve(run.id, thread_id=thread.id)
+
+ assert cancelled.id == run.id
+ assert run.status == "queued"
+ assert cancelled.status == "cancelling"
+ assert found.status == "cancelled"
+
+ assert openai_mock.beta.assistants.create.calls.call_count == 1
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.create.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.cancel.calls.call_count == 1
+ assert openai_mock.beta.threads.runs.retrieve.calls.call_count == 1
diff --git a/examples/test_threads.py b/examples/test_threads.py
new file mode 100644
index 0000000..d2d1a08
--- /dev/null
+++ b/examples/test_threads.py
@@ -0,0 +1,66 @@
+import openai
+
+import openai_responses
+from openai_responses import OpenAIMock
+
+
+@openai_responses.mock()
+def test_create_thread(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create()
+
+ assert thread.id
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_create_thread_with_additional_messages(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create(
+ messages=[{"role": "assistant", "content": "How can I help?"}]
+ )
+
+ messages = client.beta.threads.messages.list(thread.id)
+
+ assert thread.id
+ assert len(messages.data) == 1
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_retrieve_thread(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create()
+ found = client.beta.threads.retrieve(thread.id)
+
+ assert found.id == thread.id
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.retrieve.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_update_thread(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create(metadata={"foo": "bar"})
+ updated = client.beta.threads.update(thread.id, metadata={"foo": "baz"})
+
+ assert updated.id == thread.id
+ assert thread.metadata == {"foo": "bar"}
+ assert updated.metadata == {"foo": "baz"}
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.update.calls.call_count == 1
+
+
+@openai_responses.mock()
+def test_delete_thread(openai_mock: OpenAIMock):
+ client = openai.Client(api_key="sk-fake123")
+
+ thread = client.beta.threads.create()
+
+ assert client.beta.threads.delete(thread.id).deleted
+ assert openai_mock.beta.threads.create.calls.call_count == 1
+ assert openai_mock.beta.threads.delete.calls.call_count == 1
diff --git a/examples/test_threads_api.py b/examples/test_threads_api.py
deleted file mode 100644
index 07956dd..0000000
--- a/examples/test_threads_api.py
+++ /dev/null
@@ -1,356 +0,0 @@
-import pytest
-from openai import OpenAI, AsyncOpenAI, NotFoundError
-
-import openai_responses
-from openai_responses import ThreadsMock, MessagesMock, RunsMock
-from openai_responses.state import StateStore
-
-
-@openai_responses.mock.beta.threads()
-def test_create_empty_thread(threads_mock: ThreadsMock):
- client = OpenAI(api_key="fakeKey")
- empty_thread = client.beta.threads.create()
- assert empty_thread.id
- assert threads_mock.create.route.calls.call_count == 1
-
-
-@pytest.mark.asyncio
-@openai_responses.mock.beta.threads()
-async def test_async_create_empty_thread(threads_mock: ThreadsMock):
- client = AsyncOpenAI(api_key="fakeKey")
- empty_thread = await client.beta.threads.create()
- assert empty_thread.id
- assert threads_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads()
-def test_create_thread_with_messages(threads_mock: ThreadsMock):
- client = OpenAI(api_key="fakeKey")
- message_thread = client.beta.threads.create(
- messages=[
- {
- "role": "user",
- "content": "Hello, what is AI?",
- },
- {
- "role": "user",
- "content": "How does AI work? Explain it in simple terms.",
- },
- ]
- )
- assert message_thread.id
- assert threads_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads()
-def test_retrieve_tread(threads_mock: ThreadsMock):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.threads.retrieve("invalid-id")
-
- thread = client.beta.threads.create()
- found = client.beta.threads.retrieve(thread.id)
-
- assert found.id == thread.id
-
- assert threads_mock.retrieve.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.threads()
-def test_update_thread(threads_mock: ThreadsMock):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.threads.update("invalid-id")
-
- thread = client.beta.threads.create()
- updated = client.beta.threads.update(thread.id, metadata={"modified": "true"})
- assert updated.id == thread.id
- assert updated.metadata == {"modified": "true"}
-
- assert threads_mock.update.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.threads()
-def test_delete_thread(threads_mock: ThreadsMock):
- client = OpenAI(api_key="fakeKey")
-
- assert not client.beta.threads.delete("invalid-id").deleted
-
- thread = client.beta.threads.create()
- assert client.beta.threads.delete(thread.id).deleted
-
- assert threads_mock.delete.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.threads.messages()
-def test_create_thread_message(messages_mock: MessagesMock):
- client = OpenAI(api_key="fakeKey")
-
- thread_message = client.beta.threads.messages.create(
- "thread_abc123",
- role="user",
- content="How does AI work? Explain it in simple terms.",
- )
-
- assert thread_message.id
- assert messages_mock.create.route.calls.call_count == 1
-
-
-@pytest.mark.asyncio
-@openai_responses.mock.beta.threads.messages()
-async def test_async_create_thread_message(messages_mock: MessagesMock):
- client = AsyncOpenAI(api_key="fakeKey")
-
- thread_message = await client.beta.threads.messages.create(
- "thread_abc123",
- role="user",
- content="How does AI work? Explain it in simple terms.",
- )
-
- assert thread_message.id
- assert messages_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.messages(validate_thread_exists=True)
-def test_create_thread_message_with_thread_exists_validation(
- messages_mock: MessagesMock,
-):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.threads.messages.create(
- "thread_abc123",
- role="user",
- content="How does AI work? Explain it in simple terms.",
- )
-
- assert messages_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.messages()
-def test_list_thread_messages(messages_mock: MessagesMock):
- client = OpenAI(api_key="fakeKey")
-
- for _ in range(20):
- client.beta.threads.messages.create(
- "thread_abc123",
- role="user",
- content="How does AI work? Explain it in simple terms.",
- )
-
- messages = client.beta.threads.messages.list("thread_abc123")
- assert len(messages.data) == 20
- assert messages_mock.create.route.calls.call_count == 20
- assert messages_mock.list.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.messages()
-def test_retrieve_thread_message(messages_mock: MessagesMock):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.threads.messages.retrieve("invalid_id", thread_id="thread_abc123")
-
- thread_message = client.beta.threads.messages.create(
- "thread_abc123",
- role="user",
- content="How does AI work? Explain it in simple terms.",
- )
- found = client.beta.threads.messages.retrieve(
- thread_message.id, thread_id="thread_abc123"
- )
-
- assert thread_message.id == found.id
- assert messages_mock.retrieve.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.threads.messages()
-def test_update_thread_message(messages_mock: MessagesMock):
- client = OpenAI(api_key="fakeKey")
-
- with pytest.raises(NotFoundError):
- client.beta.threads.messages.update("invalid_id", thread_id="thread_abc123")
-
- thread_message = client.beta.threads.messages.create(
- "thread_abc123",
- role="user",
- content="How does AI work? Explain it in simple terms.",
- )
- updated = client.beta.threads.messages.update(
- thread_message.id, thread_id="thread_abc123", metadata={"modified": "true"}
- )
- assert updated.id == thread_message.id
- assert updated.metadata == {"modified": "true"}
-
- assert messages_mock.update.route.calls.call_count == 2
-
-
-@openai_responses.mock.beta.threads.runs()
-def test_create_thread_run(runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
-
- run = client.beta.threads.runs.create(
- thread_id="thread_abc123",
- assistant_id="asst_abc123",
- )
-
- assert run.id
- assert run.status == "queued"
- assert runs_mock.create.route.calls.call_count == 1
-
-
-shared_state = StateStore()
-
-
-@openai_responses.mock.beta.threads.messages(state_store=shared_state)
-@openai_responses.mock.beta.threads.runs(state_store=shared_state)
-def test_create_thread_run_with_additional_messages(
- messages_mock: MessagesMock,
- runs_mock: RunsMock,
-):
- client = OpenAI(api_key="fakeKey")
-
- run = client.beta.threads.runs.create(
- thread_id="thread_abc123",
- assistant_id="asst_abc123",
- additional_messages=[
- {
- "role": "user",
- "content": "Hello, additional messages!",
- }
- ],
- )
- assert run.id
- assert run.status == "queued"
-
- messages = client.beta.threads.messages.list(thread_id="thread_abc123")
- assert len(messages.data) == 1
-
- assert messages_mock.list.route.calls.call_count == 1
- assert runs_mock.create.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.runs(
- sequence={
- "retrieve": [
- {"status": "in_progress"},
- ]
- }
-)
-def test_retrieve_thread_run(runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
-
- run = client.beta.threads.runs.create(
- thread_id="thread_abc123",
- assistant_id="asst_abc123",
- )
- assert run.status == "queued"
-
- found = client.beta.threads.runs.retrieve(run.id, thread_id="thread_abc123")
-
- assert found.id == run.id
- assert found.status == "in_progress"
-
- assert runs_mock.create.route.calls.call_count == 1
- assert runs_mock.retrieve.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.runs(
- sequence={
- "create": [
- {"status": "in_progress"},
- ],
- "retrieve": [
- {"status": "in_progress"},
- {"status": "in_progress"},
- {"status": "in_progress"},
- {"status": "in_progress"},
- {"status": "completed"},
- ],
- }
-)
-def test_polled_get_status(runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
-
- run = client.beta.threads.runs.create(
- thread_id="thread_abc123",
- assistant_id="asst_abc123",
- )
-
- while run.status != "completed":
- run = client.beta.threads.runs.retrieve(run.id, thread_id="thread_abc123")
-
- assert run.status == "completed"
- assert runs_mock.create.route.calls.call_count == 1
- assert runs_mock.retrieve.route.calls.call_count == 5
-
-
-@openai_responses.mock.beta.threads()
-@openai_responses.mock.beta.threads.runs()
-def test_list_runs(threads_mock: ThreadsMock, runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
- thread = client.beta.threads.create()
-
- for _ in range(20):
- client.beta.threads.runs.create(thread.id, assistant_id="asst_abc123")
-
- runs = client.beta.threads.runs.list(thread.id)
-
- assert len(runs.data) == 20
-
- assert threads_mock.create.route.calls.call_count == 1
- assert runs_mock.create.route.calls.call_count == 20
- assert runs_mock.list.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.runs()
-def test_update_thread_run(runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
-
- run = client.beta.threads.runs.create(
- thread_id="thread_abc123",
- assistant_id="asst_abc123",
- )
- assert run.status == "queued"
-
- run = client.beta.threads.runs.update(
- run.id, thread_id="thread_abc123", metadata={"modified": "true"}
- )
- assert run.metadata == {"modified": "true"}
-
- assert runs_mock.create.route.calls.call_count == 1
- assert runs_mock.update.route.calls.call_count == 1
-
-
-@openai_responses.mock.beta.threads.runs(
- sequence={
- "retrieve": [
- {"status": "in_progress"},
- {"status": "cancelled"},
- ],
- }
-)
-def test_cancel_run(runs_mock: RunsMock):
- client = OpenAI(api_key="fakeKey")
-
- run = client.beta.threads.runs.create(
- thread_id="thread_abc123",
- assistant_id="asst_abc123",
- )
-
- assert run.status == "queued"
-
- run = client.beta.threads.runs.retrieve(run.id, thread_id="thread_abc123")
- assert run.status == "in_progress"
-
- run = client.beta.threads.runs.cancel(run.id, thread_id="thread_abc123")
- assert run.status == "cancelling"
-
- run = client.beta.threads.runs.retrieve(run.id, thread_id="thread_abc123")
- assert run.status == "cancelled"
-
- assert runs_mock.create.route.calls.call_count == 1
- assert runs_mock.cancel.route.calls.call_count == 1
- assert runs_mock.retrieve.route.calls.call_count == 2
diff --git a/mkdocs.yml b/mkdocs.yml
index a70d8d7..54d76c1 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -60,19 +60,14 @@ nav:
- index.md
- User Guide:
- user_guide/index.md
- - user_guide/decorators.md
- - user_guide/chaining.md
- - user_guide/mocks.md
- - user_guide/state.md
+ - user_guide/mock.md
+ - user_guide/decorator.md
+ - user_guide/routes.md
+ - user_guide/responses.md
- user_guide/async.md
- - Endpoints:
- - endpoints/index.md
- - endpoints/chat.md
- - endpoints/embeddings.md
- - endpoints/files.md
- - Assistants:
- - endpoints/assistants/assistants.md
- - endpoints/assistants/threads.md
- - endpoints/assistants/messages.md
- - endpoints/assistants/runs.md
- - endpoints/assistants/run_steps.md
+ - user_guide/helpers.md
+ - Routes:
+ - routes/index.md
+
+extra_css:
+ - stylesheets/icons.css
diff --git a/pyproject.toml b/pyproject.toml
index 43e8f7f..05a3c49 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "openai-responses"
-version = "0.2.1"
+version = "0.3.0"
description = "Automatically mock OpenAI requests"
authors = ["Michael Harris "]
license = "MIT"
@@ -15,11 +15,9 @@ openai_responses = "openai_responses.plugin"
[tool.poetry.dependencies]
python = ">=3.9,<4.0"
-openai = "^1.23"
+openai = "^1.25"
respx = "^0.20.2"
-decorator = "^5.1.1"
faker-openai-api-provider = "^0.1.0"
-tiktoken = "^0.6.0"
[tool.poetry.group.dev.dependencies]
black = "^24.2.0"
diff --git a/src/openai_responses/__init__.py b/src/openai_responses/__init__.py
index 62427bd..149a32a 100644
--- a/src/openai_responses/__init__.py
+++ b/src/openai_responses/__init__.py
@@ -1,39 +1,18 @@
-from openai_responses.endpoints.assistants import AssistantsMock
-from openai_responses.endpoints.chat import ChatMock, ChatCompletionMock
-from openai_responses.endpoints.embeddings import EmbeddingsMock
-from openai_responses.endpoints.files import FilesMock
-from openai_responses.endpoints.threads import ThreadsMock
-from openai_responses.endpoints.messages import MessagesMock
-from openai_responses.endpoints.runs import RunsMock
-from openai_responses.endpoints.run_steps import RunStepsMock
+from httpx import Request, Response
+from respx import Route
+
+from ._api import mock
+from ._mock import OpenAIMock
+from ._stores import StateStore
__all__ = [
# main API
"mock",
- # mockers
- "AssistantsMock",
- "ChatCompletionMock",
- "EmbeddingsMock",
- "FilesMock",
- "ThreadsMock",
- "MessagesMock",
- "RunsMock",
- "RunStepsMock",
+ # internal types
+ "OpenAIMock",
+ "StateStore",
+ # external types
+ "Request",
+ "Response",
+ "Route",
]
-
-
-class Api:
- def __init__(self) -> None:
- self.files = FilesMock()
- self.embeddings = EmbeddingsMock()
- self.chat = ChatMock()
- self.beta = Beta()
-
-
-class Beta:
- def __init__(self) -> None:
- self.assistants = AssistantsMock()
- self.threads = ThreadsMock()
-
-
-mock = Api()
diff --git a/src/openai_responses/_api.py b/src/openai_responses/_api.py
new file mode 100644
index 0000000..2e9aad7
--- /dev/null
+++ b/src/openai_responses/_api.py
@@ -0,0 +1,20 @@
+from typing import Any, Awaitable, Callable, Optional, Union
+
+from ._mock import OpenAIMock
+from ._stores import StateStore
+
+WrappedFn = Callable[..., Union[Callable[..., Any], Awaitable[Callable[..., Any]]]]
+
+
+def mock(
+ *,
+ base_url: Optional[str] = None,
+ state: Optional[StateStore] = None,
+) -> WrappedFn:
+ """
+ Args:
+ base_url (Optional[str], optional): Override base URL. Defaults to None.
+ state (Optional[StateStore], optional): Override default empty state. Defaults to None.
+ """
+ openai_mock = OpenAIMock(base_url, state)
+ return openai_mock._start_mock()
diff --git a/src/openai_responses/_mock.py b/src/openai_responses/_mock.py
new file mode 100644
index 0000000..7e24deb
--- /dev/null
+++ b/src/openai_responses/_mock.py
@@ -0,0 +1,55 @@
+import inspect
+from functools import wraps
+from typing import Any, Callable, Optional
+
+import respx
+
+from ._routes import BetaRoutes, ChatRoutes, EmbeddingsRoutes, FileRoutes
+from ._stores import StateStore
+
+
+class OpenAIMock:
+ def __init__(
+ self,
+ base_url: Optional[str] = None,
+ state: Optional[StateStore] = None,
+ ) -> None:
+ self._router = respx.mock(
+ assert_all_called=False,
+ base_url=base_url or "https://api.openai.com",
+ )
+ self._state = state or StateStore()
+
+ self.beta = BetaRoutes(self._router, self._state)
+ self.chat = ChatRoutes(self._router)
+ self.embeddings = EmbeddingsRoutes(self._router)
+ self.files = FileRoutes(self._router, self._state)
+
+ # NOTE: need to sort routes to avoid match conflicts
+ self._router.routes._routes.sort(key=lambda r: len(repr(r._pattern)), reverse=True) # type: ignore
+
+ def _start_mock(self):
+ def wrapper(fn: Callable[..., Any]):
+ is_async = inspect.iscoroutinefunction(fn)
+ argspec = inspect.getfullargspec(fn)
+ needs_ref = "openai_mock" in argspec.args
+
+ @wraps(fn)
+ async def async_wrapper(*args: Any, **kwargs: Any):
+ if needs_ref:
+ kwargs["openai_mock"] = self
+ assert self._router is not None
+ with self._router:
+ return await fn(*args, **kwargs)
+
+ @wraps(fn)
+ def sync_wrapper(*args: Any, **kwargs: Any):
+ if needs_ref:
+ kwargs["openai_mock"] = self
+ assert self._router is not None
+ with self._router:
+ return fn(*args, **kwargs)
+
+ return async_wrapper if is_async else sync_wrapper
+
+ return wrapper
diff --git a/src/openai_responses/_routes/__init__.py b/src/openai_responses/_routes/__init__.py
new file mode 100644
index 0000000..362560e
--- /dev/null
+++ b/src/openai_responses/_routes/__init__.py
@@ -0,0 +1,121 @@
+import respx
+
+from .._stores import StateStore
+
+from .chat import ChatCompletionsCreateRoute
+from .embeddings import EmbeddingsCreateRoute
+from .files import FileCreateRoute, FileListRoute, FileRetrieveRoute, FileDeleteRoute
+from .assistants import (
+ AssistantCreateRoute,
+ AssistantListRoute,
+ AssistantRetrieveRoute,
+ AssistantUpdateRoute,
+ AssistantDeleteRoute,
+)
+from .threads import (
+ ThreadCreateRoute,
+ ThreadRetrieveRoute,
+ ThreadUpdateRoute,
+ ThreadDeleteRoute,
+)
+from .messages import (
+ MessageCreateRoute,
+ MessageListRoute,
+ MessageRetrieveRoute,
+ MessageUpdateRoute,
+ MessageDeleteRoute,
+)
+from .runs import (
+ RunCreateRoute,
+ ThreadCreateAndRun,
+ RunListRoute,
+ RunRetrieveRoute,
+ RunUpdateRoute,
+ RunSubmitToolOutputsRoute,
+ RunCancelRoute,
+)
+from .run_steps import RunStepListRoute, RunStepRetrieveRoute
+
+__all__ = [
+ "BetaRoutes",
+ "ChatRoutes",
+ "EmbeddingsRoutes",
+ "FileRoutes",
+]
+
+
+class ChatRoutes:
+ def __init__(self, router: respx.MockRouter) -> None:
+ self.completions = ChatCompletionRoutes(router)
+
+
+class ChatCompletionRoutes:
+ def __init__(self, router: respx.MockRouter) -> None:
+ self.create = ChatCompletionsCreateRoute(router)
+
+
+class EmbeddingsRoutes:
+ def __init__(self, router: respx.MockRouter) -> None:
+ self.create = EmbeddingsCreateRoute(router)
+
+
+class FileRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.create = FileCreateRoute(router, state)
+ self.list = FileListRoute(router, state)
+ self.retrieve = FileRetrieveRoute(router, state)
+ self.delete = FileDeleteRoute(router, state)
+
+
+class BetaRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.assistants = AssistantsRoutes(router, state)
+ self.threads = ThreadRoutes(router, state)
+
+
+class AssistantsRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.create = AssistantCreateRoute(router, state)
+ self.list = AssistantListRoute(router, state)
+ self.retrieve = AssistantRetrieveRoute(router, state)
+ self.update = AssistantUpdateRoute(router, state)
+ self.delete = AssistantDeleteRoute(router, state)
+
+
+class ThreadRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.create = ThreadCreateRoute(router, state)
+ self.retrieve = ThreadRetrieveRoute(router, state)
+ self.update = ThreadUpdateRoute(router, state)
+ self.delete = ThreadDeleteRoute(router, state)
+ self.create_and_run = ThreadCreateAndRun(router, state)
+
+ self.messages = MessageRoutes(router, state)
+ self.runs = RunRoutes(router, state)
+
+
+class MessageRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.create = MessageCreateRoute(router, state)
+ self.list = MessageListRoute(router, state)
+ self.retrieve = MessageRetrieveRoute(router, state)
+ self.update = MessageUpdateRoute(router, state)
+ self.delete = MessageDeleteRoute(router, state)
+
+
+class RunRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.create = RunCreateRoute(router, state)
+ self.list = RunListRoute(router, state)
+ self.retrieve = RunRetrieveRoute(router, state)
+ self.update = RunUpdateRoute(router, state)
+ self.submit_tool_outputs = RunSubmitToolOutputsRoute(router, state)
+ self.cancel = RunCancelRoute(router, state)
+
+ self.steps = RunStepRoutes(router, state)
+
+
+class RunStepRoutes:
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ self.list = RunStepListRoute(router, state)
+ self.retrieve = RunStepRetrieveRoute(router, state)
diff --git a/src/openai_responses/_routes/_base.py b/src/openai_responses/_routes/_base.py
new file mode 100644
index 0000000..19489da
--- /dev/null
+++ b/src/openai_responses/_routes/_base.py
@@ -0,0 +1,169 @@
+from abc import ABC, abstractmethod
+from functools import partial
+import inspect
+from typing import Any, Callable, Generic, Union
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai import BaseModel
+
+from .._stores import StateStore
+from .._types.generics import M, P
+from .._utils.serde import model_dict
+
+__all__ = ["StatelessRoute", "StatefulRoute"]
+
+
+class Route(ABC, Generic[M, P]):
+ def __init__(
+ self,
+ route: respx.Route,
+ status_code: int,
+ ) -> None:
+ self._route = route
+ self._status_code = status_code
+ self._response: Union[httpx.Response, M, P, Callable[..., httpx.Response]] = (
+ self._handler
+ )
+ self._route.side_effect = self._response
+
+ @property
+ def calls(self):
+ return self._route.calls
+
+ @property
+ def response(self) -> Union[httpx.Response, M, P, Callable[..., httpx.Response]]:
+ return self._response
+
+ @response.setter
+ def response(
+ self,
+ value: Union[httpx.Response, M, P, Callable[..., httpx.Response]],
+ ) -> None:
+ """
+        Sets the value of the route response. See docs for more details and examples.
+
+ Args:
+ value: Either an HTTPX response, an OpenAI model, a partial model, or a callable that returns an HTTPX response
+ """
+ self._response = value
+ self._route.side_effect = self._side_effect
+
+ @property
+ def _side_effect(self) -> Callable[..., httpx.Response]:
+ if callable(self._response):
+ return self._response
+
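+        # Otherwise wrap the configured response (model, partial dict, or raw
+        # httpx.Response) in a handler that respx can invoke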
+ def _handler(request: httpx.Request, route: respx.Route, **kwargs: Any):
+ if isinstance(self._response, BaseModel):
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(self._response),
+ )
+
+ elif isinstance(self._response, httpx.Response):
+ return self._response
+
+ else:
+ assert not callable(self._response)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(self._build(self._response, request)),
+ )
+
+ return _handler
+
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ ) -> httpx.Response:
+ """Default response handler for route
+
+ Args:
+ request (httpx.Request): User request
+ route (respx.Route): Associated route
+
+ Returns:
+ httpx.Response: Mocked response
+ """
+ self._route = route
+ empty: Any = {} # NOTE: avoids mypy complaint
+ model = self._build(empty, request)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ @abstractmethod
+ def _build(partial: P, request: httpx.Request) -> M:
+ """Merge partial and content to create a full instance of model M
+
+ Args:
+ partial (P): Partial model
+ content (bytes): Request content serializable to JSON
+
+ Returns:
+ M: Full model instance
+ """
+ raise NotImplementedError
+
+
+class StatelessRoute(Route[M, P]):
+ def __init__(self, *, route: respx.Route, status_code: int) -> None:
+ super().__init__(route, status_code)
+
+
+class StatefulRoute(Route[M, P]):
+ def __init__(
+ self,
+ *,
+ route: respx.Route,
+ status_code: int,
+ state: StateStore,
+ ) -> None:
+ super().__init__(route, status_code)
+ self._state = state
+
+ @property
+ @override
+ def _side_effect(self) -> Callable[..., httpx.Response]:
+ if callable(self._response):
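+            # User-provided handlers can opt in to receiving the shared state
+            # store by declaring a "state_store" parameter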
+ argspec = inspect.getfullargspec(self._response)
+ needs_store = "state_store" in argspec.args
+ if needs_store:
+ return partial(self._response, state_store=self._state)
+ else:
+ return self._response
+
+ def _handler(request: httpx.Request, route: respx.Route, **kwargs: Any):
+ if isinstance(self._response, BaseModel):
+ self._state._blind_put(self._response)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(self._response),
+ )
+
+ elif isinstance(self._response, httpx.Response):
+ return self._response
+
+ else:
+ assert not callable(self._response)
+ try:
+ model = self._build(self._response, request)
+ self._state._blind_put(model)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+ except NotImplementedError:
+ import warnings
+
+ warnings.warn("Failed to build model")
+ warnings.warn("Falling back to default handler")
+ return self._handler(request, route)
+
+ return _handler
diff --git a/src/openai_responses/_routes/assistants.py b/src/openai_responses/_routes/assistants.py
new file mode 100644
index 0000000..9d1d7af
--- /dev/null
+++ b/src/openai_responses/_routes/assistants.py
@@ -0,0 +1,197 @@
+import json
+from typing import Any
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai.pagination import SyncCursorPage
+from openai.types.beta.assistant import Assistant
+from openai.types.beta.assistant_deleted import AssistantDeleted
+from openai.types.beta.assistant_update_params import AssistantUpdateParams
+
+from ._base import StatefulRoute
+
+from .._stores import StateStore
+from .._types.partials.assistants import (
+ PartialAssistant,
+ PartialAssistantList,
+ PartialAssistantDeleted,
+)
+
+from .._utils.faker import faker
+from .._utils.serde import model_dict, model_parse
+from .._utils.time import utcnow_unix_timestamp_s
+
+__all__ = [
+ "AssistantCreateRoute",
+ "AssistantListRoute",
+ "AssistantRetrieveRoute",
+ "AssistantUpdateRoute",
+ "AssistantDeleteRoute",
+]
+
+
+class AssistantCreateRoute(StatefulRoute[Assistant, PartialAssistant]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(url__regex="/v1/assistants"),
+ status_code=201,
+ state=state,
+ )
+
+ @override
+ def _handler(self, request: httpx.Request, route: respx.Route) -> httpx.Response:
+ self._route = route
+ model = self._build({}, request)
+ self._state.beta.assistants.put(model)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ def _build(partial: PartialAssistant, request: httpx.Request) -> Assistant:
+ content = json.loads(request.content)
+ defaults: PartialAssistant = {
+ "id": faker.beta.assistant.id(),
+ "created_at": utcnow_unix_timestamp_s(),
+ "tools": [],
+ "object": "assistant",
+ }
+ return model_parse(Assistant, defaults | partial | content)
+
+
+class AssistantListRoute(
+ StatefulRoute[SyncCursorPage[Assistant], PartialAssistantList]
+):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(url__regex="/v1/assistants"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(self, request: httpx.Request, route: respx.Route) -> httpx.Response:
+ self._route = route
+
+ limit = request.url.params.get("limit")
+ order = request.url.params.get("order")
+ after = request.url.params.get("after")
+ before = request.url.params.get("before")
+
+ data = self._state.beta.assistants.list(limit, order, after, before)
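+        # Compare the filtered page with the full collection to derive
+        # cursor metadata (first_id, last_id, has_more)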
+ result_count = len(data)
+ total_count = len(self._state.beta.assistants.list())
+ has_data = bool(result_count)
+ has_more = total_count != result_count
+ first_id = data[0].id if has_data else None
+ last_id = data[-1].id if has_data else None
+ model = SyncCursorPage[Assistant](data=data)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(model)
+ | {"first_id": first_id, "last_id": last_id, "has_more": has_more},
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialAssistantList,
+ request: httpx.Request,
+ ) -> SyncCursorPage[Assistant]:
+ raise NotImplementedError
+
+
+class AssistantRetrieveRoute(StatefulRoute[Assistant, PartialAssistant]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(url__regex=r"/v1/assistants/(?P[a-zA-Z0-9\_]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ found = self._state.beta.assistants.get(id)
+ if not found:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found))
+
+ @staticmethod
+ def _build(partial: PartialAssistant, request: httpx.Request) -> Assistant:
+ raise NotImplementedError
+
+
+class AssistantUpdateRoute(StatefulRoute[Assistant, PartialAssistant]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(url__regex=r"/v1/assistants/(?P[a-zA-Z0-9\_]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ found = self._state.beta.assistants.get(id)
+ if not found:
+ return httpx.Response(404)
+
+ content: AssistantUpdateParams = json.loads(request.content)
+ deserialized = model_dict(found)
+ updated = model_parse(Assistant, deserialized | content)
+ self._state.beta.assistants.put(updated)
+
+ return httpx.Response(status_code=200, json=model_dict(updated))
+
+ @staticmethod
+ def _build(partial: PartialAssistant, request: httpx.Request) -> Assistant:
+ raise NotImplementedError
+
+
+class AssistantDeleteRoute(StatefulRoute[AssistantDeleted, PartialAssistantDeleted]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.delete(url__regex=r"/v1/assistants/(?P[a-zA-Z0-9\_]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ deleted = self._state.beta.assistants.delete(id)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(
+ AssistantDeleted(id=id, deleted=deleted, object="assistant.deleted")
+ ),
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialAssistantDeleted,
+ request: httpx.Request,
+ ) -> AssistantDeleted:
+ raise NotImplementedError
diff --git a/src/openai_responses/_routes/chat.py b/src/openai_responses/_routes/chat.py
new file mode 100644
index 0000000..64a8574
--- /dev/null
+++ b/src/openai_responses/_routes/chat.py
@@ -0,0 +1,37 @@
+import json
+
+import httpx
+import respx
+
+from openai.types.chat.chat_completion import ChatCompletion
+
+from ._base import StatelessRoute
+
+from .._types.partials.chat import PartialChatCompletion
+
+from .._utils.faker import faker
+from .._utils.serde import model_parse
+from .._utils.time import utcnow_unix_timestamp_s
+
+__all__ = ["ChatCompletionsCreateRoute"]
+
+
+class ChatCompletionsCreateRoute(StatelessRoute[ChatCompletion, PartialChatCompletion]):
+ def __init__(self, router: respx.MockRouter) -> None:
+ super().__init__(
+ route=router.post(url__regex="/v1/chat/completions"),
+ status_code=201,
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialChatCompletion,
+ request: httpx.Request,
+ ) -> ChatCompletion:
+ content = json.loads(request.content)
+ defaults: PartialChatCompletion = {
+ "id": partial.get("id", faker.chat.completion.id()),
+ "created": partial.get("created", utcnow_unix_timestamp_s()),
+ "object": "chat.completion",
+ }
+ return model_parse(ChatCompletion, defaults | partial | content)
diff --git a/src/openai_responses/_routes/embeddings.py b/src/openai_responses/_routes/embeddings.py
new file mode 100644
index 0000000..feb1592
--- /dev/null
+++ b/src/openai_responses/_routes/embeddings.py
@@ -0,0 +1,47 @@
+import json
+
+from openai.types.embedding import Embedding
+from openai.types.embedding_create_params import EmbeddingCreateParams
+from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
+
+import httpx
+import respx
+
+from ._base import StatelessRoute
+
+from .._types.partials.embeddings import PartialCreateEmbeddingResponse
+
+from .._utils.serde import model_parse
+
+__all__ = ["EmbeddingsCreateRoute"]
+
+
+class EmbeddingsCreateRoute(
+ StatelessRoute[
+ CreateEmbeddingResponse,
+ PartialCreateEmbeddingResponse,
+ ]
+):
+ def __init__(self, router: respx.MockRouter) -> None:
+ super().__init__(
+ route=router.post(url__regex="/v1/embeddings"),
+ status_code=201,
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialCreateEmbeddingResponse,
+ request: httpx.Request,
+ ) -> CreateEmbeddingResponse:
+ content: EmbeddingCreateParams = json.loads(request.content)
+ embeddings = partial.get("data", [])
+ response = CreateEmbeddingResponse(
+ data=[model_parse(Embedding, e) for e in embeddings],
+ model=partial.get("model", content["model"]),
+ object="list",
+ usage=model_parse(
+ Usage,
+ partial.get("usage", dict({"prompt_tokens": 0, "total_tokens": 0})),
+ ),
+ )
+ return response
diff --git a/src/openai_responses/_routes/files.py b/src/openai_responses/_routes/files.py
new file mode 100644
index 0000000..a8252fd
--- /dev/null
+++ b/src/openai_responses/_routes/files.py
@@ -0,0 +1,161 @@
+import re
+from typing import Any
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai.pagination import SyncPage
+from openai.types.file_object import FileObject
+from openai.types.file_deleted import FileDeleted
+
+from ._base import StatefulRoute
+
+from .._stores import StateStore
+from .._types.partials.files import (
+ PartialFileObject,
+ PartialFileList,
+ PartialFileDeleted,
+)
+
+from .._utils.faker import faker
+from .._utils.serde import model_dict
+from .._utils.time import utcnow_unix_timestamp_s
+
+REGEXP_FILE = r'Content-Disposition: form-data;[^;]+; name="purpose"\r\n\r\n(?P<purpose_value>[^\r\n]+)|filename="(?P<filename>[^"]+)"'
+
+__all__ = ["FileCreateRoute", "FileListRoute", "FileRetrieveRoute", "FileDeleteRoute"]
+
+
+class FileCreateRoute(StatefulRoute[FileObject, PartialFileObject]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(url__regex="/v1/files"),
+ status_code=201,
+ state=state,
+ )
+
+ @override
+ def _handler(self, request: httpx.Request, route: respx.Route) -> httpx.Response:
+ self._route = route
+ model = self._build({}, request)
+ self._state.files.put(model)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ def _build(partial: PartialFileObject, request: httpx.Request) -> FileObject:
+ content = request.content.decode("utf-8")
+
+ filename = ""
+ purpose = "assistants"
+
+        # FIXME: hacky manual parse of the multipart form body to recover the
+        # uploaded filename and the "purpose" field
+ prog = re.compile(REGEXP_FILE)
+ matches = prog.finditer(content)
+ for match in matches:
+ if match.group("filename"):
+ filename = match.group("filename")
+ if match.group("purpose_value"):
+ purpose = match.group("purpose_value")
+
+ return FileObject(
+ id=partial.get("id", faker.file.id()),
+ bytes=partial.get("bytes", 0),
+ created_at=partial.get("created_at", utcnow_unix_timestamp_s()),
+ filename=partial.get("filename", filename),
+ object="file",
+ purpose=partial.get("purpose", purpose), # type: ignore
+ status=partial.get("status", "uploaded"),
+ status_details=partial.get("status_details"),
+ )
+
+
+class FileListRoute(StatefulRoute[SyncPage[FileObject], PartialFileList]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(url__regex="/v1/files"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(self, request: httpx.Request, route: respx.Route) -> httpx.Response:
+ self._route = route
+ purpose = request.url.params.get("purpose")
+ files = SyncPage[FileObject](
+ object="list",
+ data=self._state.files.list(purpose=purpose),
+ )
+ return httpx.Response(status_code=200, json=model_dict(files))
+
+ @staticmethod
+ def _build(
+ partial: PartialFileList,
+ request: httpx.Request,
+ ) -> SyncPage[FileObject]:
+ raise NotImplementedError
+
+
+class FileRetrieveRoute(StatefulRoute[FileObject, PartialFileObject]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(url__regex=r"/v1/files/(?P[a-zA-Z0-9\-]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ found = self._state.files.get(id)
+ if not found:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found))
+
+ @staticmethod
+ def _build(
+ partial: PartialFileObject,
+ request: httpx.Request,
+ ) -> FileObject:
+ raise NotImplementedError
+
+
+class FileDeleteRoute(StatefulRoute[FileDeleted, PartialFileDeleted]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.delete(url__regex=r"/v1/files/(?P[a-zA-Z0-9\-]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ deleted = self._state.files.delete(id)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(FileDeleted(id=id, deleted=deleted, object="file")),
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialFileDeleted,
+ request: httpx.Request,
+    ) -> FileDeleted:
+ raise NotImplementedError
diff --git a/src/openai_responses/_routes/messages.py b/src/openai_responses/_routes/messages.py
new file mode 100644
index 0000000..742675e
--- /dev/null
+++ b/src/openai_responses/_routes/messages.py
@@ -0,0 +1,262 @@
+import json
+from typing import Any
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai.pagination import SyncCursorPage
+from openai.types.beta.threads.message import Message
+from openai.types.beta.threads.message_deleted import MessageDeleted
+from openai.types.beta.threads.message_update_params import MessageUpdateParams
+
+from ._base import StatefulRoute
+
+from .._stores import StateStore
+from .._types.partials.messages import (
+ PartialMessage,
+ PartialMessageList,
+ PartialMessageDeleted,
+)
+
+from .._utils.faker import faker
+from .._utils.serde import model_dict, model_parse
+from .._utils.time import utcnow_unix_timestamp_s
+
+__all__ = [
+ "MessageCreateRoute",
+ "MessageListRoute",
+ "MessageRetrieveRoute",
+ "MessageUpdateRoute",
+ "MessageDeleteRoute",
+]
+
+
+class MessageCreateRoute(StatefulRoute[Message, PartialMessage]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/messages"
+ ),
+ status_code=201,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found = self._state.beta.threads.get(thread_id)
+ if not found:
+ return httpx.Response(404)
+
+ model = self._build({"thread_id": thread_id}, request)
+ self._state.beta.threads.messages.put(model)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ def _build(partial: PartialMessage, request: httpx.Request) -> Message:
+ content = json.loads(request.content)
+ defaults: PartialMessage = {
+ "id": faker.beta.thread.message.id(),
+ "content": [],
+ "created_at": utcnow_unix_timestamp_s(),
+ "object": "thread.message",
+ "role": "user",
+ "status": "completed",
+ }
+ if content.get("content"):
+ defaults["content"].append(
+ {
+ "type": "text",
+ "text": {"annotations": [], "value": content.get("content")},
+ }
+ )
+ del content["content"]
+ return model_parse(Message, defaults | partial | content)
+
+
+class MessageListRoute(StatefulRoute[SyncCursorPage[Message], PartialMessageList]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/messages"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found = self._state.beta.threads.get(thread_id)
+ if not found:
+ return httpx.Response(404)
+
+ limit = request.url.params.get("limit")
+ order = request.url.params.get("order")
+ after = request.url.params.get("after")
+ before = request.url.params.get("before")
+ run_id = request.url.params.get("run_id")
+
+ data = self._state.beta.threads.messages.list(
+ thread_id,
+ limit,
+ order,
+ after,
+ before,
+ run_id,
+ )
+ result_count = len(data)
+ total_count = len(self._state.beta.threads.messages.list(thread_id))
+ has_data = bool(result_count)
+ has_more = total_count != result_count
+ first_id = data[0].id if has_data else None
+ last_id = data[-1].id if has_data else None
+ model = SyncCursorPage[Message](data=data)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(model)
+ | {"first_id": first_id, "last_id": last_id, "has_more": has_more},
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialMessageList,
+ request: httpx.Request,
+ ) -> SyncCursorPage[Message]:
+ raise NotImplementedError
+
+
+class MessageRetrieveRoute(StatefulRoute[Message, PartialMessage]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/messages/(?P[a-zA-Z0-9\_]+)"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_message = self._state.beta.threads.messages.get(id)
+ if not found_message:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found_message))
+
+ @staticmethod
+ def _build(partial: PartialMessage, request: httpx.Request) -> Message:
+ raise NotImplementedError
+
+
+class MessageUpdateRoute(StatefulRoute[Message, PartialMessage]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/messages/(?P[a-zA-Z0-9\_]+)"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_message = self._state.beta.threads.messages.get(id)
+ if not found_message:
+ return httpx.Response(404)
+
+ content: MessageUpdateParams = json.loads(request.content)
+ deserialized = model_dict(found_message)
+ updated = model_parse(Message, deserialized | content)
+ self._state.beta.threads.messages.put(updated)
+
+ return httpx.Response(status_code=200, json=model_dict(updated))
+
+ @staticmethod
+ def _build(partial: PartialMessage, request: httpx.Request) -> Message:
+ raise NotImplementedError
+
+
+class MessageDeleteRoute(StatefulRoute[MessageDeleted, PartialMessageDeleted]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.delete(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/messages/(?P[a-zA-Z0-9\_]+)"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ deleted = self._state.beta.threads.messages.delete(id)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(
+ MessageDeleted(id=id, deleted=deleted, object="thread.message.deleted")
+ ),
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialMessageDeleted,
+ request: httpx.Request,
+ ) -> MessageDeleted:
+ raise NotImplementedError
diff --git a/src/openai_responses/_routes/run_steps.py b/src/openai_responses/_routes/run_steps.py
new file mode 100644
index 0000000..86922a7
--- /dev/null
+++ b/src/openai_responses/_routes/run_steps.py
@@ -0,0 +1,122 @@
+from typing import Any
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai.pagination import SyncCursorPage
+from openai.types.beta.threads.runs.run_step import RunStep
+
+from ._base import StatefulRoute
+
+from .._stores import StateStore
+from .._types.partials.run_steps import PartialRunStep, PartialRunStepList
+
+from .._utils.serde import model_dict
+
+
+__all__ = ["RunStepListRoute", "RunStepRetrieveRoute"]
+
+
+class RunStepListRoute(StatefulRoute[SyncCursorPage[RunStep], PartialRunStepList]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs/(?P[a-zA-Z0-9\_]+)/steps"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ run_id = kwargs["run_id"]
+ found_run = self._state.beta.threads.runs.get(run_id)
+ if not found_run:
+ return httpx.Response(404)
+
+ limit = request.url.params.get("limit")
+ order = request.url.params.get("order")
+ after = request.url.params.get("after")
+ before = request.url.params.get("before")
+
+ data = self._state.beta.threads.runs.steps.list(
+ thread_id,
+ run_id,
+ limit,
+ order,
+ after,
+ before,
+ )
+ result_count = len(data)
+ total_count = len(self._state.beta.threads.runs.steps.list(thread_id, run_id))
+ has_data = bool(result_count)
+ has_more = total_count != result_count
+ first_id = data[0].id if has_data else None
+ last_id = data[-1].id if has_data else None
+ model = SyncCursorPage[RunStep](data=data)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(model)
+ | {"first_id": first_id, "last_id": last_id, "has_more": has_more},
+ )
+
+ @staticmethod
+ def _build(
+ partial: PartialRunStepList,
+ request: httpx.Request,
+ ) -> SyncCursorPage[RunStep]:
+ raise NotImplementedError
+
+
+class RunStepRetrieveRoute(StatefulRoute[RunStep, PartialRunStep]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs/(?P[a-zA-Z0-9\_]+)/steps/(?P[a-zA-Z0-9\_]+)"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ run_id = kwargs["run_id"]
+ found_run = self._state.beta.threads.runs.get(run_id)
+ if not found_run:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_run_step = self._state.beta.threads.runs.steps.get(id)
+ if not found_run_step:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found_run_step))
+
+ @staticmethod
+ def _build(partial: PartialRunStep, request: httpx.Request) -> RunStep:
+ raise NotImplementedError
diff --git a/src/openai_responses/_routes/runs.py b/src/openai_responses/_routes/runs.py
new file mode 100644
index 0000000..5cea16c
--- /dev/null
+++ b/src/openai_responses/_routes/runs.py
@@ -0,0 +1,351 @@
+import json
+from typing import Any
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai.pagination import SyncCursorPage
+from openai.types.beta.threads.run import Run
+from openai.types.beta.threads.run_create_params import RunCreateParams
+from openai.types.beta.threads.run_update_params import RunUpdateParams
+from openai.types.beta.thread_create_and_run_params import ThreadCreateAndRunParams
+
+from ._base import StatefulRoute
+
+from ..helpers.builders.threads import thread_from_create_request
+
+from .._stores import StateStore
+from .._types.partials.runs import PartialRun, PartialRunList
+
+from .._utils.copy import model_copy
+from .._utils.faker import faker
+from .._utils.serde import model_dict, model_parse
+from .._utils.time import utcnow_unix_timestamp_s
+
+
+__all__ = [
+ "RunCreateRoute",
+ "ThreadCreateAndRun",
+ "RunListRoute",
+ "RunRetrieveRoute",
+ "RunUpdateRoute",
+ "RunSubmitToolOutputsRoute",
+ "RunCancelRoute",
+]
+
+
+class RunCreateRoute(StatefulRoute[Run, PartialRun]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs"
+ ),
+ status_code=201,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ content: RunCreateParams = json.loads(request.content)
+
+ found_asst = self._state.beta.assistants.get(content["assistant_id"])
+ if not found_asst:
+ return httpx.Response(404)
+
+ model = self._build(
+ {
+ "thread_id": thread_id,
+ "instructions": found_asst.instructions or "",
+ "model": found_asst.model,
+ "tools": [model_dict(t) for t in (found_asst.tools or [])], # type: ignore
+ },
+ request,
+ )
+ self._state.beta.threads.runs.put(model)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ def _build(partial: PartialRun, request: httpx.Request) -> Run:
+ content = json.loads(request.content)
+ defaults: PartialRun = {
+ "id": faker.beta.thread.run.id(),
+ "created_at": utcnow_unix_timestamp_s(),
+ "instructions": "",
+ "object": "thread.run",
+ "status": "queued",
+ }
+ return model_parse(Run, defaults | partial | content)
+
+
+class ThreadCreateAndRun(StatefulRoute[Run, PartialRun]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(url__regex="/v1/threads/runs"),
+ status_code=201,
+ state=state,
+ )
+
+ @override
+ def _handler(self, request: httpx.Request, route: respx.Route) -> httpx.Response:
+ self._route = route
+
+ content: ThreadCreateAndRunParams = json.loads(request.content)
+
+ found_asst = self._state.beta.assistants.get(content["assistant_id"])
+ if not found_asst:
+ return httpx.Response(404)
+
+ thread_create_params = content.get("thread", {})
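+        # Wrap the embedded thread params in a synthetic request so the
+        # thread builder helper can reuse its request-parsing logic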
+ encoded = json.dumps(thread_create_params).encode("utf-8")
+ thread_create_req = httpx.Request("", "", content=encoded)
+ thread = thread_from_create_request(thread_create_req)
+ self._state.beta.threads.put(thread)
+
+ model = self._build(
+ {
+ "thread_id": thread.id,
+ "instructions": found_asst.instructions or "",
+ "model": found_asst.model,
+ "tools": [model_dict(t) for t in (found_asst.tools or [])], # type: ignore
+ },
+ request,
+ )
+ self._state.beta.threads.runs.put(model)
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ def _build(partial: PartialRun, request: httpx.Request) -> Run:
+        # Strip embedded thread params (handled separately in the handler)
+        # before delegating to the plain run builder with a rewritten request
+        content = json.loads(request.content)
+        content.pop("thread", None)
+        stripped = httpx.Request(
+            request.method,
+            request.url,
+            content=json.dumps(content).encode("utf-8"),
+        )
+        return RunCreateRoute._build(partial, stripped)
+
+
+class RunListRoute(StatefulRoute[SyncCursorPage[Run], PartialRunList]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ limit = request.url.params.get("limit")
+ order = request.url.params.get("order")
+ after = request.url.params.get("after")
+ before = request.url.params.get("before")
+
+ data = self._state.beta.threads.runs.list(
+ thread_id,
+ limit,
+ order,
+ after,
+ before,
+ )
+ result_count = len(data)
+ total_count = len(self._state.beta.threads.runs.list(thread_id))
+ has_data = bool(result_count)
+ has_more = total_count != result_count
+ first_id = data[0].id if has_data else None
+ last_id = data[-1].id if has_data else None
+ model = SyncCursorPage[Run](data=data)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(model)
+ | {"first_id": first_id, "last_id": last_id, "has_more": has_more},
+ )
+
+ @staticmethod
+ def _build(partial: PartialRunList, request: httpx.Request) -> SyncCursorPage[Run]:
+ raise NotImplementedError
+
+
+class RunRetrieveRoute(StatefulRoute[Run, PartialRun]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs/(?P[a-zA-Z0-9\_]+)"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_run = self._state.beta.threads.runs.get(id)
+ if not found_run:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found_run))
+
+ @staticmethod
+ def _build(partial: PartialRun, request: httpx.Request) -> Run:
+ raise NotImplementedError
+
+
+class RunUpdateRoute(StatefulRoute[Run, PartialRun]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs/(?P[a-zA-Z0-9\_]+)"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_run = self._state.beta.threads.runs.get(id)
+ if not found_run:
+ return httpx.Response(404)
+
+ content: RunUpdateParams = json.loads(request.content)
+ deserialized = model_dict(found_run)
+ updated = model_parse(Run, deserialized | content)
+ self._state.beta.threads.runs.put(updated)
+
+ return httpx.Response(status_code=200, json=model_dict(updated))
+
+ @staticmethod
+ def _build(partial: PartialRun, request: httpx.Request) -> Run:
+ raise NotImplementedError
+
+
+class RunSubmitToolOutputsRoute(StatefulRoute[Run, PartialRun]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs/(?P[a-zA-Z0-9\_]+)/submit_tool_outputs"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ # TODO: update associated run step in store
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_run = self._state.beta.threads.runs.get(id)
+ if not found_run:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found_run))
+
+ @staticmethod
+ def _build(partial: PartialRun, request: httpx.Request) -> Run:
+ raise NotImplementedError
+
+
+class RunCancelRoute(StatefulRoute[Run, PartialRun]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)/runs/(?P[a-zA-Z0-9\_]+)/cancel"
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ # TODO: update associated run step in store
+ self._route = route
+
+ thread_id = kwargs["thread_id"]
+ found_thread = self._state.beta.threads.get(thread_id)
+ if not found_thread:
+ return httpx.Response(404)
+
+ id = kwargs["id"]
+ found_run = self._state.beta.threads.runs.get(id)
+ if not found_run:
+ return httpx.Response(404)
+
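+        # Mimic asynchronous cancellation: persist the run as "cancelled" but
+        # report "cancelling" in the immediate response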
+ found_run.status = "cancelled"
+ self._state.beta.threads.runs.put(found_run)
+ copy = model_copy(found_run)
+ copy.status = "cancelling"
+
+ return httpx.Response(status_code=200, json=model_dict(copy))
+
+ @staticmethod
+ def _build(partial: PartialRun, request: httpx.Request) -> Run:
+ raise NotImplementedError
diff --git a/src/openai_responses/_routes/threads.py b/src/openai_responses/_routes/threads.py
new file mode 100644
index 0000000..8ee1a0e
--- /dev/null
+++ b/src/openai_responses/_routes/threads.py
@@ -0,0 +1,166 @@
+import json
+from typing import Any
+from typing_extensions import override
+
+import httpx
+import respx
+
+from openai.types.beta.thread import Thread
+from openai.types.beta.thread_create_params import ThreadCreateParams
+from openai.types.beta.thread_update_params import ThreadUpdateParams
+from openai.types.beta.thread_deleted import ThreadDeleted
+
+from ._base import StatefulRoute
+
+from ..helpers.builders.messages import message_from_create_request
+
+from .._stores import StateStore
+from .._types.partials.threads import PartialThread, PartialThreadDeleted
+
+from .._utils.faker import faker
+from .._utils.serde import model_dict, model_parse
+from .._utils.time import utcnow_unix_timestamp_s
+
+
+__all__ = [
+ "ThreadCreateRoute",
+ "ThreadRetrieveRoute",
+ "ThreadUpdateRoute",
+ "ThreadDeleteRoute",
+]
+
+
+class ThreadCreateRoute(StatefulRoute[Thread, PartialThread]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(url__regex="/v1/threads"),
+ status_code=201,
+ state=state,
+ )
+
+ @override
+ def _handler(self, request: httpx.Request, route: respx.Route) -> httpx.Response:
+ self._route = route
+
+ content: ThreadCreateParams = json.loads(request.content)
+ model = self._build({}, request)
+ self._state.beta.threads.put(model)
+
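+        # Seed the new thread with any messages included in the create params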
+ if content.get("messages"):
+ for message_create_params in content.get("messages", []):
+ encoded = json.dumps(message_create_params).encode("utf-8")
+ create_message_req = httpx.Request(method="", url="", content=encoded)
+ message = message_from_create_request(model.id, create_message_req)
+ self._state.beta.threads.messages.put(message)
+
+ return httpx.Response(
+ status_code=self._status_code,
+ json=model_dict(model),
+ )
+
+ @staticmethod
+ def _build(partial: PartialThread, request: httpx.Request) -> Thread:
+ content = json.loads(request.content)
+ if content.get("messages"):
+ del content["messages"]
+ if content.get("tool_resources"):
+ del content["tool_resources"]
+ defaults: PartialThread = {
+ "id": faker.beta.thread.id(),
+ "created_at": utcnow_unix_timestamp_s(),
+ "object": "thread",
+ }
+ return model_parse(Thread, defaults | partial | content)
+
+
+class ThreadRetrieveRoute(StatefulRoute[Thread, PartialThread]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.get(url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ found = self._state.beta.threads.get(id)
+ if not found:
+ return httpx.Response(404)
+
+ return httpx.Response(status_code=200, json=model_dict(found))
+
+ @staticmethod
+ def _build(partial: PartialThread, request: httpx.Request) -> Thread:
+ raise NotImplementedError
+
+
+class ThreadUpdateRoute(StatefulRoute[Thread, PartialThread]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.post(
+ url__regex=r"/v1/threads/(?P(?!.*runs)[a-zA-Z0-9_]+)" # NOTE: avoids match on /threads/runs
+ ),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ found = self._state.beta.threads.get(id)
+ if not found:
+ return httpx.Response(404)
+
+ content: ThreadUpdateParams = json.loads(request.content)
+ deserialized = model_dict(found)
+ updated = model_parse(Thread, deserialized | content)
+ self._state.beta.threads.put(updated)
+
+ return httpx.Response(status_code=200, json=model_dict(updated))
+
+ @staticmethod
+ def _build(partial: PartialThread, request: httpx.Request) -> Thread:
+ raise NotImplementedError
+
+
+class ThreadDeleteRoute(StatefulRoute[ThreadDeleted, PartialThreadDeleted]):
+ def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
+ super().__init__(
+ route=router.delete(url__regex=r"/v1/threads/(?P[a-zA-Z0-9\_]+)"),
+ status_code=200,
+ state=state,
+ )
+
+ @override
+ def _handler(
+ self,
+ request: httpx.Request,
+ route: respx.Route,
+ **kwargs: Any,
+ ) -> httpx.Response:
+ self._route = route
+ id = kwargs["id"]
+ deleted = self._state.beta.threads.delete(id)
+ return httpx.Response(
+ status_code=200,
+ json=model_dict(
+ ThreadDeleted(id=id, deleted=deleted, object="thread.deleted")
+ ),
+ )
+
+ @staticmethod
+ def _build(partial: PartialThreadDeleted, request: httpx.Request) -> ThreadDeleted:
+ raise NotImplementedError
diff --git a/src/openai_responses/_stores/__init__.py b/src/openai_responses/_stores/__init__.py
new file mode 100644
index 0000000..3cf1511
--- /dev/null
+++ b/src/openai_responses/_stores/__init__.py
@@ -0,0 +1,3 @@
+from .state_store import StateStore
+
+__all__ = ["StateStore"]
diff --git a/src/openai_responses/state.py b/src/openai_responses/_stores/state_store.py
similarity index 83%
rename from src/openai_responses/state.py
rename to src/openai_responses/_stores/state_store.py
index 4dee73c..256b384 100644
--- a/src/openai_responses/state.py
+++ b/src/openai_responses/_stores/state_store.py
@@ -1,4 +1,4 @@
-from typing import Dict, Generic, List, Literal, Optional, TypeVar, Union
+from typing import Any, Dict, Generic, List, Literal, Optional, TypeVar, Union
from openai.types import FileObject
from openai.types.beta.assistant import Assistant
@@ -22,12 +22,30 @@
],
)
+Resource = Union[FileObject, Assistant, Thread, Message, Run, RunStep, Any]
+
class StateStore:
def __init__(self) -> None:
self.files = FileStore()
self.beta = Beta()
+ def _blind_put(self, resource: Resource) -> None:
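+        # Route the resource to its matching sub-store based on runtime type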
+ if isinstance(resource, FileObject):
+ self.files.put(resource)
+ elif isinstance(resource, Assistant):
+ self.beta.assistants.put(resource)
+ elif isinstance(resource, Thread):
+ self.beta.threads.put(resource)
+ elif isinstance(resource, Message):
+ self.beta.threads.messages.put(resource)
+ elif isinstance(resource, Run):
+ self.beta.threads.runs.put(resource)
+ elif isinstance(resource, RunStep):
+ self.beta.threads.runs.steps.put(resource)
+ else:
+ raise TypeError(f"Cannot put object of type {type(resource)} in store")
+
class Beta:
def __init__(self) -> None:
@@ -114,8 +132,11 @@ def list(
order: Optional[str] = None,
after: Optional[str] = None,
before: Optional[str] = None,
+ run_id: Optional[str] = None,
) -> List[Message]:
objs = [m for m in list(self._data.values()) if m.thread_id == thread_id]
+ if run_id:
+ objs = [obj for obj in objs if obj.run_id == run_id]
objs = list(reversed(objs)) if (order or "desc") == "desc" else objs
start_ix = 0
diff --git a/src/openai_responses/_types/generics.py b/src/openai_responses/_types/generics.py
new file mode 100644
index 0000000..6b281ab
--- /dev/null
+++ b/src/openai_responses/_types/generics.py
@@ -0,0 +1,11 @@
+from typing import Any, Mapping, TypeVar
+
+from openai import BaseModel
+
+__all__ = ["M", "P"]
+
+M = TypeVar("M", bound=BaseModel)
+"""OpenAI model generic"""
+
+P = TypeVar("P", bound=Mapping[str, Any])
+"""Partial OpenAI model generic"""
diff --git a/src/openai_responses/_types/partials/assistants.py b/src/openai_responses/_types/partials/assistants.py
new file mode 100644
index 0000000..ab7af10
--- /dev/null
+++ b/src/openai_responses/_types/partials/assistants.py
@@ -0,0 +1,85 @@
+from typing import Annotated, Any, Dict, Literal, List, TypedDict, Union
+from typing_extensions import NotRequired
+
+from openai._utils._transform import PropertyInfo
+
+__all__ = ["PartialAssistant", "PartialAssistantList", "PartialAssistantDeleted"]
+
+
+class PartialAssistantResponseFormat(TypedDict):
+ type: NotRequired[Literal["text", "json_object"]]
+
+
+class PartialFunctionDefinition(TypedDict):
+ name: str
+ description: NotRequired[str]
+ parameters: NotRequired[Dict[str, Any]]
+
+
+class PartialCodeInterpreterTool(TypedDict):
+ type: Literal["code_interpreter"]
+
+
+class PartialFileSearchTool(TypedDict):
+ type: Literal["file_search"]
+
+
+class PartialFunctionTool(TypedDict):
+ type: Literal["function"]
+ function: PartialFunctionDefinition
+
+
+class PartialToolResourcesCodeInterpreter(TypedDict):
+ file_ids: NotRequired[List[str]]
+
+
+class PartialToolResourcesFileSearch(TypedDict):
+ vector_store_ids: NotRequired[List[str]]
+
+
+class PartialToolResources(TypedDict):
+ code_interpreter: NotRequired[PartialToolResourcesCodeInterpreter]
+ file_search: NotRequired[PartialToolResourcesFileSearch]
+
+
+class PartialAssistant(TypedDict):
+ id: NotRequired[str]
+ created_at: NotRequired[int]
+ description: NotRequired[str]
+ instructions: NotRequired[str]
+ metadata: NotRequired[Dict[str, str]]
+ model: NotRequired[str]
+ name: NotRequired[str]
+ object: NotRequired[Literal["assistant"]]
+ tools: NotRequired[
+ List[
+ Annotated[
+ Union[
+ PartialCodeInterpreterTool,
+ PartialFileSearchTool,
+ PartialFunctionTool,
+ ],
+ PropertyInfo(discriminator="type"),
+ ]
+ ]
+ ]
+ response_format: NotRequired[
+ Union[Literal["none", "auto"], PartialAssistantResponseFormat]
+ ]
+ temperature: NotRequired[float]
+ tool_resources: NotRequired[PartialToolResources]
+ top_p: NotRequired[float]
+
+
+class PartialAssistantList(TypedDict):
+ object: NotRequired[Literal["list"]]
+ data: NotRequired[List[PartialAssistant]]
+ first_id: NotRequired[str]
+ last_id: NotRequired[str]
+ has_more: NotRequired[bool]
+
+
+class PartialAssistantDeleted(TypedDict):
+ id: NotRequired[str]
+ object: NotRequired[Literal["assistant.deleted"]]
+ deleted: NotRequired[bool]
diff --git a/src/openai_responses/_types/partials/chat.py b/src/openai_responses/_types/partials/chat.py
new file mode 100644
index 0000000..008a877
--- /dev/null
+++ b/src/openai_responses/_types/partials/chat.py
@@ -0,0 +1,68 @@
+from typing import List, Literal, TypedDict
+from typing_extensions import NotRequired
+
+__all__ = ["PartialChatCompletion"]
+
+
+class PartialFunctionCall(TypedDict):
+ arguments: str
+ name: str
+
+
+class PartialToolCall(TypedDict):
+ id: str
+ function: PartialFunctionCall
+ type: Literal["function"]
+
+
+class PartialMessage(TypedDict):
+ content: NotRequired[str]
+ role: Literal["assistant"]
+ function_call: NotRequired[PartialFunctionCall]
+ tool_calls: NotRequired[List[PartialToolCall]]
+
+
+class PartialTopLogprob(TypedDict):
+ token: str
+ bytes: NotRequired[List[int]]
+ logprob: float
+
+
+class PartialChatCompletionTokenLogprob(TypedDict):
+ token: str
+ bytes: NotRequired[List[int]]
+ logprob: float
+ top_logprobs: List[PartialTopLogprob]
+
+
+class PartialChoiceLogprobs(TypedDict):
+ content: NotRequired[List[PartialChatCompletionTokenLogprob]]
+
+
+class PartialChoice(TypedDict):
+ finish_reason: Literal[
+ "stop",
+ "length",
+ "tool_calls",
+ "content_filter",
+ "function_call",
+ ]
+ index: int
+    logprobs: NotRequired[PartialChoiceLogprobs]
+ message: PartialMessage
+
+
+class PartialCompletionUsage(TypedDict):
+ completion_tokens: int
+ prompt_tokens: int
+ total_tokens: int
+
+
+class PartialChatCompletion(TypedDict):
+ id: NotRequired[str]
+ choices: NotRequired[List[PartialChoice]]
+ created: NotRequired[int]
+ model: NotRequired[str]
+ object: NotRequired[Literal["chat.completion"]]
+ system_fingerprint: NotRequired[str]
+ usage: NotRequired[PartialCompletionUsage]
diff --git a/src/openai_responses/_types/partials/embeddings.py b/src/openai_responses/_types/partials/embeddings.py
new file mode 100644
index 0000000..0bed9e0
--- /dev/null
+++ b/src/openai_responses/_types/partials/embeddings.py
@@ -0,0 +1,22 @@
+from typing import List, Literal, TypedDict
+from typing_extensions import NotRequired
+
+__all__ = ["PartialCreateEmbeddingResponse"]
+
+
+class PartialEmbedding(TypedDict):
+ embedding: List[float]
+ index: int
+ object: Literal["embedding"]
+
+
+class PartialUsage(TypedDict):
+ prompt_tokens: int
+ total_tokens: int
+
+
+class PartialCreateEmbeddingResponse(TypedDict):
+ data: NotRequired[List[PartialEmbedding]]
+ model: NotRequired[str]
+ object: NotRequired[Literal["list"]]
+ usage: NotRequired[PartialUsage]
diff --git a/src/openai_responses/_types/partials/files.py b/src/openai_responses/_types/partials/files.py
new file mode 100644
index 0000000..11a9d7b
--- /dev/null
+++ b/src/openai_responses/_types/partials/files.py
@@ -0,0 +1,28 @@
+from typing import List, Literal, TypedDict
+from typing_extensions import NotRequired
+
+__all__ = ["PartialFileObject", "PartialFileList", "PartialFileDeleted"]
+
+
+class PartialFileObject(TypedDict):
+ id: NotRequired[str]
+ bytes: NotRequired[int]
+ created_at: NotRequired[int]
+ filename: NotRequired[str]
+ object: NotRequired[Literal["file"]]
+ purpose: NotRequired[
+ Literal["fine-tune", "fine-tune-results", "assistants", "assistants_output"]
+ ]
+ status: NotRequired[Literal["uploaded", "processed", "error"]]
+ status_details: NotRequired[str]
+
+
+class PartialFileList(TypedDict):
+ data: NotRequired[List[PartialFileObject]]
+ object: NotRequired[Literal["list"]]
+
+
+class PartialFileDeleted(TypedDict):
+ id: NotRequired[str]
+ object: NotRequired[Literal["file"]]
+ deleted: NotRequired[bool]
diff --git a/src/openai_responses/_types/partials/messages.py b/src/openai_responses/_types/partials/messages.py
new file mode 100644
index 0000000..df0bf21
--- /dev/null
+++ b/src/openai_responses/_types/partials/messages.py
@@ -0,0 +1,105 @@
+from typing import Annotated, Dict, List, Literal, TypedDict, Union
+from typing_extensions import NotRequired
+
+from openai._utils import PropertyInfo
+
+__all__ = ["PartialMessage", "PartialMessageList", "PartialMessageDeleted"]
+
+
+class PartialFileSearchTool(TypedDict):
+ type: Literal["file_search"]
+
+
+class PartialCodeInterpreterTool(TypedDict):
+ type: Literal["code_interpreter"]
+
+
+class PartialAttachment(TypedDict):
+ file_id: NotRequired[str]
+ tools: NotRequired[List[Union[PartialCodeInterpreterTool, PartialFileSearchTool]]]
+
+
+class PartialImageFile(TypedDict):
+ file_id: str
+
+
+class PartialImageFileContentBlock(TypedDict):
+ type: Literal["image_file"]
+ image_file: PartialImageFile
+
+
+class PartialFileCitation(TypedDict):
+ file_id: str
+ quote: str
+
+
+class PartialFileCitationAnnotation(TypedDict):
+ end_index: int
+ file_citation: PartialFileCitation
+ start_index: int
+ text: str
+ type: Literal["file_citation"]
+
+
+class PartialFilePath(TypedDict):
+ file_id: str
+
+
+class PartialFilePathAnnotation(TypedDict):
+ end_index: int
+ file_path: PartialFilePath
+ start_index: int
+ text: str
+ type: Literal["file_path"]
+
+
+class PartialText(TypedDict):
+ annotations: List[
+ Annotated[
+ Union[PartialFileCitationAnnotation, PartialFilePathAnnotation],
+ PropertyInfo(discriminator="type"),
+ ]
+ ]
+ value: str
+
+
+class PartialTextContentBlock(TypedDict):
+ type: Literal["text"]
+ text: PartialText
+
+
+class PartialMessage(TypedDict):
+ id: NotRequired[str]
+ assistant_id: NotRequired[str]
+ attachments: NotRequired[List[PartialAttachment]]
+ completed_at: NotRequired[int]
+ content: NotRequired[
+ List[
+ Annotated[
+ Union[PartialImageFileContentBlock, PartialTextContentBlock],
+ PropertyInfo(discriminator="type"),
+ ]
+ ]
+ ]
+ created_at: NotRequired[int]
+ incomplete_at: NotRequired[int]
+ metadata: NotRequired[Dict[str, str]]
+ object: NotRequired[Literal["thread.message"]]
+ role: NotRequired[Literal["user", "assistant"]]
+ run_id: NotRequired[str]
+ status: NotRequired[Literal["in_progress", "incomplete", "completed"]]
+ thread_id: NotRequired[str]
+
+
+class PartialMessageList(TypedDict):
+ object: NotRequired[Literal["list"]]
+ data: NotRequired[List[PartialMessage]]
+ first_id: NotRequired[str]
+ last_id: NotRequired[str]
+ has_more: NotRequired[bool]
+
+
+class PartialMessageDeleted(TypedDict):
+ id: NotRequired[str]
+ object: NotRequired[Literal["thread.message.deleted"]]
+ deleted: NotRequired[bool]
diff --git a/src/openai_responses/_types/partials/run_steps.py b/src/openai_responses/_types/partials/run_steps.py
new file mode 100644
index 0000000..c4a7611
--- /dev/null
+++ b/src/openai_responses/_types/partials/run_steps.py
@@ -0,0 +1,119 @@
+from typing import Annotated, Dict, List, Literal, TypedDict, Union
+from typing_extensions import NotRequired
+
+from openai._utils import PropertyInfo
+
+__all__ = ["PartialRunStep", "PartialRunStepList"]
+
+
+class PartialLastError(TypedDict):
+ code: Literal["server_error", "rate_limit_exceeded"]
+ message: str
+
+
+class PartialMessageCreation(TypedDict):
+ message_id: str
+
+
+class PartialMessageCreationStepDetails(TypedDict):
+ message_creation: PartialMessageCreation
+ type: Literal["message_creation"]
+
+
+class PartialCodeInterpreterOutputLogs(TypedDict):
+ logs: str
+ type: Literal["logs"]
+
+
+class PartialCodeInterpreterOutputImageImage(TypedDict):
+ file_id: str
+
+
+class PartialCodeInterpreterOutputImage(TypedDict):
+ image: PartialCodeInterpreterOutputImageImage
+ type: Literal["image"]
+
+
+class PartialCodeInterpreter(TypedDict):
+ input: str
+ outputs: List[
+ Annotated[
+ Union[PartialCodeInterpreterOutputLogs, PartialCodeInterpreterOutputImage],
+ PropertyInfo(discriminator="type"),
+ ]
+ ]
+
+
+class PartialCodeInterpreterToolCall(TypedDict):
+ id: str
+ code_interpreter: PartialCodeInterpreter
+ type: Literal["code_interpreter"]
+
+
+class PartialFileSearchToolCall(TypedDict):
+ id: str
+ file_search: object
+ type: Literal["file_search"]
+
+
+class PartialFunction(TypedDict):
+ arguments: str
+ name: str
+ output: NotRequired[str]
+
+
+class PartialFunctionToolCall(TypedDict):
+ id: str
+ function: PartialFunction
+ type: Literal["function"]
+
+
+class PartialToolCallsStepDetails(TypedDict):
+ tool_calls: Annotated[
+ Union[
+ PartialCodeInterpreterToolCall,
+ PartialFileSearchToolCall,
+ PartialFunctionToolCall,
+ ],
+ PropertyInfo(discriminator="type"),
+ ]
+ type: Literal["tool_calls"]
+
+
+class PartialUsage(TypedDict):
+ completion_tokens: int
+ prompt_tokens: int
+ total_tokens: int
+
+
+PartialStepDetails = Annotated[
+ Union[PartialMessageCreationStepDetails, PartialToolCallsStepDetails],
+ PropertyInfo(discriminator="type"),
+]
+
+
+class PartialRunStep(TypedDict):
+ id: NotRequired[str]
+ assistant_id: str
+ cancelled_at: NotRequired[int]
+ completed_at: NotRequired[int]
+ created_at: NotRequired[int]
+ expired_at: NotRequired[int]
+ failed_at: NotRequired[int]
+ last_error: NotRequired[PartialLastError]
+ metadata: NotRequired[Dict[str, str]]
+ object: NotRequired[Literal["thread.run.step"]]
+ run_id: str
+ status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
+ step_details: PartialStepDetails
+ thread_id: str
+ type: Literal["message_creation", "tool_calls"]
+ usage: NotRequired[PartialUsage]
+
+
+class PartialRunStepList(TypedDict):
+ object: NotRequired[Literal["list"]]
+ data: NotRequired[List[PartialRunStep]]
+ first_id: NotRequired[str]
+ last_id: NotRequired[str]
+ has_more: NotRequired[bool]
diff --git a/src/openai_responses/_types/partials/runs.py b/src/openai_responses/_types/partials/runs.py
new file mode 100644
index 0000000..aea8f4b
--- /dev/null
+++ b/src/openai_responses/_types/partials/runs.py
@@ -0,0 +1,142 @@
+from typing import Annotated, Any, Dict, List, Literal, TypedDict, Union
+from typing_extensions import NotRequired
+
+from openai._utils import PropertyInfo
+
+__all__ = ["PartialRun", "PartialRunList"]
+
+
+class PartialIncompleteDetails(TypedDict):
+ reason: NotRequired[Literal["max_completion_tokens", "max_prompt_tokens"]]
+
+
+class PartialLastError(TypedDict):
+ code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
+ message: str
+
+
+class PartialFunction(TypedDict):
+ arguments: str
+ name: str
+
+
+class PartialRequiredActionFunctionToolCall(TypedDict):
+ id: str
+ function: PartialFunction
+ type: Literal["function"]
+
+
+class PartialRequiredActionSubmitToolOutputs(TypedDict):
+ tool_calls: List[PartialRequiredActionFunctionToolCall]
+
+
+class PartialRequiredAction(TypedDict):
+ submit_tool_outputs: PartialRequiredActionSubmitToolOutputs
+ type: Literal["submit_tool_outputs"]
+
+
+class PartialAssistantResponseFormat(TypedDict):
+ type: NotRequired[Literal["text", "json_object"]]
+
+
+class PartialAssistantToolChoiceFunction(TypedDict):
+ name: str
+
+
+class PartialAssistantToolChoice(TypedDict):
+ type: Literal["function", "code_interpreter", "file_search"]
+ function: NotRequired[PartialAssistantToolChoiceFunction]
+
+
+class PartialCodeInterpreterTool(TypedDict):
+ type: Literal["code_interpreter"]
+
+
+class PartialFileSearchTool(TypedDict):
+ type: Literal["file_search"]
+
+
+class PartialFunctionDefinition(TypedDict):
+ name: str
+ description: NotRequired[str]
+ parameters: NotRequired[Dict[str, Any]]
+
+
+class PartialFunctionTool(TypedDict):
+ function: PartialFunctionDefinition
+ type: Literal["function"]
+
+
+class PartialTruncationStrategy(TypedDict):
+ type: Literal["auto", "last_messages"]
+ last_messages: NotRequired[int]
+
+
+class PartialUsage(TypedDict):
+ completion_tokens: int
+ prompt_tokens: int
+ total_tokens: int
+
+
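+# All-optional mirror of openai.types.beta.threads.run.Run, so callers can override any subset of fields.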
+class PartialRun(TypedDict):
+ id: NotRequired[str]
+ assistant_id: NotRequired[str]
+ cancelled_at: NotRequired[int]
+ completed_at: NotRequired[int]
+ created_at: NotRequired[int]
+ expires_at: NotRequired[int]
+ failed_at: NotRequired[int]
+ incomplete_details: NotRequired[PartialIncompleteDetails]
+ instructions: NotRequired[str]
+ last_error: NotRequired[PartialLastError]
+ max_completion_tokens: NotRequired[int]
+ max_prompt_tokens: NotRequired[int]
+ metadata: NotRequired[Dict[str, str]]
+ model: NotRequired[str]
+ object: NotRequired[Literal["thread.run"]]
+ required_action: NotRequired[PartialRequiredAction]
+ response_format: NotRequired[
+ Union[Literal["none", "auto"], PartialAssistantResponseFormat]
+ ]
+ started_at: NotRequired[int]
+ status: NotRequired[
+ Literal[
+ "queued",
+ "in_progress",
+ "requires_action",
+ "cancelling",
+ "cancelled",
+ "failed",
+ "completed",
+ "expired",
+ ]
+ ]
+ thread_id: NotRequired[str]
+ tool_choice: NotRequired[
+ Union[Literal["none", "auto", "required"], PartialAssistantToolChoice]
+ ]
+ tools: NotRequired[
+ List[
+ Annotated[
+ Union[
+ PartialCodeInterpreterTool,
+ PartialFileSearchTool,
+ PartialFunctionTool,
+ ],
+ PropertyInfo(discriminator="type"),
+ ]
+ ]
+ ]
+ truncation_strategy: NotRequired[PartialTruncationStrategy]
+ usage: NotRequired[PartialUsage]
+ temperature: NotRequired[float]
+ top_p: NotRequired[float]
+
+
+class PartialRunList(TypedDict):
+ object: NotRequired[Literal["list"]]
+ data: NotRequired[List[PartialRun]]
+ first_id: NotRequired[str]
+ last_id: NotRequired[str]
+ has_more: NotRequired[bool]
diff --git a/src/openai_responses/_types/partials/threads.py b/src/openai_responses/_types/partials/threads.py
new file mode 100644
index 0000000..0e8a3c1
--- /dev/null
+++ b/src/openai_responses/_types/partials/threads.py
@@ -0,0 +1,32 @@
+from typing import Dict, List, Literal, TypedDict
+from typing_extensions import NotRequired
+
+__all__ = ["PartialThread", "PartialThreadDeleted"]
+
+
+class PartialToolResourcesCodeInterpreter(TypedDict):
+ file_ids: NotRequired[List[str]]
+
+
+class PartialToolResourcesFileSearch(TypedDict):
+ vector_store_ids: NotRequired[List[str]]
+
+
+class PartialToolResources(TypedDict):
+ code_interpreter: NotRequired[PartialToolResourcesCodeInterpreter]
+ file_search: NotRequired[PartialToolResourcesFileSearch]
+
+
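+# Mirrors openai.types.beta.thread.Thread; every field is optional.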
+class PartialThread(TypedDict):
+    id: NotRequired[str]
+    created_at: NotRequired[int]
+    metadata: NotRequired[Dict[str, str]]
+    object: NotRequired[Literal["thread"]]
+    tool_resources: NotRequired[PartialToolResources]
+
+
+class PartialThreadDeleted(TypedDict):
+ id: NotRequired[str]
+ object: NotRequired[Literal["thread.deleted"]]
+ deleted: NotRequired[bool]
diff --git a/src/openai_responses/_utils/copy.py b/src/openai_responses/_utils/copy.py
new file mode 100644
index 0000000..e28fe68
--- /dev/null
+++ b/src/openai_responses/_utils/copy.py
@@ -0,0 +1,9 @@
+from .._types.generics import M
+
+
+def model_copy(m: M) -> M:
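+    """Copy a model using Pydantic v2's ``model_copy`` or v1's ``copy``."""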
+ if hasattr(m, "model_validate"):
+ return getattr(m, "model_copy")()
+ else:
+ return getattr(m, "copy")()
diff --git a/src/openai_responses/_utils/faker.py b/src/openai_responses/_utils/faker.py
new file mode 100644
index 0000000..8ea929e
--- /dev/null
+++ b/src/openai_responses/_utils/faker.py
@@ -0,0 +1,10 @@
+from faker import Faker
+from faker_openai_api_provider import OpenAiApiProvider
+
+__all__ = ["faker"]
+
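+# Module-level Faker handle with the OpenAI provider registered; the exported
+# `faker` is used across the mocks to generate fake resource IDs.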
+fake = Faker()
+fake.add_provider(OpenAiApiProvider)
+faker: OpenAiApiProvider.Api = fake.openai()
diff --git a/src/openai_responses/_utils/serde.py b/src/openai_responses/_utils/serde.py
new file mode 100644
index 0000000..1f75082
--- /dev/null
+++ b/src/openai_responses/_utils/serde.py
@@ -0,0 +1,21 @@
+from typing import Any, Type
+
+from openai import BaseModel
+
+from .._types.generics import M
+
+
+def model_dict(m: BaseModel) -> dict[str, Any]:
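+    """Dump a model to a dict via Pydantic v2's ``model_dump`` or v1's ``dict``."""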
+ if hasattr(m, "model_dump"):
+ return getattr(m, "model_dump")()
+ else:
+ return getattr(m, "dict")()
+
+
+def model_parse(m: Type[M], d: object) -> M:
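+    """Parse ``d`` into ``m`` via Pydantic v2's ``model_validate`` or v1's ``parse_obj``."""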
+ if hasattr(m, "model_validate"):
+ return getattr(m, "model_validate")(d)
+ else:
+ return getattr(m, "parse_obj")(d)
diff --git a/src/openai_responses/_utils/time.py b/src/openai_responses/_utils/time.py
new file mode 100644
index 0000000..40f62c0
--- /dev/null
+++ b/src/openai_responses/_utils/time.py
@@ -0,0 +1,6 @@
+import datetime as dt
+
+
+def utcnow_unix_timestamp_s() -> int:
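+    """Return the current Unix timestamp in whole seconds."""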
+    return int(dt.datetime.now(dt.timezone.utc).timestamp())
diff --git a/src/openai_responses/decorators.py b/src/openai_responses/decorators.py
deleted file mode 100644
index 3b590f3..0000000
--- a/src/openai_responses/decorators.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import time
-import warnings
-from typing import Any, Callable
-
-import httpx
-import respx
-from decorator import decorator
-
-
-@decorator
-def side_effect(fn: Callable[..., httpx.Response], *args: Any, **kwargs: Any):
- # inject latency
- latency: float = kwargs.get("latency", 0.0)
- time.sleep(latency)
-
- try:
- route = next((arg for arg in args if isinstance(arg, respx.Route)))
- failures: int = kwargs.get("failures", 0)
- if route.call_count < failures:
- return httpx.Response(status_code=500)
- except StopIteration:
- warnings.warn("Could not find route in side effect call")
-
- return fn(*args, **kwargs)
-
-
-def unwrap(wrapped: Callable[..., Any]) -> Callable[..., Any]:
- """
- https://stackoverflow.com/a/77694433
- """
- closure = wrapped.__closure__
- if closure:
- for cell in closure:
- if hasattr(cell.cell_contents, "__module__"):
- if cell.cell_contents.__module__.split(".")[0] == "openai_responses":
- continue
- if hasattr(cell.cell_contents, "__closure__"):
- return (
- cell.cell_contents
- if cell.cell_contents.__closure__ is None
- else unwrap(cell.cell_contents)
- )
- return wrapped
diff --git a/src/openai_responses/endpoints/_base.py b/src/openai_responses/endpoints/_base.py
deleted file mode 100644
index 2ad9b94..0000000
--- a/src/openai_responses/endpoints/_base.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import inspect
-from functools import partial, wraps
-from typing import Any, Callable, Dict, List, Optional, Protocol, TypedDict
-
-import httpx
-import respx
-from faker import Faker
-from faker_openai_api_provider import OpenAiApiProvider
-
-from ..decorators import unwrap
-from ..state import StateStore
-
-
-class KwargsGetterProtocol(Protocol):
- def __call__(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
- ...
-
-
-class RouteRegistration(TypedDict):
- name: str
- method: Callable[..., respx.Route]
- pattern: Optional[str]
- side_effect: Callable[..., httpx.Response]
-
-
-class Mock:
- def __init__(
- self,
- *,
- name: str,
- endpoint: Optional[str] = None,
- route_registrations: Optional[List[RouteRegistration]] = None,
- ) -> None:
- print("Mocker initialized")
- self._name = name
- self._base_url = "https://api.openai.com"
- self._endpoint = endpoint or ""
- self._registrations = route_registrations or []
-
- fake = Faker()
- fake.add_provider(OpenAiApiProvider)
- self._faker: OpenAiApiProvider.Api = fake.openai()
-
- @property
- def name(self) -> str:
- return self._name
-
- @property
- def base_url(self) -> str:
- return self._base_url
-
- def set_base_url(self, url: str) -> None:
- self._base_url = url
-
- @property
- def endpoint(self) -> str:
- return self._endpoint
-
- @property
- def url(self) -> str:
- return self._base_url + self._endpoint
-
- @staticmethod
- def _sort_routes(routes: List[respx.Route]) -> None:
- routes.sort(key=lambda r: len(repr(r._pattern)), reverse=True) # type: ignore
-
- def _register_routes(self, **kwargs: Any) -> None:
- for registration in self._registrations:
- setattr(
- self,
- registration["name"],
- CallContainer(
- route=registration["method"](
- url__regex=self.url + (registration["pattern"] or "")
- ).mock(
- side_effect=partial(
- registration["side_effect"],
- **kwargs,
- )
- )
- ),
- )
-
-
-class StatelessMock(Mock):
- def _make_decorator(
- self,
- common_kwargs_getter: KwargsGetterProtocol,
- ):
- def decorator(fn: Callable[..., Any]):
- is_async = inspect.iscoroutinefunction(fn)
- argspec = inspect.getfullargspec(unwrap(fn))
- needs_ref = self._name in argspec.args
-
- @wraps(fn)
- async def async_wrapper(*args: Any, **kwargs: Any):
- if needs_ref:
- kwargs[self.name] = self
- common_kwargs = common_kwargs_getter(*args, **kwargs)
- with respx.mock:
- self._register_routes(**common_kwargs)
- self._sort_routes(respx.mock.routes._routes)
- return await fn(*args, **kwargs)
-
- @wraps(fn)
- def wrapper(*args: Any, **kwargs: Any):
- if needs_ref:
- kwargs[self.name] = self
- common_kwargs = common_kwargs_getter(*args, **kwargs)
- with respx.mock:
- self._register_routes(**common_kwargs)
- self._sort_routes(respx.mock.routes._routes)
- return fn(*args, **kwargs)
-
- return wrapper if not is_async else async_wrapper
-
- return decorator
-
-
-class StatefulMock(Mock):
- def _make_decorator(
- self,
- common_kwargs_getter: KwargsGetterProtocol,
- state_store: StateStore,
- ):
- def decorator(fn: Callable[..., Any]):
- is_async = inspect.iscoroutinefunction(fn)
- argspec = inspect.getfullargspec(unwrap(fn))
- needs_ref = self._name in argspec.args
-
- @wraps(fn)
- async def async_wrapper(*args: Any, **kwargs: Any):
- if needs_ref:
- kwargs[self.name] = self
- state = kwargs.get("state_store", state_store)
- common_kwargs = common_kwargs_getter(used_state=state, *args, **kwargs)
- with respx.mock:
- self._register_routes(**common_kwargs)
- self._sort_routes(respx.mock.routes._routes)
- return await fn(*args, **kwargs)
-
- @wraps(fn)
- def wrapper(*args: Any, **kwargs: Any):
- if needs_ref:
- kwargs[self.name] = self
- state = kwargs.get("state_store", state_store)
- common_kwargs = common_kwargs_getter(used_state=state, *args, **kwargs)
- with respx.mock:
- self._register_routes(**common_kwargs)
- self._sort_routes(respx.mock.routes._routes)
- return fn(*args, **kwargs)
-
- return wrapper if not is_async else async_wrapper
-
- return decorator
-
-
-class CallContainer:
- def __init__(self, route: Optional[respx.Route] = None) -> None:
- self.route = route or respx.Route()
diff --git a/src/openai_responses/endpoints/_partial_schemas.py b/src/openai_responses/endpoints/_partial_schemas.py
deleted file mode 100644
index ac178c2..0000000
--- a/src/openai_responses/endpoints/_partial_schemas.py
+++ /dev/null
@@ -1,143 +0,0 @@
-from typing import Annotated, Literal, List, Optional, TypedDict, Union
-from typing_extensions import Required
-
-from openai._utils._transform import PropertyInfo
-from openai.types.beta.assistant_tool_param import AssistantToolParam
-from openai.types.beta.threads.run_status import RunStatus
-
-__all__ = ["PartialRun", "PartialRunStep"]
-
-
-class PartialIncompleteDetails(TypedDict):
- reason: Optional[Literal["max_completion_tokens", "max_prompt_tokens"]]
-
-
-class PartialFunctionNoOutput(TypedDict):
- name: str
- arguments: str
-
-
-class PartialRequiredActionFunctionToolCall(TypedDict):
- id: str
- function: PartialFunctionNoOutput
- type: Literal["function"]
-
-
-class PartialRequiredActionSubmitToolOutputs(TypedDict):
- tool_calls: List[PartialRequiredActionFunctionToolCall]
-
-
-class PartialRequiredAction(TypedDict):
- submit_tool_outputs: PartialRequiredActionSubmitToolOutputs
- type: Literal["submit_tool_outputs"]
-
-
-class PartialLastError(TypedDict, total=False):
- code: Literal["server_error", "rate_limit_exceeded", "invalid_prompt"]
- message: str
-
-
-class PartialRun(TypedDict, total=False):
- cancelled_at: int
- completed_at: int
- expires_at: int
- failed_at: int
- incomplete_details: PartialIncompleteDetails
- instructions: str
- last_error: PartialLastError
- model: str
- required_action: PartialRequiredAction
- started_at: int
- status: RunStatus
- tools: List[AssistantToolParam]
-
-
-class PartialCodeInterpreterOutputLogs(TypedDict):
- logs: str
- type: Literal["logs"]
-
-
-class PartialCodeInterpreterOutputImageImage(TypedDict):
- file_id: str
-
-
-class PartialCodeInterpreterOutputImage(TypedDict):
- type: Literal["image"]
- image: PartialCodeInterpreterOutputImageImage
-
-
-class PartialCodeInterpreter(TypedDict):
- input: str
- outputs: List[
- Annotated[
- Union[
- PartialCodeInterpreterOutputLogs,
- PartialCodeInterpreterOutputImage,
- ],
- PropertyInfo(discriminator="type"),
- ]
- ]
-
-
-class PartialCodeInterpreterToolCall(TypedDict):
- type: Literal["code_interpreter"]
- id: str
- code_interpreter: PartialCodeInterpreter
-
-
-class PartialRetrievalToolCall(TypedDict):
- type: Literal["retrieval"]
- id: str
-
-
-class PartialFunction(TypedDict, total=False):
- name: Required[str]
- arguments: Required[str]
- output: Optional[str]
-
-
-class PartialFunctionToolCall(TypedDict):
- type: Literal["function"]
- id: str
- function: PartialFunction
-
-
-class PartialToolCallsStepDetails(TypedDict):
- type: Literal["tool_calls"]
- tool_calls: List[
- Annotated[
- Union[
- PartialCodeInterpreterToolCall,
- PartialRetrievalToolCall,
- PartialFunctionToolCall,
- ],
- PropertyInfo(discriminator="type"),
- ]
- ]
-
-
-class PartialMessageCreation(TypedDict):
- message_id: str
-
-
-class PartialMessageCreationStepDetails(TypedDict):
- type: Literal["message_creation"]
- message_creation: PartialMessageCreation
-
-
-class PartialRunStep(TypedDict, total=False):
- id: str
- assistant_id: str
- cancelled_at: Optional[int]
- completed_at: Optional[int]
- expired_at: Optional[int]
- failed_at: Optional[int]
- last_error: Optional[PartialLastError]
- status: Literal["in_progress", "cancelled", "failed", "completed", "expired"]
- step_details: Required[
- Annotated[
- Union[PartialMessageCreationStepDetails, PartialToolCallsStepDetails],
- PropertyInfo(discriminator="type"),
- ]
- ]
- type: Literal["message_creation", "tool_calls"]
diff --git a/src/openai_responses/endpoints/assistants.py b/src/openai_responses/endpoints/assistants.py
deleted file mode 100644
index c7327a4..0000000
--- a/src/openai_responses/endpoints/assistants.py
+++ /dev/null
@@ -1,256 +0,0 @@
-import json
-from typing import Any, Iterable, List, Optional
-
-import httpx
-import respx
-
-from openai.pagination import SyncCursorPage
-from openai.types.beta.assistant import Assistant, ToolResources
-from openai.types.beta.function_tool import FunctionTool
-from openai.types.beta.file_search_tool import FileSearchTool
-from openai.types.beta.assistant_tool import AssistantTool
-from openai.types.beta.assistant_deleted import AssistantDeleted
-from openai.types.beta.code_interpreter_tool import CodeInterpreterTool
-from openai.types.beta.assistant_tool_param import AssistantToolParam
-from openai.types.beta.assistant_create_params import AssistantCreateParams
-from openai.types.beta.assistant_update_params import AssistantUpdateParams
-from openai.types.beta.assistant_response_format import AssistantResponseFormat
-from openai.types.beta.assistant_response_format_option import (
- AssistantResponseFormatOption,
-)
-from openai.types.beta.assistant_response_format_option_param import (
- AssistantResponseFormatOptionParam,
-)
-
-from ._base import StatefulMock, CallContainer
-from ..decorators import side_effect
-from ..state import StateStore
-from ..utils import model_dict, model_parse, utcnow_unix_timestamp_s, remove_none
-
-
-class AssistantsMock(StatefulMock):
- def __init__(self) -> None:
- super().__init__(
- name="assistants_mock",
- endpoint="/v1/assistants",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- },
- {
- "name": "list",
- "method": respx.get,
- "pattern": None,
- "side_effect": self._list,
- },
- {
- "name": "retrieve",
- "method": respx.get,
- "pattern": r"/(?P\w+)",
- "side_effect": self._retrieve,
- },
- {
- "name": "update",
- "method": respx.post,
- "pattern": r"/(?P\w+)",
- "side_effect": self._update,
- },
- {
- "name": "delete",
- "method": respx.delete,
- "pattern": r"/(?P\w+)",
- "side_effect": self._delete,
- },
- ],
- )
-
- # NOTE: these are explicitly defined to help with autocomplete and type hints
- self.create = CallContainer()
- self.list = CallContainer()
- self.retrieve = CallContainer()
- self.update = CallContainer()
- self.delete = CallContainer()
-
- def __call__(
- self,
- *,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- state_store: Optional[StateStore] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- latency=latency or 0,
- failures=failures or 0,
- state_store=kwargs["used_state"],
- )
-
- return self._make_decorator(getter, state_store or StateStore())
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
-
- content: AssistantCreateParams = json.loads(request.content)
-
- content_tools = content.get("tools", [])
-
- asst = Assistant(
- id=self._faker.beta.assistant.id(),
- created_at=utcnow_unix_timestamp_s(),
- description=content.get("description"),
- instructions=content.get("instructions"),
- metadata=content.get("metadata"),
- model=content["model"],
- name=content.get("name"),
- object="assistant",
- tools=self._parse_tool_params(content_tools) or [],
- response_format=self._parse_response_format_params(
- content.get("response_format")
- ),
- temperature=content.get("temperature"),
- tool_resources=model_parse(
- ToolResources,
- content.get("tool_resources"),
- ),
- top_p=content.get("top_p"),
- )
- state_store.beta.assistants.put(asst)
-
- return httpx.Response(status_code=201, json=model_dict(asst))
-
- @side_effect
- def _list(
- self,
- request: httpx.Request,
- route: respx.Route,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.list.route = route
-
- limit = request.url.params.get("limit")
- order = request.url.params.get("order")
- after = request.url.params.get("after")
- before = request.url.params.get("before")
-
- assts = SyncCursorPage[Assistant](
- data=state_store.beta.assistants.list(limit, order, after, before)
- )
-
- return httpx.Response(status_code=200, json=model_dict(assts))
-
- @side_effect
- def _retrieve(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.retrieve.route = route
-
- *_, id = request.url.path.split("/")
- asst = state_store.beta.assistants.get(id)
-
- if not asst:
- return httpx.Response(status_code=404)
-
- else:
- return httpx.Response(status_code=200, json=model_dict(asst))
-
- @side_effect
- def _update(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.update.route = route
-
- *_, id = request.url.path.split("/")
- content: AssistantUpdateParams = json.loads(request.content)
-
- asst = state_store.beta.assistants.get(id)
-
- if not asst:
- return httpx.Response(status_code=404)
-
- asst.description = content.get("description", asst.description)
- asst.instructions = content.get("instructions", asst.instructions)
- asst.metadata = content.get("metadata", asst.metadata)
- asst.model = content.get("model", asst.model)
- asst.name = content.get("name", asst.name)
- asst.response_format = (
- self._parse_response_format_params(content.get("response_format"))
- or asst.response_format
- )
- asst.temperature = content.get("temperature", asst.temperature)
- asst.tool_resources = (
- model_parse(ToolResources, content.get("tool_resources"))
- or asst.tool_resources
- )
- asst.tools = self._parse_tool_params(content.get("tools")) or asst.tools
- asst.top_p = content.get("top_p", asst.top_p)
-
- state_store.beta.assistants.put(asst)
-
- return httpx.Response(status_code=200, json=model_dict(asst))
-
- @side_effect
- def _delete(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.delete.route = route
-
- *_, id = request.url.path.split("/")
- deleted = state_store.beta.assistants.delete(id)
-
- return httpx.Response(
- status_code=200,
- json=model_dict(
- AssistantDeleted(id=id, deleted=deleted, object="assistant.deleted")
- ),
- )
-
- @staticmethod
- def _parse_tool_params(
- params: Optional[Iterable[AssistantToolParam]],
- ) -> Optional[List[AssistantTool]]:
- m = {
- "code_interpreter": CodeInterpreterTool,
- "file_search": FileSearchTool,
- "function": FunctionTool,
- }
- return (
- remove_none([model_parse(m[tool["type"]], tool) for tool in params]) # type: ignore
- if params
- else None
- )
-
- @staticmethod
- def _parse_response_format_params(
- params: Optional[AssistantResponseFormatOptionParam],
- ) -> Optional[AssistantResponseFormatOption]:
- return (
- model_parse(AssistantResponseFormat, params)
- if isinstance(params, dict)
- else params
- )
diff --git a/src/openai_responses/endpoints/chat.py b/src/openai_responses/endpoints/chat.py
deleted file mode 100644
index fb49cd6..0000000
--- a/src/openai_responses/endpoints/chat.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import json
-from typing import Any, List, Literal, Optional, TypedDict
-
-import httpx
-import respx
-
-from openai.types.chat.chat_completion import ChatCompletion, Choice
-from openai.types.completion_usage import CompletionUsage
-from openai.types.chat.chat_completion_message import (
- ChatCompletionMessage,
- FunctionCall,
-)
-from openai.types.chat.chat_completion_message_tool_call import (
- ChatCompletionMessageToolCall,
- Function,
-)
-from openai.types.chat.completion_create_params import CompletionCreateParams
-
-from ._base import StatelessMock, CallContainer
-from ..decorators import side_effect
-from ..utils import model_dict, utcnow_unix_timestamp_s
-from ..tokens import count_tokens
-
-
-class ChatMock:
- def __init__(self) -> None:
- self.completions = ChatCompletionMock()
-
-
-class PartialFunctionCall(TypedDict, total=False):
- arguments: str
- name: str
-
-
-class PartialToolCall(TypedDict, total=False):
- function: PartialFunctionCall
-
-
-class PartialMessage(TypedDict, total=False):
- content: str
- function_call: PartialFunctionCall
- tool_calls: List[PartialToolCall]
-
-
-class PartialChoice(TypedDict, total=False):
- finish_reason: Literal[
- "stop",
- "length",
- "tool_calls",
- "content_filter",
- "function_call",
- ]
- message: PartialMessage
-
-
-class ChatCompletionMock(StatelessMock):
- def __init__(self) -> None:
- super().__init__(
- name="chat_completion_mock",
- endpoint="/v1/chat/completions",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- }
- ],
- )
- self.create = CallContainer()
-
- def __call__(
- self,
- *,
- choices: Optional[List[PartialChoice]] = None,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- choices=choices or [],
- latency=latency or 0,
- failures=failures or 0,
- )
-
- return self._make_decorator(getter)
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- choices: List[PartialChoice],
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
-
- content: CompletionCreateParams = json.loads(request.content)
-
- completion = ChatCompletion(
- id=self._faker.chat.completion.id(),
- choices=[
- self._choice_partial_to_model(i, p) for i, p in enumerate(choices)
- ],
- model=content["model"],
- created=utcnow_unix_timestamp_s(),
- system_fingerprint="",
- object="chat.completion",
- )
-
- generated = ""
- for choice in completion.choices:
- if choice.message.content:
- generated += choice.message.content
- elif choice.message.tool_calls:
- for tool_call in choice.message.tool_calls:
- generated += tool_call.function.arguments
-
- prompt = ""
- for message in content["messages"]:
- prompt += str(message.get("content"))
-
- completion_tokens = count_tokens(completion.model, generated)
- prompt_tokens = count_tokens(completion.model, prompt)
- total_tokens = completion_tokens + prompt_tokens
-
- completion.usage = CompletionUsage(
- completion_tokens=completion_tokens,
- prompt_tokens=prompt_tokens,
- total_tokens=total_tokens,
- )
-
- return httpx.Response(status_code=201, json=model_dict(completion))
-
- def _choice_partial_to_model(self, i: int, p: PartialChoice) -> Choice:
- def fn_call_partial_to_model(p: Optional[PartialFunctionCall]):
- if p is None:
- return None
- else:
- return FunctionCall(
- arguments=p.get("arguments", ""),
- name=p.get("name", ""),
- )
-
- def tool_calls_partial_to_model(p: Optional[List[PartialToolCall]]):
- if p is None:
- return None
- else:
- calls: List[ChatCompletionMessageToolCall] = []
- for partial_call in p:
- call_function = partial_call.get("function", {})
- calls.append(
- ChatCompletionMessageToolCall(
- id=self._faker.beta.thread.run.step.step_details.tool_call.id(),
- function=Function(
- arguments=call_function.get("arguments", ""),
- name=call_function.get("name", ""),
- ),
- type="function",
- )
- )
- return calls
-
- message = ChatCompletionMessage(
- content=p.get("message", {}).get("content"),
- role="assistant",
- function_call=fn_call_partial_to_model(
- p.get("message", {}).get("function_call")
- ),
- tool_calls=tool_calls_partial_to_model(
- p.get("message", {}).get("tool_calls")
- ),
- )
-
- return Choice(
- finish_reason=p.get("finish_reason", "stop"),
- message=message,
- index=i,
- )
diff --git a/src/openai_responses/endpoints/embeddings.py b/src/openai_responses/endpoints/embeddings.py
deleted file mode 100644
index 573636d..0000000
--- a/src/openai_responses/endpoints/embeddings.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import json
-from typing import Any, List, Optional
-
-import httpx
-import respx
-
-from openai.types.embedding import Embedding
-from openai.types.embedding_create_params import EmbeddingCreateParams
-from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
-
-from ._base import StatelessMock, CallContainer
-from ..decorators import side_effect
-from ..tokens import count_tokens
-from ..utils import model_dict
-
-
-class EmbeddingsMock(StatelessMock):
- def __init__(self) -> None:
- super().__init__(
- name="embeddings_mock",
- endpoint="/v1/embeddings",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- }
- ],
- )
- self.create = CallContainer()
-
- def __call__(
- self,
- *,
- embedding: Optional[List[float]] = None,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- embedding=embedding or [],
- latency=latency or 0,
- failures=failures or 0,
- )
-
- return self._make_decorator(getter)
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- embedding: List[float],
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
-
- content: EmbeddingCreateParams = json.loads(request.content)
-
- token_count = 0
- if isinstance(content["input"], str):
- token_count = count_tokens(content["model"], content["input"])
-
- embeddings = CreateEmbeddingResponse(
- data=[Embedding(embedding=embedding, index=0, object="embedding")],
- model=content["model"],
- object="list",
- usage=Usage(prompt_tokens=token_count, total_tokens=token_count),
- )
-
- return httpx.Response(status_code=201, json=model_dict(embeddings))
diff --git a/src/openai_responses/endpoints/files.py b/src/openai_responses/endpoints/files.py
deleted file mode 100644
index 02c3187..0000000
--- a/src/openai_responses/endpoints/files.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import re
-from typing import Any, Optional
-
-import httpx
-import respx
-
-from openai.pagination import SyncPage
-from openai.types.file_object import FileObject
-from openai.types.file_deleted import FileDeleted
-
-from ._base import StatefulMock, CallContainer
-from ..decorators import side_effect
-from ..state import StateStore
-from ..utils import model_dict, utcnow_unix_timestamp_s
-
-
-class FilesMock(StatefulMock):
- def __init__(self) -> None:
- super().__init__(
- name="files_mock",
- endpoint="/v1/files",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- },
- {
- "name": "list",
- "method": respx.get,
- "pattern": None,
- "side_effect": self._list,
- },
- {
- "name": "retrieve",
- "method": respx.get,
- "pattern": r"/(?P\w+)",
- "side_effect": self._retrieve,
- },
- {
- "name": "delete",
- "method": respx.delete,
- "pattern": r"/(?P\w+)",
- "side_effect": self._delete,
- },
- ],
- )
-
- # NOTE: these are explicitly defined to help with autocomplete and type hints
- self.create = CallContainer()
- self.list = CallContainer()
- self.retrieve = CallContainer()
- self.delete = CallContainer()
-
- def __call__(
- self,
- *,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- state_store: Optional[StateStore] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- latency=latency or 0,
- failures=failures or 0,
- state_store=kwargs["used_state"],
- )
-
- return self._make_decorator(getter, state_store or StateStore())
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
-
- filename = ""
- purpose = "assistants"
-
- content = request.content.decode("utf-8")
-
- prog = re.compile(
- r'Content-Disposition: form-data;[^;]+; name="purpose"\r\n\r\n(?P[^\r\n]+)|filename="(?P[^"]+)"'
- )
- matches = prog.finditer(content)
- for match in matches:
- if match.group("filename"):
- filename = match.group("filename")
- if match.group("purpose_value"):
- purpose = match.group("purpose_value")
-
- obj = FileObject(
- id=self._faker.file.id(),
- bytes=0,
- created_at=utcnow_unix_timestamp_s(),
- filename=filename,
- object="file",
- purpose=purpose, # type: ignore
- status="uploaded",
- )
- state_store.files.put(obj)
-
- return httpx.Response(status_code=201, json=model_dict(obj))
-
- @side_effect
- def _list(
- self,
- request: httpx.Request,
- route: respx.Route,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.list.route = route
-
- purpose = request.url.params.get("purpose")
- files = SyncPage[FileObject](
- object="list",
- data=state_store.files.list(purpose=purpose),
- )
-
- return httpx.Response(status_code=200, json=model_dict(files))
-
- @side_effect
- def _retrieve(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.retrieve.route = route
-
- *_, id = request.url.path.split("/")
- file = state_store.files.get(id)
-
- if not file:
- return httpx.Response(status_code=404)
-
- return httpx.Response(status_code=200, json=model_dict(file))
-
- @side_effect
- def _delete(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.delete.route = route
-
- *_, id = request.url.path.split("/")
- deleted = state_store.files.delete(id)
-
- return httpx.Response(
- status_code=200,
- json=model_dict(FileDeleted(id=id, deleted=deleted, object="file")),
- )
diff --git a/src/openai_responses/endpoints/messages.py b/src/openai_responses/endpoints/messages.py
deleted file mode 100644
index 7c01dbf..0000000
--- a/src/openai_responses/endpoints/messages.py
+++ /dev/null
@@ -1,281 +0,0 @@
-import json
-from functools import partial
-from typing import Any, Iterable, List, Optional, Union
-
-import httpx
-
-from openai.types.beta.code_interpreter_tool import CodeInterpreterTool
-from openai.types.beta.file_search_tool import FileSearchTool
-import respx
-
-from openai.pagination import SyncCursorPage
-from openai.types.beta.thread_create_params import (
- Message as ThreadMessageCreateParams,
- MessageAttachment as ThreadMessageCreateAttachmentParams,
-)
-from openai.types.beta.threads.text import Text
-from openai.types.beta.threads.text_content_block import TextContentBlock
-from openai.types.beta.threads.message import Message, Attachment as MessageAttachment
-from openai.types.beta.threads.message_create_params import (
- MessageCreateParams,
- Attachment as MessageCreateAttachmentParams,
-)
-from openai.types.beta.threads.message_update_params import MessageUpdateParams
-from openai.types.beta.threads.run_create_params import AdditionalMessage
-
-
-from ._base import StatefulMock, CallContainer
-from ..decorators import side_effect
-from ..state import StateStore
-from ..utils import model_dict, model_parse, remove_none, utcnow_unix_timestamp_s
-
-__all__ = ["MessagesMock"]
-
-
-class MessagesMock(StatefulMock):
- def __init__(self) -> None:
- super().__init__(
- name="messages_mock",
- endpoint=r"/v1/threads/(?P\w+)/messages",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- },
- {
- "name": "list",
- "method": respx.get,
- "pattern": None,
- "side_effect": self._list,
- },
- {
- "name": "retrieve",
- "method": respx.get,
- "pattern": r"/(?P\w+)",
- "side_effect": self._retrieve,
- },
- {
- "name": "update",
- "method": respx.post,
- "pattern": r"/(?P\w+)",
- "side_effect": self._update,
- },
- ],
- )
-
- # NOTE: these are explicitly defined to help with autocomplete and type hints
- self.create = CallContainer()
- self.list = CallContainer()
- self.retrieve = CallContainer()
- self.update = CallContainer()
-
- def _register_routes(self, **common: Any) -> None:
- self.retrieve.route = respx.get(url__regex=self.url + r"/(?P\w+)").mock(
- side_effect=partial(self._retrieve, **common)
- )
- self.update.route = respx.post(url__regex=self.url + r"/(?P\w+)").mock(
- side_effect=partial(self._update, **common)
- )
- self.create.route = respx.post(url__regex=self.url).mock(
- side_effect=partial(self._create, **common)
- )
- self.list.route = respx.get(url__regex=self.url).mock(
- side_effect=partial(self._list, **common)
- )
-
- def __call__(
- self,
- *,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- state_store: Optional[StateStore] = None,
- validate_thread_exists: Optional[bool] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- latency=latency or 0,
- failures=failures or 0,
- state_store=kwargs["used_state"],
- validate_thread_exists=validate_thread_exists or False,
- )
-
- return self._make_decorator(getter, state_store or StateStore())
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- state_store: StateStore,
- validate_thread_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- content: MessageCreateParams = json.loads(request.content)
- message = self._parse_message_create_params(thread_id, content)
-
- state_store.beta.threads.messages.put(message)
-
- return httpx.Response(status_code=201, json=model_dict(message))
-
- @side_effect
- def _list(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- state_store: StateStore,
- validate_thread_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.list.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- limit = request.url.params.get("limit")
- order = request.url.params.get("order")
- after = request.url.params.get("after")
- before = request.url.params.get("before")
-
- messages = SyncCursorPage[Message](
- data=state_store.beta.threads.messages.list(
- thread_id,
- limit,
- order,
- after,
- before,
- )
- )
-
- return httpx.Response(status_code=200, json=model_dict(messages))
-
- @side_effect
- def _retrieve(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- id: str,
- state_store: StateStore,
- validate_thread_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.retrieve.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- *_, id = request.url.path.split("/")
- message = state_store.beta.threads.messages.get(id)
-
- if not message:
- return httpx.Response(status_code=404)
-
- else:
- return httpx.Response(status_code=200, json=model_dict(message))
-
- @side_effect
- def _update(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- id: str,
- state_store: StateStore,
- validate_thread_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.update.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- *_, id = request.url.path.split("/")
- content: MessageUpdateParams = json.loads(request.content)
-
- message = state_store.beta.threads.messages.get(id)
-
- if not message:
- return httpx.Response(status_code=404)
-
- message.metadata = content.get("metadata", message.metadata)
-
- state_store.beta.threads.messages.put(message)
-
- return httpx.Response(status_code=200, json=model_dict(message))
-
- def _parse_message_create_params(
- self,
- thread_id: str,
- params: Union[
- ThreadMessageCreateParams,
- MessageCreateParams,
- AdditionalMessage,
- ],
- ) -> Message:
- return Message(
- id=self._faker.beta.thread.message.id(),
- attachments=self._parse_attachments_params(params.get("attachments")),
- content=[
- TextContentBlock(
- text=Text(annotations=[], value=params["content"]),
- type="text",
- )
- ],
- created_at=utcnow_unix_timestamp_s(),
- metadata=params.get("metadata"),
- object="thread.message",
- role=params["role"],
- status="completed",
- thread_id=thread_id,
- )
-
- @staticmethod
- def _parse_attachments_params(
- params: Optional[
- Union[
- Iterable[MessageCreateAttachmentParams],
- Iterable[ThreadMessageCreateAttachmentParams],
- ]
- ],
- ) -> Optional[List[MessageAttachment]]:
- m = {"code_interpreter": CodeInterpreterTool, "file_search": FileSearchTool}
- return (
- remove_none(
- [
- model_parse(
- MessageAttachment,
- {
- "file_id": attachment.get("file_id"),
- "tools": [
- model_parse(m[t["type"]], t) # type: ignore
- for t in attachment.get("tools", [])
- ],
- },
- )
- for attachment in params
- ]
- )
- if params
- else None
- )
diff --git a/src/openai_responses/endpoints/run_steps.py b/src/openai_responses/endpoints/run_steps.py
deleted file mode 100644
index ceda788..0000000
--- a/src/openai_responses/endpoints/run_steps.py
+++ /dev/null
@@ -1,208 +0,0 @@
-from typing import Any, List, Optional
-
-import httpx
-import respx
-
-from openai.pagination import SyncCursorPage
-from openai.types.beta.threads.runs.run_step import (
- RunStep,
- LastError as RunStepLastError,
-)
-from openai.types.beta.threads.runs.tool_calls_step_details import ToolCallsStepDetails
-from openai.types.beta.threads.runs.message_creation_step_details import (
- MessageCreationStepDetails,
-)
-
-from ._base import StatefulMock, CallContainer
-from ._partial_schemas import PartialRunStep
-
-from ..decorators import side_effect
-from ..state import StateStore
-from ..utils import model_dict, model_parse, utcnow_unix_timestamp_s
-
-
-class RunStepsMock(StatefulMock):
- def __init__(self) -> None:
- super().__init__(
- name="assistants_mock",
- endpoint="/v1/assistants",
- route_registrations=[
- {
- "name": "list",
- "method": respx.get,
- "pattern": None,
- "side_effect": self._list,
- },
- {
- "name": "retrieve",
- "method": respx.get,
- "pattern": r"/(?P\w+)",
- "side_effect": self._retrieve,
- },
- ],
- )
-
- self.list = CallContainer()
- self.retrieve = CallContainer()
-
- def __call__(
- self,
- *,
- steps: Optional[List[PartialRunStep]] = None,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- state_store: Optional[StateStore] = None,
- validate_thread_exists: Optional[bool] = None,
- validate_run_exists: Optional[bool] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- steps=steps or [],
- latency=latency or 0,
- failures=failures or 0,
- state_store=kwargs["used_state"],
- validate_thread_exists=validate_thread_exists or False,
- validate_run_exists=validate_run_exists or False,
- )
-
- return self._make_decorator(getter, state_store or StateStore())
-
- @side_effect
- def _list(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- run_id: str,
- steps: List[PartialRunStep],
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_run_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.list.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- assistant_id = ""
- if validate_run_exists:
- run = state_store.beta.threads.runs.get(run_id)
-
- if not run:
- return httpx.Response(status_code=404)
-
- assistant_id = run.assistant_id
-
- self._put_steps_in_store(thread_id, run_id, assistant_id, steps, state_store)
-
- limit = request.url.params.get("limit")
- order = request.url.params.get("order")
- after = request.url.params.get("after")
- before = request.url.params.get("before")
-
- return httpx.Response(
- status_code=200,
- json=model_dict(
- SyncCursorPage[RunStep](
- data=state_store.beta.threads.runs.steps.list(
- thread_id,
- run_id,
- limit,
- order,
- after,
- before,
- )
- )
- ),
- )
-
- @side_effect
- def _retrieve(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- run_id: str,
- id: str,
- steps: List[PartialRunStep],
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_run_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.list.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- assistant_id = ""
- if validate_run_exists:
- run = state_store.beta.threads.runs.get(run_id)
-
- if not run:
- return httpx.Response(status_code=404)
-
- assistant_id = run.assistant_id
-
- self._put_steps_in_store(thread_id, run_id, assistant_id, steps, state_store)
-
- *_, id = request.url.path.split("/")
- step = state_store.beta.threads.runs.steps.get(id)
-
- if not step:
- return httpx.Response(status_code=404)
-
- return httpx.Response(status_code=200, json=model_dict(step))
-
- def _put_steps_in_store(
- self,
- thread_id: str,
- run_id: str,
- assistant_id: str,
- steps: List[PartialRunStep],
- state_store: StateStore,
- ) -> None:
- for step in steps:
- step_details = step.get("step_details")
- if not step_details:
- continue
-
- step_details_model: Any = None
- if step_details["type"] == "message_creation":
- step_details_model = model_parse(
- MessageCreationStepDetails, step_details
- )
- elif step_details["type"] == "tool_calls":
- step_details_model = model_parse(ToolCallsStepDetails, step_details)
-
- if not step_details_model:
- continue
-
- state_store.beta.threads.runs.steps.put(
- RunStep(
- id=step.get("id", self._faker.beta.thread.run.step.id()),
- assistant_id=step.get("assistant_id", assistant_id),
- cancelled_at=step.get("cancelled_at"),
- completed_at=step.get("completed_at"),
- created_at=utcnow_unix_timestamp_s(),
- expired_at=step.get("expired_at"),
- failed_at=step.get("failed_at"),
- last_error=model_parse(
- RunStepLastError,
- step.get("last_error"),
- ),
- object="thread.run.step",
- run_id=run_id,
- status=step.get("status", "in_progress"),
- thread_id=thread_id,
- type=step_details["type"],
- step_details=step_details_model,
- )
- )
diff --git a/src/openai_responses/endpoints/runs.py b/src/openai_responses/endpoints/runs.py
deleted file mode 100644
index 46a9906..0000000
--- a/src/openai_responses/endpoints/runs.py
+++ /dev/null
@@ -1,485 +0,0 @@
-import json
-from typing import Any, Literal, Optional, Sequence, TypedDict
-
-import httpx
-
-from openai.types.beta.assistant_tool_choice import AssistantToolChoice
-from openai.types.beta.assistant_tool_choice_option import AssistantToolChoiceOption
-from openai.types.beta.assistant_tool_choice_option_param import (
- AssistantToolChoiceOptionParam,
-)
-import respx
-
-from openai.pagination import SyncCursorPage
-from openai.types.beta.assistant import Assistant
-
-
-from openai.types.beta.threads.run import (
- IncompleteDetails,
- Run,
- LastError,
- RequiredAction,
- TruncationStrategy,
- Usage,
-)
-from openai.types.beta.threads.run_create_params import RunCreateParams
-from openai.types.beta.threads.run_update_params import RunUpdateParams
-
-from openai_responses.endpoints.messages import MessagesMock
-
-
-from ._base import StatefulMock, CallContainer
-from ._partial_schemas import PartialRun
-from .assistants import AssistantsMock
-from .run_steps import RunStepsMock
-
-from ..decorators import side_effect
-from ..state import StateStore
-from ..utils import model_dict, model_parse, utcnow_unix_timestamp_s
-
-__all__ = ["RunsMock"]
-
-
-class MultiMethodSequence(TypedDict, total=False):
- create: Sequence[PartialRun]
- retrieve: Sequence[PartialRun]
-
-
-class RunsMock(StatefulMock):
- def __init__(self) -> None:
- super().__init__(
- name="runs_mock",
- endpoint=r"/v1/threads/(?P\w+)/runs",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- },
- {
- "name": "list",
- "method": respx.get,
- "pattern": None,
- "side_effect": self._list,
- },
- {
- "name": "retrieve",
- "method": respx.get,
- "pattern": r"/(?P\w+)",
- "side_effect": self._retrieve,
- },
- {
- "name": "update",
- "method": respx.post,
- "pattern": r"/(?P\w+)",
- "side_effect": self._update,
- },
- {
- "name": "cancel",
- "method": respx.post,
- "pattern": r"/(?P\w+)/cancel",
- "side_effect": self._cancel,
- },
- {
- "name": "submit_tool_outputs",
- "method": respx.post,
- "pattern": r"/(?P\w+)/submit_tool_outputs",
- "side_effect": self._submit_tool_outputs,
- },
- ],
- )
-
- self.create = CallContainer()
- self.list = CallContainer()
- self.retrieve = CallContainer()
- self.update = CallContainer()
- self.cancel = CallContainer()
- self.submit_tool_outputs = CallContainer()
-
- self.steps = RunStepsMock()
-
- def __call__(
- self,
- *,
- sequence: Optional[MultiMethodSequence] = None,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- state_store: Optional[StateStore] = None,
- validate_thread_exists: Optional[bool] = None,
- validate_assistant_exists: Optional[bool] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- sequence=sequence or {},
- latency=latency or 0,
- failures=failures or 0,
- state_store=kwargs["used_state"],
- validate_thread_exists=validate_thread_exists or False,
- validate_assistant_exists=validate_assistant_exists or False,
- )
-
- return self._make_decorator(getter, state_store or StateStore())
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- sequence: MultiMethodSequence,
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_assistant_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
- failures: int = kwargs.get("failures", 0)
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- content: RunCreateParams = json.loads(request.content)
-
- partial_run = (
- self._next_partial_run(sequence, route.call_count, failures, "create") or {}
- )
- if validate_assistant_exists:
- asst = state_store.beta.assistants.get(content["assistant_id"])
-
- if not asst:
- return httpx.Response(status_code=404)
-
- partial_run = self._merge_partial_run_with_assistant(partial_run, asst)
-
- for additional_message in content.get("additional_messages", []) or []:
- parsed = MessagesMock()._parse_message_create_params(
- thread_id,
- additional_message,
- )
- state_store.beta.threads.messages.put(parsed)
-
- run = Run(
- id=self._faker.beta.thread.run.id(),
- assistant_id=content["assistant_id"],
- cancelled_at=partial_run.get("cancelled_at"),
- completed_at=partial_run.get("completed_at"),
- created_at=utcnow_unix_timestamp_s(),
- expires_at=partial_run.get("expires_at"),
- failed_at=partial_run.get("failed_at"),
- incomplete_details=model_parse(
- IncompleteDetails,
- partial_run.get("incomplete_details"),
- ),
- instructions="\n".join(
- [
- partial_run.get("instructions", ""),
- content.get("additional_instructions", "") or "",
- ]
- ),
- last_error=model_parse(
- LastError,
- partial_run.get("last_error"),
- ),
- max_completion_tokens=content.get("max_completion_tokens"),
- max_prompt_tokens=content.get("max_prompt_tokens"),
- metadata=content.get("metadata"),
- model=partial_run.get("model", "gpt-3.5-turbo"),
- object="thread.run",
- required_action=model_parse(
- RequiredAction,
- partial_run.get("required_action"),
- ),
- response_format=AssistantsMock._parse_response_format_params(
- content.get("response_format")
- ),
- started_at=partial_run.get("started_at"),
- status=partial_run.get("status", "queued"),
- thread_id=thread_id,
- tool_choice=self._parse_tool_choice_params(content.get("tool_choice")),
- tools=AssistantsMock._parse_tool_params(partial_run.get("tools")) or [],
- truncation_strategy=model_parse(
- TruncationStrategy, content.get("truncation_strategy")
- ),
- usage=Usage(
- completion_tokens=0,
- prompt_tokens=0,
- total_tokens=0,
- ),
- temperature=content.get("temperature"),
- top_p=content.get("top_p"),
- )
-
- state_store.beta.threads.runs.put(run)
-
- return httpx.Response(status_code=201, json=model_dict(run))
-
- def _retrieve(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- id: str,
- sequence: MultiMethodSequence,
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_assistant_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.retrieve.route = route
- failures: int = kwargs.get("failures", 0)
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- *_, id = request.url.path.split("/")
- run = state_store.beta.threads.runs.get(id)
-
- if not run:
- return httpx.Response(status_code=404)
-
- partial_run = (
- self._next_partial_run(sequence, route.call_count, failures, "retrieve")
- or {}
- )
-
- if validate_assistant_exists:
- asst = state_store.beta.assistants.get(run.assistant_id)
- if asst:
- partial_run = self._merge_partial_run_with_assistant(partial_run, asst)
-
- run.cancelled_at = partial_run.get("cancelled_at", run.cancelled_at)
- run.completed_at = partial_run.get("completed_at", run.completed_at)
- run.expires_at = partial_run.get("expires_at", run.expires_at)
- run.failed_at = partial_run.get("failed_at", run.failed_at)
- run.incomplete_details = (
- model_parse(
- IncompleteDetails,
- partial_run.get("incomplete_details"),
- )
- or run.incomplete_details
- )
- run.instructions = partial_run.get("instructions", run.instructions)
- run.last_error = (
- model_parse(LastError, partial_run.get("last_error")) or run.last_error
- )
- run.model = partial_run.get("model", run.model)
- run.required_action = (
- model_parse(RequiredAction, partial_run.get("required_action"))
- or run.required_action
- )
- run.started_at = partial_run.get("started_at", run.started_at)
- run.status = partial_run.get("status", run.status)
- run.tools = (
- AssistantsMock._parse_tool_params(partial_run.get("tools")) or run.tools
- )
-
- state_store.beta.threads.runs.put(run)
-
- return httpx.Response(status_code=200, json=model_dict(run))
-
- def _list(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- sequence: MultiMethodSequence,
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_assistant_exists: bool,
- **kwargs: Any,
- ):
- self.list.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- if validate_assistant_exists:
- # TODO: what should be done here?
- pass
-
- if sequence:
- # TODO: should there be a method sequence for list?
- pass
-
- limit = request.url.params.get("limit")
- order = request.url.params.get("order")
- after = request.url.params.get("after")
- before = request.url.params.get("before")
-
- runs = SyncCursorPage[Run](
- data=state_store.beta.threads.runs.list(
- thread_id, limit, order, after, before
- )
- )
-
- return httpx.Response(status_code=200, json=model_dict(runs))
-
- def _update(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- id: str,
- sequence: MultiMethodSequence,
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_assistant_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.update.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- if validate_assistant_exists:
- # TODO: what should be done here?
- pass
-
- if sequence:
- # TODO: should there be a method sequence for update?
- pass
-
- *_, id = request.url.path.split("/")
- run = state_store.beta.threads.runs.get(id)
-
- if not run:
- return httpx.Response(status_code=404)
-
- content: RunUpdateParams = json.loads(request.content)
-
- run.metadata = content.get("metadata", run.metadata)
-
- state_store.beta.threads.runs.put(run)
-
- return httpx.Response(status_code=200, json=model_dict(run))
-
- def _cancel(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- id: str,
- sequence: MultiMethodSequence,
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_assistant_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.cancel.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- if validate_assistant_exists:
- # TODO: what should be done here?
- pass
-
- if sequence:
- # TODO: should there be a method sequence for cancel?
- pass
-
- *_, id, _ = request.url.path.split("/")
- run = state_store.beta.threads.runs.get(id)
-
- if not run:
- return httpx.Response(status_code=404)
-
- run.status = "cancelling"
-
- state_store.beta.threads.runs.put(run)
-
- return httpx.Response(status_code=200, json=model_dict(run))
-
- def _submit_tool_outputs(
- self,
- request: httpx.Request,
- route: respx.Route,
- thread_id: str,
- id: str,
- sequence: MultiMethodSequence,
- state_store: StateStore,
- validate_thread_exists: bool,
- validate_assistant_exists: bool,
- **kwargs: Any,
- ) -> httpx.Response:
- self.submit_tool_outputs.route = route
-
- if validate_thread_exists:
- thread = state_store.beta.threads.get(thread_id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- if validate_assistant_exists:
- # TODO: what should be done here?
- pass
-
- if sequence:
- # TODO: should there be a method sequence for submit tools?
- pass
-
- *_, id, _ = request.url.path.split("/")
- run = state_store.beta.threads.runs.get(id)
-
- if not run:
- return httpx.Response(status_code=404)
-
- return httpx.Response(status_code=200, json=model_dict(run))
-
- @staticmethod
- def _next_partial_run(
- sequence: MultiMethodSequence,
- call_count: int,
- failures: int,
- method: Literal["create", "retrieve"],
- ) -> Optional[PartialRun]:
- used_sequence = sequence.get(method, [])
- net_ix = call_count - failures
- try:
- return used_sequence[net_ix]
- except IndexError:
- return None
-
- @staticmethod
- def _merge_partial_run_with_assistant(
- run: Optional[PartialRun],
- asst: Assistant,
- ) -> PartialRun:
- if not run:
- return {
- "instructions": asst.instructions or "",
- "model": asst.model,
- "tools": [model_dict(tool) for tool in asst.tools], # type: ignore
- }
- else:
- return run | {
- "instructions": run.get("instructions", asst.instructions or ""),
- "model": run.get("model", asst.model),
- "tools": run.get("tools", [model_dict(tool) for tool in asst.tools]), # type: ignore
- }
-
- @staticmethod
- def _parse_tool_choice_params(
- params: Optional[AssistantToolChoiceOptionParam],
- ) -> Optional[AssistantToolChoiceOption]:
- return (
- model_parse(AssistantToolChoice, params)
- if isinstance(params, dict)
- else params
- )
diff --git a/src/openai_responses/endpoints/threads.py b/src/openai_responses/endpoints/threads.py
deleted file mode 100644
index 7dcc121..0000000
--- a/src/openai_responses/endpoints/threads.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import json
-from typing import Any, Optional
-
-import httpx
-import respx
-
-from openai.types.beta.thread import Thread, ToolResources
-from openai.types.beta.thread_deleted import ThreadDeleted
-from openai.types.beta.thread_update_params import ThreadUpdateParams
-from openai.types.beta.thread_create_params import ThreadCreateParams
-
-from ._base import StatefulMock, CallContainer
-from .messages import MessagesMock
-from .runs import RunsMock
-
-from ..decorators import side_effect
-from ..state import StateStore
-from ..utils import model_dict, model_parse, utcnow_unix_timestamp_s
-
-__all__ = ["ThreadsMock"]
-
-
-class ThreadsMock(StatefulMock):
- def __init__(self) -> None:
- super().__init__(
- name="threads_mock",
- endpoint="/v1/threads",
- route_registrations=[
- {
- "name": "create",
- "method": respx.post,
- "pattern": None,
- "side_effect": self._create,
- },
- {
- "name": "retrieve",
- "method": respx.get,
-                    "pattern": r"/(?P<id>\w+)",
- "side_effect": self._retrieve,
- },
- {
- "name": "update",
- "method": respx.post,
-                    "pattern": r"/(?P<id>\w+)",
- "side_effect": self._update,
- },
- {
- "name": "delete",
- "method": respx.delete,
-                    "pattern": r"/(?P<id>\w+)",
- "side_effect": self._delete,
- },
- ],
- )
-
- # NOTE: these are explicitly defined to help with autocomplete and type hints
- self.create = CallContainer()
- self.retrieve = CallContainer()
- self.update = CallContainer()
- self.delete = CallContainer()
-
- self.messages = MessagesMock()
- self.runs = RunsMock()
-
- def __call__(
- self,
- *,
- latency: Optional[float] = None,
- failures: Optional[int] = None,
- state_store: Optional[StateStore] = None,
- ):
- def getter(*args: Any, **kwargs: Any):
- return dict(
- latency=latency or 0,
- failures=failures or 0,
- state_store=kwargs["used_state"],
- )
-
- return self._make_decorator(getter, state_store or StateStore())
-
- @side_effect
- def _create(
- self,
- request: httpx.Request,
- route: respx.Route,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.create.route = route
-
- content: ThreadCreateParams = json.loads(request.content)
-
- thread = Thread(
- id=self._faker.beta.thread.id(),
- created_at=utcnow_unix_timestamp_s(),
- tool_resources=model_parse(ToolResources, content.get("tool_resources")),
- metadata=content.get("metadata"),
- object="thread",
- )
- messages = [
- self.messages._parse_message_create_params(thread.id, m)
- for m in content.get("messages", [])
- ]
-
- state_store.beta.threads.put(thread)
- for message in messages:
- state_store.beta.threads.messages.put(message)
-
- return httpx.Response(status_code=201, json=model_dict(thread))
-
- @side_effect
- def _retrieve(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.retrieve.route = route
-
- *_, id = request.url.path.split("/")
- thread = state_store.beta.threads.get(id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- else:
- return httpx.Response(status_code=200, json=model_dict(thread))
-
- @side_effect
- def _update(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.update.route = route
-
- *_, id = request.url.path.split("/")
- content: ThreadUpdateParams = json.loads(request.content)
-
- thread = state_store.beta.threads.get(id)
-
- if not thread:
- return httpx.Response(status_code=404)
-
- thread.tool_resources = (
- model_parse(ToolResources, content.get("tool_resources"))
- or thread.tool_resources
- )
- thread.metadata = content.get("metadata", thread.metadata)
-
- state_store.beta.threads.put(thread)
-
- return httpx.Response(status_code=200, json=model_dict(thread))
-
- @side_effect
- def _delete(
- self,
- request: httpx.Request,
- route: respx.Route,
- id: str,
- state_store: StateStore,
- **kwargs: Any,
- ) -> httpx.Response:
- self.delete.route = route
-
- *_, id = request.url.path.split("/")
- deleted = state_store.beta.threads.delete(id)
-
- return httpx.Response(
- status_code=200,
- json=model_dict(
- ThreadDeleted(id=id, deleted=deleted, object="thread.deleted")
- ),
- )
diff --git a/src/openai_responses/helpers/builders/_base.py b/src/openai_responses/helpers/builders/_base.py
new file mode 100644
index 0000000..c5ba2aa
--- /dev/null
+++ b/src/openai_responses/helpers/builders/_base.py
@@ -0,0 +1,16 @@
+from typing import Optional, Type
+
+import httpx
+
+from ..._routes._base import Route
+from ..._types.generics import M, P
+
+__all__ = ["_generic_builder"]
+
+
+def _generic_builder(
+ route: Type[Route[M, P]],
+ request: httpx.Request,
+ extra: Optional[P] = None,
+) -> M:
+ return getattr(route, "_build")(extra or {}, request)
diff --git a/src/openai_responses/helpers/builders/assistants.py b/src/openai_responses/helpers/builders/assistants.py
new file mode 100644
index 0000000..0ebcfbf
--- /dev/null
+++ b/src/openai_responses/helpers/builders/assistants.py
@@ -0,0 +1,19 @@
+from typing import Optional
+
+import httpx
+
+from openai.types.beta.assistant import Assistant
+
+from ._base import _generic_builder
+from ..._routes.assistants import AssistantCreateRoute
+from ..._types.partials.assistants import PartialAssistant
+
+__all__ = ["assistant_from_create_request"]
+
+
+def assistant_from_create_request(
+ request: httpx.Request,
+ *,
+ extra: Optional[PartialAssistant] = None,
+) -> Assistant:
+ return _generic_builder(AssistantCreateRoute, request, extra)
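
These builder helpers turn a captured `httpx.Request` into a fully-formed response model, with an optional `extra` partial for overriding generated fields; the same pattern applies to the chat, embeddings, runs, and threads builders below. A minimal sketch of `assistant_from_create_request`, assuming the public import path mirrors the file layout above and using an illustrative `id` override:

```python
import httpx

from openai_responses.helpers.builders.assistants import assistant_from_create_request

# Simulate the request an OpenAI client would send to create an assistant.
request = httpx.Request(
    "POST",
    "https://api.openai.com/v1/assistants",
    json={"model": "gpt-4-turbo", "instructions": "You are a helpful assistant."},
)

# Build an `Assistant` from the request body; `extra` (here, a hypothetical
# fixed ID) overrides any generated field.
assistant = assistant_from_create_request(request, extra={"id": "asst_abc123"})
assert assistant.model == "gpt-4-turbo"
```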
diff --git a/src/openai_responses/helpers/builders/chat.py b/src/openai_responses/helpers/builders/chat.py
new file mode 100644
index 0000000..ba16253
--- /dev/null
+++ b/src/openai_responses/helpers/builders/chat.py
@@ -0,0 +1,19 @@
+from typing import Optional
+
+import httpx
+
+from openai.types.chat.chat_completion import ChatCompletion
+from ..._routes.chat import ChatCompletionsCreateRoute
+
+from ._base import _generic_builder
+from ..._types.partials.chat import PartialChatCompletion
+
+__all__ = ["chat_completion_from_create_request"]
+
+
+def chat_completion_from_create_request(
+ request: httpx.Request,
+ *,
+ extra: Optional[PartialChatCompletion] = None,
+) -> ChatCompletion:
+ return _generic_builder(ChatCompletionsCreateRoute, request, extra)
diff --git a/src/openai_responses/helpers/builders/embeddings.py b/src/openai_responses/helpers/builders/embeddings.py
new file mode 100644
index 0000000..18524ae
--- /dev/null
+++ b/src/openai_responses/helpers/builders/embeddings.py
@@ -0,0 +1,19 @@
+from typing import Optional
+
+import httpx
+
+from openai.types.create_embedding_response import CreateEmbeddingResponse
+
+from ._base import _generic_builder
+from ..._routes.embeddings import EmbeddingsCreateRoute
+from ..._types.partials.embeddings import PartialCreateEmbeddingResponse
+
+__all__ = ["embedding_create_response_from_create_request"]
+
+
+def embedding_create_response_from_create_request(
+ request: httpx.Request,
+ *,
+ extra: Optional[PartialCreateEmbeddingResponse] = None,
+) -> CreateEmbeddingResponse:
+ return _generic_builder(EmbeddingsCreateRoute, request, extra)
diff --git a/src/openai_responses/helpers/builders/messages.py b/src/openai_responses/helpers/builders/messages.py
new file mode 100644
index 0000000..fdda967
--- /dev/null
+++ b/src/openai_responses/helpers/builders/messages.py
@@ -0,0 +1,36 @@
+from typing import Optional
+
+import httpx
+
+from openai.types.beta.threads.message import Message
+
+from ._base import _generic_builder
+from ..._routes.messages import MessageCreateRoute
+from ..._types.partials.messages import PartialMessage
+
+from ..._utils.faker import faker
+from ..._utils.serde import model_parse
+from ..._utils.time import utcnow_unix_timestamp_s
+
+__all__ = ["message_from_create_request", "build_message"]
+
+
+def message_from_create_request(
+ thread_id: str,
+ request: httpx.Request,
+ *,
+ extra: Optional[PartialMessage] = None,
+) -> Message:
+ partial: PartialMessage = {"thread_id": thread_id}
+ if extra:
+ partial |= extra
+ return _generic_builder(MessageCreateRoute, request, partial)
+
+
+def build_message(partial: PartialMessage) -> Message:
+ default: PartialMessage = {
+ "id": faker.beta.thread.message.id(),
+ "created_at": utcnow_unix_timestamp_s(),
+ "object": "thread.message",
+ }
+ return model_parse(Message, default | partial)
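
`build_message` is the partial-only counterpart: it defaults `id`, `created_at`, and `object`, so the partial must carry every other field that `Message` requires. A minimal sketch with illustrative values (the exact shape accepted by `PartialMessage` is defined elsewhere in the package):

```python
from openai_responses.helpers.builders.messages import build_message

message = build_message(
    {
        "thread_id": "thread_abc123",  # illustrative ID
        "role": "assistant",
        "status": "completed",
        "content": [
            {"type": "text", "text": {"value": "Hello, world!", "annotations": []}}
        ],
    }
)
assert message.object == "thread.message"
```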
diff --git a/src/openai_responses/helpers/builders/run_steps.py b/src/openai_responses/helpers/builders/run_steps.py
new file mode 100644
index 0000000..a0b6100
--- /dev/null
+++ b/src/openai_responses/helpers/builders/run_steps.py
@@ -0,0 +1,22 @@
+from openai.types.beta.threads.runs.run_step import RunStep
+
+from ..._types.partials.run_steps import PartialRunStep
+
+from ..._utils.faker import faker
+from ..._utils.serde import model_parse
+from ..._utils.time import utcnow_unix_timestamp_s
+
+
+__all__ = ["build_run_step"]
+
+
+def build_run_step(partial: PartialRunStep) -> RunStep:
+ return model_parse(
+ RunStep,
+ {
+ "id": faker.beta.thread.run.step.id(),
+ "created_at": utcnow_unix_timestamp_s(),
+ "object": "thread.run.step",
+ }
+ | partial,
+ )
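
`build_run_step` follows the same recipe as `build_message`: only `id`, `created_at`, and `object` are defaulted, so the partial supplies the remaining required `RunStep` fields. A sketch with illustrative IDs:

```python
from openai_responses.helpers.builders.run_steps import build_run_step

step = build_run_step(
    {
        "assistant_id": "asst_abc123",
        "thread_id": "thread_abc123",
        "run_id": "run_abc123",
        "status": "completed",
        "type": "message_creation",
        "step_details": {
            "type": "message_creation",
            "message_creation": {"message_id": "msg_abc123"},
        },
    }
)
assert step.type == "message_creation"
```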
diff --git a/src/openai_responses/helpers/builders/runs.py b/src/openai_responses/helpers/builders/runs.py
new file mode 100644
index 0000000..5fc0a71
--- /dev/null
+++ b/src/openai_responses/helpers/builders/runs.py
@@ -0,0 +1,23 @@
+from typing import Optional
+
+import httpx
+
+from openai.types.beta.threads.run import Run
+
+from ._base import _generic_builder
+from ..._routes.runs import RunCreateRoute
+from ..._types.partials.runs import PartialRun
+
+__all__ = ["run_from_create_request"]
+
+
+def run_from_create_request(
+ thread_id: str,
+ request: httpx.Request,
+ *,
+ extra: Optional[PartialRun] = None,
+) -> Run:
+ partial: PartialRun = {"thread_id": thread_id}
+ if extra:
+ partial |= extra
+ return _generic_builder(RunCreateRoute, request, partial)
diff --git a/src/openai_responses/helpers/builders/threads.py b/src/openai_responses/helpers/builders/threads.py
new file mode 100644
index 0000000..293c79b
--- /dev/null
+++ b/src/openai_responses/helpers/builders/threads.py
@@ -0,0 +1,19 @@
+from typing import Optional
+
+import httpx
+
+from openai.types.beta.thread import Thread
+
+from ._base import _generic_builder
+from ..._routes.threads import ThreadCreateRoute
+from ..._types.partials.threads import PartialThread
+
+__all__ = ["thread_from_create_request"]
+
+
+def thread_from_create_request(
+ request: httpx.Request,
+ *,
+ extra: Optional[PartialThread] = None,
+) -> Thread:
+ return _generic_builder(ThreadCreateRoute, request, extra)
diff --git a/src/openai_responses/helpers/state_store.py b/src/openai_responses/helpers/state_store.py
new file mode 100644
index 0000000..db544f6
--- /dev/null
+++ b/src/openai_responses/helpers/state_store.py
@@ -0,0 +1,39 @@
+from typing import Optional
+
+from .._mock import OpenAIMock
+from .._stores.state_store import Resource, StateStore
+
+
+def add_resource_to_state_store(
+ resource: Resource,
+ *,
+ mock: Optional[OpenAIMock] = None,
+ state_store: Optional[StateStore] = None,
+):
+    """Add a resource to the state store being used for a test. If an object with the same resource
+    ID already exists in the state store, it will be overwritten.
+
+ Args:
+ resource (Resource): An OpenAI resource
+ mock (Optional[OpenAIMock], optional): Mock associated with test. Defaults to None.
+ state_store (Optional[StateStore], optional): State store associated with test. Defaults to None.
+
+ Raises:
+        ValueError: If neither a mock nor a state store is provided
+        ValueError: If both a mock and a state store are provided
+ """
+ if not mock and not state_store:
+ raise ValueError(
+ "Either a mock instance or a state store instance must be provided"
+ )
+
+ if mock and state_store:
+ raise ValueError(
+            "Provide either a mock instance or a state store instance, not both"
+ )
+
+ if mock:
+ mock._state._blind_put(resource)
+
+ if state_store:
+ state_store._blind_put(resource)
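
Together with the builders, this helper lets a test seed state before the code under test runs. A minimal sketch, assuming `Message` is one of the accepted `Resource` types and reusing the illustrative `build_message` partial from above:

```python
from openai_responses import OpenAIMock
from openai_responses.helpers.builders.messages import build_message
from openai_responses.helpers.state_store import add_resource_to_state_store

openai_mock = OpenAIMock()
message = build_message(
    {
        "thread_id": "thread_abc123",
        "role": "user",
        "status": "completed",
        "content": [{"type": "text", "text": {"value": "Hi", "annotations": []}}],
    }
)

# Exactly one of `mock` or `state_store` may be passed; passing neither or
# both raises ValueError.
add_resource_to_state_store(message, mock=openai_mock)
```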
diff --git a/src/openai_responses/plugin.py b/src/openai_responses/plugin.py
index 18b5727..0d2a5ca 100644
--- a/src/openai_responses/plugin.py
+++ b/src/openai_responses/plugin.py
@@ -1,94 +1,15 @@
import pytest
-from openai_responses import (
- AssistantsMock,
- ChatCompletionMock,
- EmbeddingsMock,
- FilesMock,
- ThreadsMock,
- MessagesMock,
- RunsMock,
- RunStepsMock,
-)
+from . import OpenAIMock
def pytest_configure(config: pytest.Config):
config.addinivalue_line(
"markers",
- "assistants_mock: OpenAI assistants API mocker",
+ "openai_mock: OpenAI API mocker object",
)
- config.addinivalue_line(
- "markers",
- "chat_completion_mock: OpenAI chat completion API mocker",
- )
-
- config.addinivalue_line(
- "markers",
- "embeddings_mock: OpenAI embeddings API mocker",
- )
-
- config.addinivalue_line(
- "markers",
- "files_mock: OpenAI files API mocker",
- )
-
- config.addinivalue_line(
- "markers",
- "threads_mock: OpenAI threads API mocker",
- )
-
- config.addinivalue_line(
- "markers",
- "messages_mock: OpenAI messages API mocker",
- )
-
- config.addinivalue_line(
- "markers",
- "runs_mock: OpenAI runs API mocker",
- )
-
- config.addinivalue_line(
- "markers",
- "run_steps_mock: OpenAI runs steps API mocker",
- )
-
-
-@pytest.fixture()
-def assistants_mock() -> AssistantsMock:
- return AssistantsMock()
-
-
-@pytest.fixture()
-def chat_completion_mock() -> ChatCompletionMock:
- return ChatCompletionMock()
-
-
-@pytest.fixture()
-def embeddings_mock() -> EmbeddingsMock:
- return EmbeddingsMock()
-
-
-@pytest.fixture()
-def files_mock() -> FilesMock:
- return FilesMock()
-
-
-@pytest.fixture()
-def threads_mock() -> ThreadsMock:
- return ThreadsMock()
-
-
-@pytest.fixture()
-def messages_mock() -> MessagesMock:
- return MessagesMock()
-
-
-@pytest.fixture()
-def runs_mock() -> RunsMock:
- return RunsMock()
-
@pytest.fixture()
-def run_steps_mock() -> RunStepsMock:
- return RunStepsMock()
+def openai_mock() -> OpenAIMock:
+ return OpenAIMock()
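
The plugin now registers a single `openai_mock` fixture in place of the eight per-API fixtures. A minimal sketch of a test requesting it; how routes are configured on the mock is outside this diff, so the body only checks that the fixture resolves:

```python
from openai_responses import OpenAIMock


def test_openai_mock_fixture(openai_mock: OpenAIMock):
    # The plugin yields a fresh OpenAIMock per test.
    assert isinstance(openai_mock, OpenAIMock)
```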
diff --git a/src/openai_responses/tokens.py b/src/openai_responses/tokens.py
deleted file mode 100644
index f0de1ab..0000000
--- a/src/openai_responses/tokens.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import tiktoken
-
-
-def count_tokens(model: str, text: str) -> int:
- encoding = tiktoken.encoding_for_model(model)
- return len(encoding.encode(text))
diff --git a/src/openai_responses/utils.py b/src/openai_responses/utils.py
deleted file mode 100644
index e168970..0000000
--- a/src/openai_responses/utils.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import datetime as dt
-from typing import Any, List, Optional, Type, TypeVar
-
-from openai import BaseModel
-
-T = TypeVar("T")
-M = TypeVar("M", bound=BaseModel)
-
-
-def utcnow_unix_timestamp_s() -> int:
- return int(dt.datetime.now().timestamp())
-
-
-def model_dict(m: BaseModel) -> dict[str, Any]:
- if hasattr(m, "model_dump"):
- return getattr(m, "model_dump")()
- else:
- return getattr(m, "dict")()
-
-
-def model_parse(m: Type[M], d: Optional[object]) -> Optional[M]:
- if not d:
- return None
- if hasattr(m, "model_validate"):
- return getattr(m, "model_validate")(d)
- else:
- return getattr(m, "parse_obj")(d)
-
-
-def remove_none(ls: List[Optional[T]]) -> List[T]:
- return [el for el in ls if el]
diff --git a/tests/unit/test_decorators.py b/tests/unit/test_decorators.py
deleted file mode 100644
index 18b7ae2..0000000
--- a/tests/unit/test_decorators.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import time
-from typing import Any
-
-import httpx
-import respx
-
-from openai_responses.decorators import side_effect, unwrap
-
-
-def test_side_effect():
- @side_effect
- def wrapped_side_effect(route: respx.Route, **kwargs: Any) -> httpx.Response:
- return httpx.Response(status_code=200)
-
- res = wrapped_side_effect(respx.Route())
- assert res.status_code == 200
-
- res = wrapped_side_effect(respx.Route(), failures=2)
- assert res.status_code == 500
-
- t1 = time.time()
- res = wrapped_side_effect(respx.Route(), latency=1)
- t2 = time.time()
- assert res.status_code == 200
- assert (t2 - t1) > 1
-
-
-def test_unwrap():
- global i
- i = 0
-
- def foo_decorator(func: Any):
- def wrapper():
- global i
- i += 1
- return func()
-
- return wrapper
-
- @foo_decorator
- @foo_decorator
- def my_function():
- global i
- i += 1
- return i
-
- assert my_function() == 3
-
- i = 0
-
- original = unwrap(my_function)
-
- assert original() == 1
diff --git a/tests/unit/test_state.py b/tests/unit/test_state_store.py
similarity index 99%
rename from tests/unit/test_state.py
rename to tests/unit/test_state_store.py
index d8ffc03..f0d4e03 100644
--- a/tests/unit/test_state.py
+++ b/tests/unit/test_state_store.py
@@ -5,7 +5,7 @@
from openai.types.beta.threads.message import Message
from openai.types.beta.threads.run import Run
-from openai_responses.state import StateStore
+from openai_responses import StateStore
@pytest.fixture
diff --git a/tests/unit/test_tokens.py b/tests/unit/test_tokens.py
deleted file mode 100644
index 878c4e3..0000000
--- a/tests/unit/test_tokens.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from openai_responses.tokens import count_tokens
-
-
-def test_count_tokens():
- model = "gpt-4-turbo"
- text = "tiktoken is great!"
- assert count_tokens(model, text) == 6
diff --git a/tox.ini b/tox.ini
index 73ebec3..aad0f2b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -27,4 +27,3 @@ commands =
[testenv:examples]
commands =
pytest examples -v {posargs}
-