feat(routes): add moderation route support #61

Merged 2 commits on Aug 26, 2024
1 change: 1 addition & 0 deletions README.md
@@ -10,6 +10,7 @@ Pytest plugin for automatically mocking OpenAI requests. Powered by [RESPX](http
- [Embeddings](https://platform.openai.com/docs/api-reference/embeddings)
- [Files](https://platform.openai.com/docs/api-reference/files)
- [Models](https://platform.openai.com/docs/api-reference/models)
- [Moderations](https://platform.openai.com/docs/api-reference/moderations)
- [Assistants](https://platform.openai.com/docs/api-reference/assistants)
- [Threads](https://platform.openai.com/docs/api-reference/threads)
- [Messages](https://platform.openai.com/docs/api-reference/messages)
2 changes: 1 addition & 1 deletion docs/coverage.md
@@ -43,7 +43,7 @@ The end-goal of this library is to eventually support all OpenAI API routes. See
| Retrieve model | :material-check:{ .green } | - | Stateful |
| Delete a fine-tuned model | :material-close:{ .red } | - | - |
| **Moderations** |
| Create moderation | :material-close:{ .red } | - | - |
| Create moderation | :material-check:{ .green } | - | Stateless |
| **Assistants** |
| Create assistant | :material-check:{ .green } | - | Stateful |
| List assistants | :material-check:{ .green } | - | Stateful |
75 changes: 75 additions & 0 deletions examples/test_moderations.py
@@ -0,0 +1,75 @@
import openai
import pytest

import openai_responses
from openai_responses import OpenAIMock


@pytest.fixture()
def client():
return openai.Client(api_key="sk-fake123")


@openai_responses.mock()
def test_create_moderation_returns_default(
openai_mock: OpenAIMock, client: openai.Client
):
expected_prefix = "modr-"
expect_model = "text-moderation-007"

actual = client.moderations.create(input="Test input")

assert actual.id.startswith(
expected_prefix
), f"Expected id to start with {expected_prefix}"
assert actual.model == expect_model
assert len(actual.results) == 0
assert openai_mock.moderations.create.route.call_count == 1


@openai_responses.mock()
def test_create_moderation_applies_defaults_if_partial_response_provided(
openai_mock: OpenAIMock, client: openai.Client
):
openai_mock.moderations.create.response = {
"results": [
{
"flagged": True,
"categories": {"harassment": True, "violence/graphic": True},
"category_scores": {"harassment": 0.9, "violence/graphic": 0.8},
}
]
}

actual = client.moderations.create(input="Test input")

assert len(actual.results) == 1
assert actual.results[0].model_dump(by_alias=True) == {
"flagged": True,
"categories": {
"harassment": True,
"harassment/threatening": False,
"hate": False,
"hate/threatening": False,
"self-harm": False,
"self-harm/instructions": False,
"self-harm/intent": False,
"sexual": False,
"sexual/minors": False,
"violence": False,
"violence/graphic": True,
},
"category_scores": {
"harassment": 0.9,
"harassment/threatening": 0.0,
"hate": 0.0,
"hate/threatening": 0.0,
"self-harm": 0.0,
"self-harm/instructions": 0.0,
"self-harm/intent": 0.0,
"sexual": 0.0,
"sexual/minors": 0.0,
"violence": 0.0,
"violence/graphic": 0.8,
},
}
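
Every top-level field of the partial response is optional, so `id` and `model` can be overridden in the same way as `results`. A further test along these lines (not part of this PR; it reuses the imports and `client` fixture defined at the top of this example file, and the fallback behaviour is assumed from the route defaults shown later in this diff) might look like:

```python
@openai_responses.mock()
def test_create_moderation_full_override(
    openai_mock: OpenAIMock, client: openai.Client
):
    # Override id and model alongside a partial result; unspecified
    # categories and scores still fall back to the route defaults.
    openai_mock.moderations.create.response = {
        "id": "modr-custom123",
        "model": "text-moderation-007",
        "results": [{"flagged": False}],
    }

    actual = client.moderations.create(input="Test input")

    assert actual.id == "modr-custom123"
    assert actual.model == "text-moderation-007"
    assert actual.results[0].flagged is False
```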
11 changes: 10 additions & 1 deletion src/openai_responses/_mock.py
@@ -4,7 +4,14 @@

import respx

from ._routes import BetaRoutes, ChatRoutes, EmbeddingsRoutes, FileRoutes, ModelRoutes
from ._routes import (
BetaRoutes,
ChatRoutes,
EmbeddingsRoutes,
FileRoutes,
ModelRoutes,
ModerationsRoutes,
)
from .stores import StateStore


@@ -14,6 +21,7 @@ class OpenAIMock:
embeddings: EmbeddingsRoutes
files: FileRoutes
models: ModelRoutes
moderations: ModerationsRoutes

def __init__(
self,
@@ -50,6 +58,7 @@ def _init_routes(self) -> None:
self.embeddings = EmbeddingsRoutes(self._router)
self.files = FileRoutes(self._router, self._state)
self.models = ModelRoutes(self._router, self._state)
self.moderations = ModerationsRoutes(self._router)

# NOTE: need to sort routes to avoid match conflicts
self._router.routes._routes.sort(key=lambda r: len(repr(r._pattern)), reverse=True) # type: ignore
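
The `# NOTE` about sorting exists because these routes are registered with `url__regex` patterns, and a broad pattern registered earlier can match a request intended for a more specific route. Sorting longest-first lets the specific pattern win. A toy sketch of the conflict (the pattern strings here are illustrative, not the library's actual ones, and the real sort key is the `repr` of the compiled respx pattern):

```python
import re

patterns = [r"/files", r"/files/(?P<file_id>[^/]+)/content"]
request_path = "/v1/files/file-abc123/content"

# Unsorted: the broad "/files" pattern matches first and shadows the
# more specific route.
first_match = next(p for p in patterns if re.search(p, request_path))
assert first_match == r"/files"

# Sorted longest-first (what _init_routes does): the specific route wins.
longest_first = sorted(patterns, key=len, reverse=True)
first_match = next(p for p in longest_first if re.search(p, request_path))
assert first_match == r"/files/(?P<file_id>[^/]+)/content"
```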
15 changes: 14 additions & 1 deletion src/openai_responses/_routes/__init__.py
@@ -12,10 +12,18 @@
FileRetrieveContentRoute,
)
from .models import ModelListRoute, ModelRetrieveRoute
from .moderation import ModerationCreateRoute

from .beta import BetaRoutes

__all__ = ["BetaRoutes", "ChatRoutes", "EmbeddingsRoutes", "FileRoutes", "ModelRoutes"]
__all__ = [
"BetaRoutes",
"ChatRoutes",
"EmbeddingsRoutes",
"FileRoutes",
"ModelRoutes",
"ModerationsRoutes",
]


class ChatRoutes:
@@ -46,3 +54,8 @@ class ModelRoutes:
def __init__(self, router: respx.MockRouter, state: StateStore) -> None:
self.list = ModelListRoute(router, state)
self.retrieve = ModelRetrieveRoute(router, state)


class ModerationsRoutes:
def __init__(self, router: respx.MockRouter) -> None:
self.create = ModerationCreateRoute(router)
2 changes: 1 addition & 1 deletion src/openai_responses/_routes/_base.py
@@ -70,7 +70,7 @@ def _handler(request: httpx.Request, route: respx.Route, **kwargs: Any):
assert not callable(self._response)
return httpx.Response(
status_code=self._status_code,
json=model_dict(self._build(self._response, request)),
json=model_dict(self._build(self._response, request), by_alias=True),
)

return _handler
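
Passing `by_alias=True` here matters for moderations in particular: the SDK's `Categories` and `CategoryScores` models use snake_case field names with aliases such as `"violence/graphic"`, and the mocked JSON body has to use the aliased keys for the client to parse it. A minimal sketch, assuming pydantic v2 and the field/alias names in the current `openai` package:

```python
from openai.types.moderation import Categories

categories = Categories.model_validate({
    "harassment": True,
    "harassment/threatening": False,
    "hate": False,
    "hate/threatening": False,
    "self-harm": False,
    "self-harm/instructions": False,
    "self-harm/intent": False,
    "sexual": False,
    "sexual/minors": False,
    "violence": False,
    "violence/graphic": False,
})

# Without by_alias the dump uses snake_case keys, which is not the wire format:
assert "harassment/threatening" not in categories.model_dump()

# With by_alias the keys match what the real API (and the client) expects:
assert categories.model_dump(by_alias=True)["harassment/threatening"] is False
```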
81 changes: 81 additions & 0 deletions src/openai_responses/_routes/moderation.py
@@ -0,0 +1,81 @@
from openai.types.moderation import Moderation, Categories, CategoryScores
from openai.types.moderation_create_response import ModerationCreateResponse

import httpx
import respx

from ._base import StatelessRoute

from .._types.partials.moderation import (
PartialModerationCreateResponse,
PartialCategories,
PartialCategoryScores,
)

from .._utils.serde import model_parse
from .._utils.faker import faker

__all__ = ["ModerationCreateRoute"]

_default_categories: PartialCategories = {
"harassment": False,
"harassment/threatening": False,
"hate": False,
"hate/threatening": False,
"self-harm": False,
"self-harm/instructions": False,
"self-harm/intent": False,
"sexual": False,
"sexual/minors": False,
"violence": False,
"violence/graphic": False,
}

_default_category_scores: PartialCategoryScores = {
"harassment": 0.0,
"harassment/threatening": 0.0,
"hate": 0.0,
"hate/threatening": 0.0,
"self-harm": 0.0,
"self-harm/instructions": 0.0,
"self-harm/intent": 0.0,
"sexual": 0.0,
"sexual/minors": 0.0,
"violence": 0.0,
"violence/graphic": 0.0,
}


class ModerationCreateRoute(
StatelessRoute[ModerationCreateResponse, PartialModerationCreateResponse]
):
def __init__(self, router: respx.MockRouter) -> None:
super().__init__(route=router.post(url__regex="/moderations"), status_code=200)

@staticmethod
def _build(
partial: PartialModerationCreateResponse, request: httpx.Request
) -> ModerationCreateResponse:
partial_results = partial.get("results", [])
moderation_results = [
Moderation(
categories=model_parse(
Categories,
_default_categories
| partial_result.get("categories", _default_categories),
),
category_scores=model_parse(
CategoryScores,
_default_category_scores
| partial_result.get("category_scores", _default_category_scores),
),
flagged=partial_result.get("flagged", False),
)
for partial_result in partial_results
]

return ModerationCreateResponse(
id=partial.get("id", faker.moderation.id()),
model=partial.get("model", "text-moderation-007"),
results=moderation_results,
)
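
`_build` merges user-supplied partials over the defaults with the dict-union operator, so any category or score a test does not set falls back to `False` or `0.0`. A toy illustration of that merge (requires Python 3.9+ for `dict | dict`):

```python
defaults = {"harassment": False, "violence/graphic": False}
partial = {"violence/graphic": True}

# The right-hand operand wins on key conflicts; everything else keeps the default.
merged = defaults | partial
assert merged == {"harassment": False, "violence/graphic": True}
```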
54 changes: 54 additions & 0 deletions src/openai_responses/_types/partials/moderation.py
@@ -0,0 +1,54 @@
from typing import List, TypedDict
from typing_extensions import NotRequired

__all__ = [
"PartialModerationCreateResponse",
"PartialCategories",
"PartialCategoryScores",
]

PartialCategories = TypedDict(
"PartialCategories",
{
"harassment": NotRequired[bool],
"harassment/threatening": NotRequired[bool],
"hate": NotRequired[bool],
"hate/threatening": NotRequired[bool],
"self-harm": NotRequired[bool],
"self-harm/instructions": NotRequired[bool],
"self-harm/intent": NotRequired[bool],
"sexual": NotRequired[bool],
"sexual/minors": NotRequired[bool],
"violence": NotRequired[bool],
"violence/graphic": NotRequired[bool],
},
)

PartialCategoryScores = TypedDict(
"PartialCategoryScores",
{
"harassment": NotRequired[float],
"harassment/threatening": NotRequired[float],
"hate": NotRequired[float],
"hate/threatening": NotRequired[float],
"self-harm": NotRequired[float],
"self-harm/instructions": NotRequired[float],
"self-harm/intent": NotRequired[float],
"sexual": NotRequired[float],
"sexual/minors": NotRequired[float],
"violence": NotRequired[float],
"violence/graphic": NotRequired[float],
},
)


class PartialModeration(TypedDict):
categories: NotRequired[PartialCategories]
category_scores: NotRequired[PartialCategoryScores]
flagged: NotRequired[bool]


class PartialModerationCreateResponse(TypedDict):
id: NotRequired[str]
model: NotRequired[str]
results: NotRequired[List[PartialModeration]]
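
The functional `TypedDict(...)` form is used here because keys such as `"self-harm"` and `"violence/graphic"` are not valid Python identifiers, so they cannot be declared with the class-based syntax. A partial response is then just a plain dict; for example (a hypothetical value, not taken from the PR):

```python
from openai_responses._types.partials.moderation import PartialModerationCreateResponse

partial: PartialModerationCreateResponse = {
    "results": [
        {"flagged": True, "categories": {"violence/graphic": True}},
    ],
}
```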
6 changes: 6 additions & 0 deletions src/openai_responses/_utils/faker.py
@@ -99,10 +99,16 @@ def id(self) -> str:
return gen_id("vsfb")


class ModerationProvider:
def id(self) -> str:
return gen_id(prefix="modr", sep="-")


class Faker:
def __init__(self) -> None:
self.chat = ChatProvider()
self.file = FileProvider()
self.moderation = ModerationProvider()
self.beta = Faker.BetaProviders()

class BetaProviders:
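
The new provider only fixes the prefix and separator; the suffix comes from `gen_id`, which is not shown in this diff. The observable contract, matching the assertion in `examples/test_moderations.py`, is simply:

```python
from openai_responses._utils.faker import faker

fake_id = faker.moderation.id()
assert fake_id.startswith("modr-")
```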
12 changes: 6 additions & 6 deletions src/openai_responses/_utils/serde.py
@@ -13,15 +13,15 @@ def json_loads(b: bytes) -> Any:
return {k: v for k, v in d.items() if v is not None}


def model_dict(m: BaseModel) -> dict[str, Any]:
def model_dict(m: BaseModel, **kwargs: Any) -> dict[str, Any]:
if hasattr(m, "model_dump"):
return getattr(m, "model_dump")()
return getattr(m, "model_dump")(**kwargs)
else:
return getattr(m, "dict")()
return getattr(m, "dict")(**kwargs)


def model_parse(m: Type[M], d: object) -> M:
def model_parse(m: Type[M], d: object, **kwargs: Any) -> M:
if hasattr(m, "model_validate"):
return getattr(m, "model_validate")(d)
return getattr(m, "model_validate")(d, **kwargs)
else:
return getattr(m, "parse_obj")(d)
return getattr(m, "parse_obj")(d, **kwargs)
19 changes: 19 additions & 0 deletions src/openai_responses/helpers/builders/moderations.py
@@ -0,0 +1,19 @@
from typing import Optional

import httpx

from openai.types.moderation_create_response import ModerationCreateResponse

from ._base import _generic_builder
from ..._routes.moderation import ModerationCreateRoute
from ..._types.partials.moderation import PartialModerationCreateResponse

__all__ = ["moderation_create_response_from_create_request"]


def moderation_create_response_from_create_request(
request: httpx.Request,
*,
extra: Optional[PartialModerationCreateResponse] = None,
) -> ModerationCreateResponse:
return _generic_builder(ModerationCreateRoute, request, extra)
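
The builder constructs a complete `ModerationCreateResponse` from a captured request, layering the optional `extra` partial over the route defaults, which is useful when writing custom response handlers. A minimal sketch in isolation (the request here is hand-built for illustration, and it is assumed that `_generic_builder` merges `extra` into the defaults the same way the other builder helpers do):

```python
import httpx

from openai_responses.helpers.builders.moderations import (
    moderation_create_response_from_create_request,
)

# A hand-constructed request standing in for one captured by the mock router.
request = httpx.Request(
    "POST",
    "https://api.openai.com/v1/moderations",
    json={"input": "Test input"},
)

response = moderation_create_response_from_create_request(
    request,
    extra={"results": [{"flagged": True}]},
)

assert response.id.startswith("modr-")
assert response.results[0].flagged is True
```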