Fix Python 3.8 compatibility breakage
The breakage resulted from newer syntax being used for type annotations, introduced as part of mlc-ai#592. So long as `mlc_chat.interface.openai_api` wasn't imported, the breaking changes were not encountered. In mlc-ai#1107, the addition of `from .interface.openai_api import ChatMessage` caused this module to be imported, breaking compatibility of `mlc_chat.ChatModule` with Python 3.8.

This commit updates the type annotations to the supported syntax.
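
For context, a minimal sketch of the incompatibility (illustrative only, not part of the commit; assumes `pydantic` is installed): PEP 604's `X | Y` union syntax is only evaluable at runtime on Python 3.10+, and Pydantic evaluates field annotations while the class body executes, so merely importing a module that uses the new syntax fails on Python 3.8. The `typing` spellings are equivalent and import cleanly on 3.8.

    # sketch.py -- illustrative, not part of this commit
    from typing import Optional

    from pydantic import BaseModel

    class ChatMessage(BaseModel):
        role: str
        content: str
        # On Python 3.8, spelling this field as `name: str | None = None`
        # raises "TypeError: unsupported operand type(s) for |: 'type' and
        # 'NoneType'" as soon as the module is imported; the typing form
        # below behaves identically and works on 3.8.
        name: Optional[str] = None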
Lunderberg committed Nov 6, 2023
1 parent e2c99a8 commit 467eb2a
Showing 1 changed file with 16 additions and 16 deletions.
32 changes: 16 additions & 16 deletions python/mlc_chat/interface/openai_api.py
@@ -14,13 +14,13 @@
 class ChatMessage(BaseModel):
     role: str
     content: str
-    name: str | None = None
+    name: Optional[str] = None
 
 
 class ChatCompletionRequest(BaseModel):
     model: str
-    messages: list[ChatMessage]
-    stream: bool | None = False
+    messages: List[ChatMessage]
+    stream: Optional[bool] = False
     temperature: float = None
     top_p: float = None
     # TODO: replace by presence_penalty and frequency_penalty
@@ -43,47 +43,47 @@ class ChatCompletionRequest(BaseModel):
 
 class UsageInfo(BaseModel):
     prompt_tokens: int = 0
-    completion_tokens: int | None = 0
+    completion_tokens: Optional[int] = 0
     total_tokens: int = 0
 
 
 class ChatCompletionResponseChoice(BaseModel):
     index: int
     message: ChatMessage
-    finish_reason: Literal["stop", "length"] | None = None
+    finish_reason: Optional[Literal["stop", "length"]] = None
 
 
 class ChatCompletionResponse(BaseModel):
     id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}")
     object: str = "chat.completion"
     created: int = Field(default_factory=lambda: int(time.time()))
-    choices: list[ChatCompletionResponseChoice]
+    choices: List[ChatCompletionResponseChoice]
     # TODO: Implement support for the following fields
-    usage: UsageInfo | None = None
+    usage: Optional[UsageInfo] = None
 
 
 class DeltaMessage(BaseModel):
-    role: str | None = None
-    content: str | None = None
+    role: Optional[str] = None
+    content: Optional[str] = None
 
 
 class ChatCompletionResponseStreamChoice(BaseModel):
     index: int
     delta: DeltaMessage
-    finish_reason: Literal["stop", "length"] | None = None
+    finish_reason: Optional[Literal["stop", "length"]] = None
 
 
 class ChatCompletionStreamResponse(BaseModel):
     id: str = Field(default_factory=lambda: f"chatcmpl-{shortuuid.random()}")
     object: str = "chat.completion.chunk"
     created: int = Field(default_factory=lambda: int(time.time()))
-    choices: list[ChatCompletionResponseStreamChoice]
+    choices: List[ChatCompletionResponseStreamChoice]
 
 
 class CompletionRequest(BaseModel):
     model: str
-    prompt: str | list[str]
-    stream: bool | None = False
+    prompt: Union[str, List[str]]
+    stream: Optional[bool] = False
     temperature: float = None
     repetition_penalty: float = None
     top_p: float = None
@@ -107,16 +107,16 @@ class CompletionRequest(BaseModel):
 class CompletionResponseChoice(BaseModel):
     index: int
     text: str
-    finish_reason: Literal["stop", "length"] | None = None
+    finish_reason: Optional[Literal["stop", "length"]] = None
     # TODO: logprobs support
-    logprobs: int | None = None
+    logprobs: Optional[int] = None
 
 
 class CompletionResponse(BaseModel):
     id: str = Field(default_factory=lambda: f"cmpl-{shortuuid.random()}")
     object: str = "text.completion"
     created: int = Field(default_factory=lambda: int(time.time()))
-    choices: list[CompletionResponseChoice]
+    choices: List[CompletionResponseChoice]
     usage: UsageInfo
 
 
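A quick usage check, as a hedged sketch (assumes this package layout plus `pydantic` and `shortuuid` are installed; the model name is made up): after this commit, the module imports and validates requests the same way on Python 3.8 as on newer interpreters.

    # check.py -- illustrative usage, not part of this commit
    from mlc_chat.interface.openai_api import ChatCompletionRequest, ChatMessage

    # Build a streaming chat request; Optional[bool] and List[ChatMessage]
    # validate exactly as the `|`-union spellings did on Python 3.10+.
    request = ChatCompletionRequest(
        model="demo-model",  # hypothetical model name
        messages=[ChatMessage(role="user", content="Hello!")],
        stream=True,
    )
    print(request)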