From 1ed0ec48423635b48acdede0b057d1bc404b7bb7 Mon Sep 17 00:00:00 2001
From: Jason Liu
Date: Thu, 9 May 2024 23:05:21 -0400
Subject: [PATCH] bump

---
 configs/__init__.py       | 127 ++++++++++++++++++++++++++++++++++++++
 configs/v1/name.yaml      |  24 +++++++
 configs/v2/character.yaml |  24 +++++++
 run.py                    | 100 ++++++++++++++++++++--------------
 4 files changed, 236 insertions(+), 39 deletions(-)
 create mode 100644 configs/__init__.py
 create mode 100644 configs/v1/name.yaml
 create mode 100644 configs/v2/character.yaml

diff --git a/configs/__init__.py b/configs/__init__.py
new file mode 100644
index 0000000..265823d
--- /dev/null
+++ b/configs/__init__.py
@@ -0,0 +1,127 @@
+import os
+from typing import Iterable, List, Literal, Optional, Type
+
+import yaml
+from jinja2 import Template
+from pydantic import BaseModel, Field, create_model, field_validator
+
+TypeHint = Literal[
+    "str",
+    "int",
+    "float",
+    "bool",
+    "str[]",
+    "int[]",
+    "float[]",
+    "bool[]",
+]
+
+# Single mapping from YAML type hints to Python types, shared by the
+# input and output model factories.
+TYPES: dict[str, type] = {
+    "str": str,
+    "int": int,
+    "float": float,
+    "bool": bool,
+    "str[]": List[str],
+    "int[]": List[int],
+    "float[]": List[float],
+    "bool[]": List[bool],
+}
+
+
+class Property(BaseModel):
+    title: str
+    type: TypeHint
+    prompt: Optional[str] = None
+
+
+class OutputSchema(BaseModel):
+    name: str
+    prompt: Optional[str] = None
+    properties: List[Property]
+
+
+class InputSchema(BaseModel):
+    name: str
+    properties: List[Property]
+
+
+class PromptMessage(BaseModel):
+    role: str
+    content: str
+
+
+class Config(BaseModel):
+    path: str
+    output_schema: OutputSchema
+    input_schema: InputSchema
+    prompt: List[PromptMessage]
+    model: str = "gpt-4-turbo"
+
+    @field_validator("path")
+    @classmethod
+    def validate_path(cls, v: str) -> str:
+        assert v.startswith("/"), "Path must be absolute"
+        return v
+
+    def create_output_model(self) -> Type[BaseModel]:
+        # Build the response model dynamically; each property's prompt
+        # becomes the field description the LLM is steered by.
+        return create_model(
+            self.output_schema.name,
+            **{
+                prop.title: (
+                    TYPES[prop.type],
+                    Field(
+                        ...,
+                        title=prop.title,
+                        description=prop.prompt,
+                    ),
+                )
+                for prop in self.output_schema.properties
+            },  # type: ignore
+        )  # type: ignore
+
+    def create_input_model(self) -> Type[BaseModel]:
+        return create_model(
+            self.input_schema.name,
+            **{
+                prop.title: (
+                    TYPES[prop.type],
+                    Field(
+                        ...,
+                        title=prop.title,
+                    ),
+                )
+                for prop in self.input_schema.properties
+            },  # type: ignore
+        )  # type: ignore
+
+    def messages(self, data: BaseModel) -> List[dict]:
+        # Render each prompt template against the validated request data.
+        return [
+            {
+                "role": message.role,
+                "content": Template(message.content).render(**data.model_dump()),
+            }
+            for message in self.prompt
+        ]
+
+
+def load_configs() -> Iterable[Config]:
+    cur_path = os.path.dirname(__file__)
+
+    # Every *.yaml under configs/ becomes one Config; its API path is the
+    # file's location relative to this package, minus the extension
+    # (e.g. configs/v1/name.yaml -> /v1/name).
+    for root, _dirs, files in os.walk(cur_path):
+        for filename in files:
+            if filename.endswith(".yaml"):
+                file_path = os.path.join(root, filename)
+                api_path = os.path.splitext(file_path.replace(cur_path, ""))[0]
+
+                with open(file_path, "r") as f:
+                    content = yaml.safe_load(f)
+
+                yield Config.model_validate(dict(path=api_path, **content))
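
A quick sketch of how the pieces in configs/__init__.py compose (illustrative, not part of the diff). It validates a Config built from the same structure that configs/v1/name.yaml below produces via yaml.safe_load, then inspects the generated models and rendered messages; the literal values are examples only.

from configs import Config

config = Config.model_validate(
    {
        "path": "/v1/name",
        "output_schema": {
            "name": "UserDetail",
            "properties": [
                {"title": "name", "type": "str", "prompt": "What is the user's name?"},
                {"title": "age", "type": "int", "prompt": "How old is the user?"},
            ],
        },
        "input_schema": {
            "name": "Request",
            "properties": [{"title": "data", "type": "str"}],
        },
        "prompt": [{"role": "user", "content": "extract {{data}}"}],
    }
)

# Dynamically created pydantic models, equivalent to hand-written
# `class UserDetail(BaseModel): name: str; age: int` and
# `class Request(BaseModel): data: str`.
UserDetail = config.create_output_model()
Request = config.create_input_model()

# Jinja2 fills the prompt template from the validated request body.
print(config.messages(Request(data="Jason is 25")))
# [{'role': 'user', 'content': 'extract Jason is 25'}]
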
diff --git a/configs/v1/name.yaml b/configs/v1/name.yaml
new file mode 100644
index 0000000..4321887
--- /dev/null
+++ b/configs/v1/name.yaml
@@ -0,0 +1,24 @@
+output_schema:
+  name: UserDetail
+  prompt: "Extracting a user"
+  properties:
+    - title: name
+      type: str
+      prompt: "What is the user's name?"
+    - title: age
+      type: int
+      prompt: "How old is the user?"
+
+input_schema:
+  name: Request
+  properties:
+    - title: data
+      type: str
+
+prompt:
+  - role: system
+    content: "You are a name extractor"
+  - role: user
+    content: "extract {{data}}"
+
+model: gpt-4-turbo
\ No newline at end of file
diff --git a/configs/v2/character.yaml b/configs/v2/character.yaml
new file mode 100644
index 0000000..2162487
--- /dev/null
+++ b/configs/v2/character.yaml
@@ -0,0 +1,24 @@
+output_schema:
+  name: UserDetail
+  prompt: "Extracting a user"
+  properties:
+    - title: name
+      type: str
+      prompt: "What is the user's name?"
+    - title: friends
+      type: str[]
+      prompt: "A few of the character's friends."
+
+input_schema:
+  name: Request
+  properties:
+    - title: show
+      type: str
+
+prompt:
+  - role: system
+    content: "You are a character generator"
+  - role: user
+    content: "Create a person from this show: {{show}}"
+
+model: gpt-4-turbo
\ No newline at end of file
diff --git a/run.py b/run.py
index c40c750..b5f3eba 100644
--- a/run.py
+++ b/run.py
@@ -5,45 +5,67 @@
 
 import instructor
+from configs import Config, load_configs
 import openai
 
 
 app = FastAPI()
-client = instructor.from_openai(openai.OpenAI(), model="gpt-4-turbo-preview")
-
-
-class Property(BaseModel):
-    name: str
-    value: str
-
-
-class User(BaseModel):
-    name: str
-    age: int
-    properties: List[Property]
-
-
-@app.post("/v1/extract_user", response_model=User)
-def extract_user(text: str):
-    user = client.chat.completions.create(
-        messages=[
-            {"role": "user", "content": f"Extract user from `{text}`"},
-        ],
-        response_model=User,
-    )
-    return user
-
-
-@app.post("/v1/extract_user_stream")
-def extract_user_stream(text: str):
-    user_stream = client.chat.completions.create_partial(
-        messages=[
-            {"role": "user", "content": f"Extract user from `{text}`"},
-        ],
-        response_model=User,
-    )
-
-    def stream():
-        for partial_user in user_stream:
-            yield f"data: {partial_user.model_dump_json()}\n\n"
-
-    return StreamingResponse(stream(), media_type="text/event-stream")
+client = instructor.from_openai(openai.OpenAI())
+
+
+def register_routes(config: Config) -> None:
+    # Create the endpoints inside a function so each route closes over its
+    # own config; defining them directly in the loop body would late-bind
+    # every handler to the last loaded config.
+    OutputModel = config.create_output_model()
+    InputModel = config.create_input_model()
+    path = config.path
+
+    @app.post(path, response_model=OutputModel)
+    def extract_data(input: InputModel):
+        return client.chat.completions.create(
+            model=config.model,
+            messages=config.messages(input),
+            response_model=OutputModel,
+        )
+
+    @app.post(f"{path}/list")
+    def extract_data_list(input: InputModel):
+        objs = client.chat.completions.create_iterable(
+            model=config.model,
+            messages=config.messages(input),
+            response_model=OutputModel,
+        )
+        return [obj for obj in objs]
+
+    @app.post(f"{path}/list/stream")
+    def extract_data_list_stream(input: InputModel):
+        def stream():
+            # create_iterable already streams internally; frame each
+            # completed object as a server-sent event, since
+            # StreamingResponse cannot send raw pydantic models.
+            for obj in client.chat.completions.create_iterable(
+                model=config.model,
+                messages=config.messages(input),
+                response_model=OutputModel,
+            ):
+                yield f"data: {obj.model_dump_json()}\n\n"
+
+        return StreamingResponse(stream(), media_type="text/event-stream")
+
+    @app.post(f"{path}/stream")
+    def extract_data_stream(input: InputModel):
+        user_stream = client.chat.completions.create_partial(
+            model=config.model,
+            messages=config.messages(input),
+            response_model=OutputModel,
+        )
+
+        def stream():
+            for partial_user in user_stream:
+                yield f"data: {partial_user.model_dump_json()}\n\n"
+
+        return StreamingResponse(stream(), media_type="text/event-stream")
+
+
+for config in load_configs():
+    register_routes(config)
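
With the patch applied, the generated endpoints can be smoke-tested in-process. A minimal sketch, assuming the dependencies (fastapi, instructor, openai, pyyaml, jinja2) are installed and OPENAI_API_KEY is set; `run` is the module name from this diff, and the printed outputs are examples only.

from fastapi.testclient import TestClient

from run import app

client = TestClient(app)

# configs/v1/name.yaml registers POST /v1/name; the request body must match
# the generated Request model (a single "data" string field).
resp = client.post("/v1/name", json={"data": "Jason is 25 years old"})
print(resp.json())  # e.g. {"name": "Jason", "age": 25}

# The /stream variant emits server-sent events carrying partial objects.
with client.stream(
    "POST", "/v1/name/stream", json={"data": "Jason is 25 years old"}
) as stream_resp:
    for line in stream_resp.iter_lines():
        if line:
            print(line)  # e.g. data: {"name":"Jason","age":null}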