This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

Add LLM Builder UI #213

Open · wants to merge 23 commits into base: main
232 changes: 232 additions & 0 deletions autollm/cli.py
@@ -0,0 +1,232 @@
import os

import gradio as gr
import llama_index
from autollm.auto.query_engine import AutoQueryEngine
from autollm.serve.llm_utils import create_custom_llm
from autollm.utils.document_reading import read_files_as_documents

llama_index.set_global_handler("simple")

DEFAULT_LLM_MODEL = "gpt-4-0125-preview"
OPENAI_MODELS = [
"gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613",
"gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-instruct"
]
GEMINI_MODELS = [
"gemini-pro",
]
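# These model identifiers are passed straight through to the underlying LLM
# backend (litellm-style names); extend the lists above to surface more options in the UI.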


def determine_llm_model(model_selections: list[tuple[str, bool]]) -> str:
"""
Determines which LLM model is selected based on user input.

Parameters:
        model_selections (list of tuples): List of (model_name, is_selected) pairs, one per provider.

Returns:
str: Selected LLM model name.
"""
selected_models = []
for model_name, is_selected in model_selections:
if is_selected:
selected_models.append(model_name)

if len(selected_models) != 1:
raise ValueError("Exactly one LLM model must be selected.")
return selected_models[0]
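
# Example (hypothetical inputs): with the OpenAI box checked and Gemini unchecked,
# determine_llm_model([("gpt-4-0125-preview", True), ("gemini-pro", False)])
# returns "gpt-4-0125-preview"; selecting both (or neither) raises ValueError.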


def create_app(
        use_openai, openai_model, openai_api_key, use_gemini, gemini_model, gemini_api_key,
        what_to_make_area, uploaded_files, webpage_input, config_file, progress=gr.Progress()):
    # Gradio injects the progress tracker when it is declared as a default argument.
    global query_engine

os.environ["OPENAI_API_KEY"] = openai_api_key
os.environ["GEMINI_API_KEY"] = gemini_api_key

progress(0.2, desc="Reading files...")
file_documents = read_files_as_documents(input_files=uploaded_files)

progress(0.6, desc="Updating LLM...")
custom_llm = create_custom_llm(user_prompt=what_to_make_area, config=config_file)
emoji, name, description, instructions = update_configurations(custom_llm)

    progress(0.8, desc="Configuring app...")
# List of model selections - easily extendable
model_selections = [
(openai_model, use_openai),
(gemini_model, use_gemini),
# Add new models here as ('model_name', use_model),
]

# Determine the selected LLM provider
selected_llm_model = determine_llm_model(model_selections)

# Update the query engine with the selected LLM model
query_engine = AutoQueryEngine.from_defaults(
documents=file_documents,
use_async=False,
system_prompt=custom_llm.instructions,
exist_ok=True,
overwrite_existing=True,
llm_model=selected_llm_model)

# Complete progress
progress(1.0, desc="Completed") # Complete progress bar
    create_preview_output = gr.Textbox(
        """LLM details are updated in the Configure tab and the LLM app is ready to preview 🚀. Start chatting with your custom LLM in the preview 👉"""
    )

return create_preview_output, emoji, name, description, instructions


def update_configurations(custom_llm):
    """Populate the Configure tab fields from the generated CustomLLM."""
    return (
        gr.Textbox(custom_llm.emoji, interactive=True),
        gr.Textbox(custom_llm.name, interactive=True),
        gr.Textbox(custom_llm.description, interactive=True),
        gr.Textbox(custom_llm.instructions, interactive=True),
    )


def update_app(*args):
    """Placeholder for the `Update Preview` action; not implemented in this PR yet."""
    pass


def predict(message, history):
    # `query_engine` is set globally by create_app, so `Create Preview`
    # must be clicked before the chat can respond.
    chat_response = query_engine.query(message).response
    return chat_response


with gr.Blocks(title="autollm UI", theme=gr.themes.Default(primary_hue=gr.themes.colors.teal)) as demo:
gr.Markdown("# LLM Builder")
gr.Markdown(
"""
<p style='text-align: center'>
Powered by <a href='https://github.com/safevideo/autollm' target='_blank'>autollm</a>
</p>
""")
with gr.Row():
with gr.Column():
with gr.Tab("Create"):
with gr.Accordion(label="LLM Model (default openai gpt-4-0125-preview)", open=False):
with gr.Tab("OpenAI"):
use_openai = gr.Checkbox(value=True, label="Use OpenAI", interactive=True)
openai_model = gr.Dropdown(
label="OpenAI Model",
choices=OPENAI_MODELS,
value=DEFAULT_LLM_MODEL,
interactive=True)
openai_api_key_input = gr.Textbox(label="OPENAI_API_KEY", type="password")
with gr.Tab("Gemini"):
use_gemini = gr.Checkbox(value=False, label="Use Gemini", interactive=True)
gemini_model = gr.Dropdown(
label="Gemini Model", choices=GEMINI_MODELS, value="gemini-pro", interactive=True)
gemini_api_key_input = gr.Textbox(label="GEMINI_API_KEY", type="password")
with gr.Accordion(label="Embedding Model API key", open=False):
with gr.Tab("HuggingFace TGI"):
hf_api_key_input = gr.Textbox(label="HF_API_KEY", type="password")
what_to_make_area = gr.Textbox(label="What would you like to make?", lines=2)

with gr.Column(variant="compact"):
with gr.Accordion(label="Add knowledge from files", open=False):
uploaded_files = gr.File(label="Add knowledge from files", file_count="multiple")
with gr.Accordion(label="Add knowledge from folder", open=False):
directory_input = gr.File(
label="Add knowledge from directory", file_count="directory")
with gr.Accordion(label="Add knowledge from webpages", open=False):
webpage_input = gr.Textbox(
lines=2,
info="Enter URLs separated by commas.",
placeholder="https://www.example1.com, https://www.example2.com")

with gr.Row():
with gr.Column(scale=1, min_width=10):
placeholder = gr.Button(visible=False, interactive=False)
with gr.Column(scale=1, min_width=100):
create_preview_button = gr.Button("Create Preview", variant="primary")
create_preview_output = gr.Textbox(
label="Status",
info="Click `Create Preview` 👆 to build preview of the LLM app on the right")

with gr.Tab("Configure"):
with gr.Column(variant="compact"):
detail_html = gr.HTML(
'<a href="https://github.com/safevideo/autollm/blob/main/examples/configs/config.example.yaml">click here for example config</a>'
)
with gr.Accordion(label="Load config file", open=False):
config_file_upload = gr.File(
label="Configurations of LLM, Vector Store..", file_count="single")
emoji = gr.Textbox(label="Emoji")
name = gr.Textbox(label="Name")
description = gr.Textbox(label="Description")
instruction = gr.TextArea(label="Instructions")
with gr.Row():
with gr.Column(scale=1, min_width=10):
placeholder = gr.Button(visible=False, interactive=False)
with gr.Column(scale=1, min_width=100):
update_preview_button = gr.Button("Update Preview", variant="primary")
                configure_output = gr.Textbox(label="👆 Click `Update Preview` to refresh the preview of the LLM app")
with gr.Tab("Export"):
# Controls for 'Export' tab
                hf_api_key = gr.Textbox(label="HF_API_KEY", type="password")
                make_db_private = gr.Checkbox(label="Make DB private")

with gr.Column():
with gr.Row():
download_api_button = gr.Button("Download as API")
deploy_button = gr.Button("Deploy to 🤗")

with gr.Row():
with gr.Column():
ai_avatar_image = os.path.join(os.path.dirname(__file__), "serve/avatar.jpg")

chatbot = gr.Chatbot(
label="Preview",
bubble_full_width=False,
render=False,
show_copy_button=True,
avatar_images=(None, ai_avatar_image))
chat_interface = gr.ChatInterface(predict, chatbot=chatbot)

create_preview_button.click(
create_app,
inputs=[
use_openai, openai_model, openai_api_key_input, use_gemini, gemini_model,
gemini_api_key_input, what_to_make_area, uploaded_files, webpage_input, config_file_upload
],
outputs=[create_preview_output, emoji, name, description, instruction])

update_preview_button.click(
update_app,
inputs=[
openai_api_key_input, gemini_api_key_input, what_to_make_area, uploaded_files, webpage_input,
config_file_upload, emoji, name, description, instruction
],
outputs=[configure_output],
scroll_to_output=True)

gr.Markdown(
"""
<p style='text-align: center'>
Automatically created by <a href='https://huggingface.co/safevideo' target='_blank'>LLM Builder</a>
</p>
""")


def main():
demo.launch()


if __name__ == "__main__":
main()
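
# Local smoke test (assumes the dependencies in requirements.txt are installed
# and the relevant provider API keys are exported):
#   python -m autollm.cli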
Binary file added autollm/serve/avatar.jpg
103 changes: 103 additions & 0 deletions autollm/serve/llm_utils.py
@@ -0,0 +1,103 @@
from typing import Any, Optional

from llama_index.program import LLMTextCompletionProgram
from pydantic import BaseModel, Field

from autollm import AutoLiteLLM

DEFAULT_LLM_MODEL = "azure/gpt-4-1106"
DEFAULT_LLM_MAX_TOKENS = 1024
DEFAULT_LLM_TEMPERATURE = 0.1


class CustomLLM(BaseModel):
"""Data model for custom LLM creation from user prompt."""

emoji: str = Field(
...,
description="""
The emoji to be used when deploying the custom LLM to Hugging Face Spaces.
""",
example="📝",
)
name: str = Field(
...,
description="""
The descriptive name of the custom LLM.
""",
example="Creative Writing Coach",
)
description: str = Field(
...,
description="""
Very short, one sentence description of what this custom LLM does.
""",
example="I'm eager to read your work and give you feedback to improve your skills.",
)
instructions: str = Field(
...,
description="""
Very detailed persona instructions for the custom LLM.
What does this custom LLM do?
How does it behave?
What should it avoid doing?
How long or short should responses be?
""",
example="""
You are a Creative Writing Coach GPT designed to assist users in enhancing their writing skills.
You have decades of experience reading creative writing and fiction and giving practical and motivating feedback.
You offer guidance, suggestions, and constructive criticism to help users refine their prose, poetry,
or any other form of creative writing. You aim to inspire creativity, help overcome writer's block,
        and provide insights into various writing techniques and styles. You'll start with a simple rating of
        the user's writing and what's good about it before going into any suggestions. Always be positive and encouraging.
Ask questions to get more information. Be specific and detailed in your feedback.
""",
)


PROMPT_TEMPLATE_STR = """\
Your task is to revise the user prompt and create a JSON object \
in the format of the CustomLLM data model. The JSON object will \
be used to create a custom LLM model. Ensure the revised prompt \
maintains the original intent, is clear and detailed, and is \
adapted to the specific context and task mentioned in the user input.

1. Analyze the basic prompt to understand its primary purpose and context.
2. Refine the prompt to be clear, detailed, specific, and tailored to the context and task.
3. Retain the core elements and intent of the original prompt.
4. Provide an enhanced version of the prompt, ensuring it is optimized for interaction with an LLM.

User prompt: {user_prompt}
"""


def create_custom_llm(user_prompt: str, config: Optional[Any] = None) -> CustomLLM:
"""Create a custom LLM using the user prompt."""
    if not user_prompt:
        raise ValueError("Please fill in the 'What would you like to make?' field.")

    # `config` is expected to be a dict of overrides (llm_model, llm_max_tokens, ...);
    # callers holding a config file should parse it into a dict before passing it here.
    if not config:
        config = {}

llm_model = config.get('llm_model', DEFAULT_LLM_MODEL)
llm_max_tokens = config.get('llm_max_tokens', DEFAULT_LLM_MAX_TOKENS)
llm_temperature = config.get('llm_temperature', DEFAULT_LLM_TEMPERATURE)
llm_api_base = config.get('llm_api_base', None)

llm = AutoLiteLLM.from_defaults(
model=llm_model,
max_tokens=llm_max_tokens,
temperature=llm_temperature,
api_base=llm_api_base,
)

program = LLMTextCompletionProgram.from_defaults(
output_cls=CustomLLM,
prompt_template_str=PROMPT_TEMPLATE_STR,
llm=llm,
verbose=True,
)

output = program(user_prompt=user_prompt)

return output
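

# A minimal smoke test for review purposes (hypothetical prompt and config values;
# assumes credentials for the chosen provider are available as environment variables):
if __name__ == "__main__":
    spec = create_custom_llm(
        user_prompt="A coach that reviews short stories and gives actionable feedback",
        config={"llm_model": "gpt-3.5-turbo", "llm_temperature": 0.1},
    )
    print(spec.emoji, spec.name)
    print(spec.description)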
6 changes: 5 additions & 1 deletion autollm/utils/document_reading.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import List, Optional, Sequence

from llama_index import download_loader
from llama_index.readers.file.base import SimpleDirectoryReader
from llama_index.schema import Document

@@ -37,10 +38,13 @@ def read_files_as_documents(
Returns:
documents (Sequence[Document]): A sequence of Document objects.
"""
JSONReader = download_loader("JSONReader")

# Configure file_extractor to use MarkdownReader for md files
file_extractor = {
".md": MarkdownReader(read_as_single_doc=True),
".pdf": LangchainPDFReader(extract_images=False)
".pdf": LangchainPDFReader(extract_images=False),
".json": JSONReader(),
}

# Initialize SimpleDirectoryReader
3 changes: 2 additions & 1 deletion requirements.txt
@@ -2,6 +2,7 @@ llama-index==0.9.27
litellm==1.16.21
uvicorn
fastapi
python-dotenv
python-dotenv==1.0.1
httpx
lancedb==0.3.4
gradio==4.14.0
5 changes: 5 additions & 0 deletions setup.py
@@ -71,4 +71,9 @@ def get_license():
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers'
],
    entry_points={
        'console_scripts': [
            # The UI entry point added in this PR lives at autollm/cli.py,
            # so the console script targets autollm.cli:main.
            'autollm=autollm.cli:main',
        ],
    },
)
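
# After an editable install (e.g. `pip install -e .`), the `autollm` console
# script above should launch the Gradio UI defined in autollm/cli.py.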