add create_web_app (#12)
* add create_web_app

* update readme

* update fastapi docs

* fix styling

* some fixes, add tests for web app, update readme

* update requirements

* update httpx versioning

* add examples

* minor fixes, llama_index version update

* major refactor

* minor fix

* another minor fix

* bugfix

* fix tests

---------

Co-authored-by: fatih <34196005+fcakyon@users.noreply.github.com>
Co-authored-by: fcakyon <fcakyon@gmail.com>
3 people authored Oct 19, 2023
1 parent f2a3d7f commit 7ea85ba
Showing 22 changed files with 254 additions and 147 deletions.
33 changes: 30 additions & 3 deletions README.md
@@ -79,11 +79,14 @@ from autollm import AutoVectorStoreIndex

# Dynamically initialize a VectorStoreIndex instance with the same AutoVectorStoreIndex interface:

-vector_store_index = AutoVectorStoreIndex.from_defaults(vector_store_type="PineconeVectorStore", pinecone_index=pinecone.Index("quickstart"))
+vector_store_index = AutoVectorStoreIndex.from_defaults(
+    vector_store_type="PineconeVectorStore", pinecone_index=pinecone.Index("quickstart")
+)


vector_store_index = AutoVectorStoreIndex.from_defaults(
-    vector_store_type="VectorStoreIndex", documents=documents)
+    vector_store_type="VectorStoreIndex", documents=documents
+)
```

### AutoQueryEngine (Creates a query engine pipeline in a single line of code)
@@ -200,6 +203,30 @@ input_dir = "/local/documents/path"
documents = local_document_provider(input_dir=input_dir)
```

### FastAPI Integration in 1-Line 🚀

Creating a FastAPI application integrated with AutoLLM has never been easier. Follow the quick guide below to get started.

#### Create Your FastAPI Application

In your `main.py`, include the following line of code:

```python
from autollm import create_web_app

app = create_web_app(config_path, env_path)
```

Here, `config_path` and `env_path` are the paths to your configuration file and your (optional) environment file.
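
For example, pointing at the sample files shipped under `examples/configs` in this commit (swap in your own paths):

```python
from autollm import create_web_app

# Sample paths from examples/configs; replace them with your own files.
app = create_web_app("examples/configs/config.example.yaml", "examples/configs/.env.example")
```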

#### Run Your Application

After creating your FastAPI app, run the following command in your terminal to get it up and running:

```bash
uvicorn main:app
```
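
#### Query Your Application

Once the server is running (Uvicorn listens on `http://127.0.0.1:8000` by default), send a POST request to the `/query` endpoint. The example below assumes your config defines a task named `qa`, as the sample config in `examples/configs` does; `httpx` is already listed in requirements:

```python
import httpx

# The payload fields must match the QueryPayload model: task and user_query.
response = httpx.post(
    "http://127.0.0.1:8000/query",
    json={"task": "qa", "user_query": "What is AutoLLM?"},
    timeout=60.0,  # LLM-backed queries can take a while
)
print(response.json())
```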

______________________________________________________________________

## FAQ
@@ -250,7 +277,7 @@

## License

-AutoLLM is available under the [GNU Affero General Public License (AGPL 3.0)](LICENSE.txt).
+AutoLLM is available under the [GNU Affero General Public License (AGPL 3.0)](LICENSE).

______________________________________________________________________

3 changes: 2 additions & 1 deletion autollm/__init__.py
@@ -12,5 +12,6 @@
from autollm.auto.query_engine import AutoQueryEngine
from autollm.auto.service_context import AutoServiceContext
from autollm.auto.vector_store_index import AutoVectorStoreIndex
+from autollm.serve.utils import create_web_app

-__all__ = ['AutoLLM', 'AutoServiceContext', 'AutoVectorStoreIndex', 'AutoQueryEngine']
+__all__ = ['AutoLLM', 'AutoServiceContext', 'AutoVectorStoreIndex', 'AutoQueryEngine', 'create_web_app']
Empty file removed autollm/app/api/__init__.py
43 changes: 0 additions & 43 deletions autollm/app/api/ask_question.py

This file was deleted.

16 changes: 0 additions & 16 deletions autollm/app/api/health_check.py

This file was deleted.

20 changes: 0 additions & 20 deletions autollm/app/docs.py

This file was deleted.

21 changes: 0 additions & 21 deletions autollm/app/main.py

This file was deleted.

2 changes: 1 addition & 1 deletion autollm/auto/vector_store_index.py
@@ -42,7 +42,7 @@ def from_defaults(
        if documents is None:
            documents = [Document.example()]
        if vector_store_type == "VectorStoreIndex":
-            index = VectorStoreIndex.from_documents(documents=[documents], *args, **kwargs)
+            index = VectorStoreIndex.from_documents(documents=documents, *args, **kwargs)
        else:
            VectorStoreClass = import_vector_store_class(vector_store_type)
            vector_store = VectorStoreClass(*args, **kwargs)
File renamed without changes.
16 changes: 16 additions & 0 deletions autollm/serve/docs.py
@@ -0,0 +1,16 @@
# Purpose: FastAPI documentation configuration.
# Metadata
title = "AutoLLM Query Engine"
description = """
This is a FastAPI service for AutoLLM's natural language query engine.
It's designed to query multiple and big documents and get the most relevant results.
"""
version = "0.0.1"
openapi_url = "/api/v1/openapi.json"
terms_of_service = "Local Deployment, All Rights Reserved."
tags_metadata = [
    {
        "name": "query",
        "description": "Operations related to querying the header-documents."
    },
]
72 changes: 72 additions & 0 deletions autollm/serve/utils.py
@@ -0,0 +1,72 @@
import logging
from typing import Dict

import yaml
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel, Field

from autollm.auto.query_engine import AutoQueryEngine
from autollm.serve.docs import description, openapi_url, tags_metadata, terms_of_service, title, version

logging.basicConfig(level=logging.INFO)


# Function to load the configuration for tasks and initialize query engines
def load_config_and_initialize_engines(config_file_path: str,
                                       env_file_path: str = None) -> Dict[str, AutoQueryEngine]:
    # Optionally load environment variables from a .env file
    if env_file_path:
        load_dotenv(dotenv_path=env_file_path)

    # Load the YAML configuration file
    with open(config_file_path) as f:
        config = yaml.safe_load(f)

    # Initialize query engines based on the config
    query_engines = {}
    for task_params in config['tasks']:
        task_name = task_params.pop('name')
        query_engines[task_name] = AutoQueryEngine.from_parameters(**task_params)

    return query_engines


class QueryPayload(BaseModel):
    task: str = Field(..., description="Task to execute")
    user_query: str = Field(..., description="User's query")


# Function to create the FastAPI web app
def create_web_app(config_file_path: str, env_file_path: str = None):
    app = FastAPI(
        title=title,
        description=description,
        version=version,
        openapi_url=openapi_url,
        terms_of_service=terms_of_service,
        openapi_tags=tags_metadata,
    )

    query_engines = load_config_and_initialize_engines(config_file_path, env_file_path)

    @app.post("/query")
    async def query(payload: QueryPayload):
        task = payload.task
        user_query = payload.user_query

        if task not in query_engines:
            raise HTTPException(status_code=400, detail="Invalid task name")

        # Use the appropriate query engine for the task
        query_engine = query_engines[task]
        response = query_engine.query(user_query)

        return response

    return app


# For demonstration, let's assume we have a config.yaml with task configurations and an optional .env file
# This function call would typically be in your main application file
# app = create_web_app("path/to/config.yaml", "path/to/.env")
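
Building on that comment, here is a minimal sketch (not part of the committed file) of exercising the app with FastAPI's `TestClient`, assuming a task config like `tests/config.yaml` below and an `OPENAI_API_KEY` in the environment:

```python
from fastapi.testclient import TestClient

from autollm import create_web_app

# Build the app from a task config; the env file argument is optional.
app = create_web_app("tests/config.yaml")
client = TestClient(app)

# "qa" must match a task name defined in the config.
response = client.post("/query", json={"task": "qa", "user_query": "What is AutoLLM?"})
print(response.status_code, response.json())
```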
2 changes: 2 additions & 0 deletions examples/configs/.env.example
@@ -0,0 +1,2 @@
# .env.example
OPENAI_API_KEY=your_openai_api_key_here
25 changes: 25 additions & 0 deletions examples/configs/config.example.yaml
@@ -0,0 +1,25 @@
# config.example.yaml
version: '1.0' # Version of this configuration file
tasks:
- name: "summarize"
system_prompt: "Please summarize the following text:" # System prompt for this task
vector_store_params:
vector_store_type: "VectorStoreIndex"
llm_params:
model: "gpt-3.5-turbo"
service_context_params:
chunk_size: 1024
query_engine_params:
similarity_top_k: 5
enable_cost_calculator: true
- name: "qa"
system_prompt: "Question:" # System prompt for this task
vector_store_params:
vector_store_type: "VectorStoreIndex"
llm_params:
model: "gpt-3.5-turbo"
service_context_params:
chunk_size: 1024
query_engine_params:
similarity_top_k: 3
enable_cost_calculator: false
3 changes: 2 additions & 1 deletion requirements.txt
@@ -1,6 +1,7 @@
-llama-index==0.8.41
+llama-index==0.8.46
litellm==0.8.4
gitpython==3.1.37
+uvicorn==0.23.2
fastapi==0.103.2
python-dotenv
httpx
18 changes: 16 additions & 2 deletions setup.py
@@ -28,12 +28,26 @@ def get_version():
        return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1)


+def get_author():
+    current_dir = os.path.abspath(os.path.dirname(__file__))
+    init_file = os.path.join(current_dir, 'autollm', '__init__.py')
+    with open(init_file, encoding='utf-8') as f:
+        return re.search(r'^__author__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1)
+
+
+def get_license():
+    current_dir = os.path.abspath(os.path.dirname(__file__))
+    init_file = os.path.join(current_dir, 'autollm', '__init__.py')
+    with open(init_file, encoding='utf-8') as f:
+        return re.search(r'^__license__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1)
+
+
setuptools.setup(
    name='autollm',
    version=get_version(),
-    author='safevideo',
+    author=get_author(),
    author_email='support@safevideo.ai',
-    license='AGPL-3.0',
+    license=get_license(),
    description="Ship RAG based LLM Web API's, in seconds.",
    long_description=get_long_description(),
    long_description_content_type='text/markdown',
24 changes: 24 additions & 0 deletions tests/config.yaml
@@ -0,0 +1,24 @@
# config.example.yaml
tasks:
- name: "summarize"
system_prompt: "Please summarize the following text:" # System prompt for this task
vector_store_params:
vector_store_type: "VectorStoreIndex"
llm_params:
model: "gpt-3.5-turbo"
service_context_params:
chunk_size: 1024
query_engine_params:
similarity_top_k: 5
enable_cost_calculator: true
- name: "qa"
system_prompt: "Question:" # System prompt for this task
vector_store_params:
vector_store_type: "VectorStoreIndex"
llm_params:
model: "gpt-3.5-turbo"
service_context_params:
chunk_size: 1024
query_engine_params:
similarity_top_k: 3
enable_cost_calculator: false