This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

Commit

update tests with azure config (#222)
* update GitHub Actions workflows with Azure credentials
* update tests config with Azure models
* update AutoFastAPI tests with Azure
* update AutoLiteLLM tests with Azure
* update AutoQueryEngine tests with Azure
* update AutoServiceContext tests with Azure
* update AutoVectorStoreIndex tests with Azure

SeeknnDestroy authored Jan 23, 2024
1 parent 4af1ce8 commit a104237
Showing 8 changed files with 144 additions and 31 deletions.
.github/workflows/ci.yaml: 4 changes (3 additions & 1 deletion)
@@ -68,5 +68,7 @@ jobs:
- name: Run tests
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
AZURE_API_BASE: ${{ secrets.AZURE_API_BASE }}
AZURE_API_VERSION: ${{ secrets.AZURE_API_VERSION }}
run: pytest
.github/workflows/package_ci.yaml: 4 changes (3 additions & 1 deletion)
@@ -61,5 +61,7 @@ jobs:
- name: Run tests
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
AZURE_API_KEY: ${{ secrets.AZURE_API_KEY }}
AZURE_API_BASE: ${{ secrets.AZURE_API_BASE }}
AZURE_API_VERSION: ${{ secrets.AZURE_API_VERSION }}
run: pytest
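To run these tests outside CI, the same three Azure variables have to be available in the local environment. A minimal sketch of a guard that skips the Azure-backed tests when the credentials are missing; this helper is hypothetical and not part of this commit:

# Hypothetical guard, not part of this commit: placed at the top of a test
# module, it skips the whole module when the Azure credentials are not set.
import os

import pytest

pytestmark = pytest.mark.skipif(
    not all(
        os.environ.get(name)
        for name in ("AZURE_API_KEY", "AZURE_API_BASE", "AZURE_API_VERSION")
    ),
    reason="Azure OpenAI credentials are not configured",
)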
tests/config.yaml: 7 changes (5 additions & 2 deletions)
@@ -1,12 +1,12 @@
# config.example.yaml
tasks:
- name: "summarize"
llm_model: "gpt-3.5-turbo"
llm_model: "azure/gpt-35-turbo-1106"
llm_max_tokens: 1028
llm_temperature: 0.1
system_prompt: "You are a friendly chatbot that can summarize documents.:" # System prompt for this task
enable_cost_calculator: true
embed_model: "text-embedding-ada-002"
embed_model: "azure/text-embedding-ada-002"
chunk_size: 512
chunk_overlap: 64
context_window: 2048
@@ -15,3 +15,6 @@ tasks:
- name: "qa"
system_prompt: "You are a friendly chatbot that can answer questions." # System prompt for this task
enable_cost_calculator: false
embed_model: "azure/text-embedding-ada-002"
llm_model: "azure/gpt-35-turbo-1106"
vector_store_type: "SimpleVectorStore"
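The "azure/<deployment>" model strings above follow the LiteLLM naming convention that autollm's AutoLiteLLM wraps (see the test updates below). Under that convention, LiteLLM reads AZURE_API_KEY, AZURE_API_BASE, and AZURE_API_VERSION from the environment, which is why the workflows above export them. A minimal sketch of how such a model string resolves, assuming those variables are set:

# Minimal sketch, assuming LiteLLM and the AZURE_API_* variables exported in CI.
import os

import litellm

# "azure/<deployment-name>" routes the call to Azure OpenAI; the credentials
# are picked up from the environment rather than passed explicitly.
assert all(os.environ.get(v) for v in ("AZURE_API_KEY", "AZURE_API_BASE", "AZURE_API_VERSION"))

response = litellm.completion(
    model="azure/gpt-35-turbo-1106",
    messages=[{"role": "user", "content": "Summarize this document."}],
    max_tokens=1028,
    temperature=0.1,
)
print(response.choices[0].message.content)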
tests/test_auto_fastapi_app.py: 45 changes (41 additions & 4 deletions)
@@ -1,8 +1,11 @@
# Importing the necessary modules for testing and mocking
import os

from fastapi.testclient import TestClient
from llama_index import Document, VectorStoreIndex
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.llms import AzureOpenAI

from autollm.auto.fastapi_app import AutoFastAPI
from autollm.serve.utils import load_config_and_initialize_engines
@@ -20,6 +23,11 @@
}
}

# set the environment variables
azure_api_key = os.environ.get("AZURE_API_KEY")
azure_endpoint = os.environ.get("AZURE_API_BASE")
azure_api_version = os.environ.get("AZURE_API_VERSION")


def test_load_config_and_initialize_engines():
# Mock the load_config_and_initialize_engines function with the sample config and documents
@@ -38,8 +46,22 @@ def test_auto_fastapi_from_config():


def test_auto_fastapi_from_query_engine():
from llama_index import VectorStoreIndex
index = VectorStoreIndex.from_documents(documents=documents)
llm = AzureOpenAI(
engine="gpt-35-turbo-1106",
model="gpt-35-turbo-16k",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
query_engine = index.as_query_engine()

# Create the FastAPI app from the query engine
@@ -65,7 +87,22 @@ def test_query_endpoint_from_config():

def test_query_endpoint_from_query_engine():
# Create llama-index query engine
index = VectorStoreIndex.from_documents(documents=documents)
llm = AzureOpenAI(
engine="gpt-35-turbo-1106",
model="gpt-35-turbo-16k",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
query_engine = index.as_query_engine()

# Create the FastAPI app from the query engine
tests/test_auto_lite_llm.py: 18 changes (6 additions & 12 deletions)
@@ -1,23 +1,17 @@
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.llms import ChatMessage, ChatResponse
from llama_index.llms.base import BaseLLM
from llama_index.query_engine import BaseQueryEngine

from autollm.auto.llm import AutoLiteLLM


def test_auto_lite_llm():
document = Document.example()

llm = AutoLiteLLM.from_defaults(model="gpt-3.5-turbo")
llm = AutoLiteLLM.from_defaults(model="azure/gpt-35-turbo-1106")

# Check if the llm is an instance of LLM
assert isinstance(llm, BaseLLM)

service_context = ServiceContext.from_defaults(llm=llm)

index = VectorStoreIndex.from_documents(documents=[document], service_context=service_context)

query_engine = index.as_query_engine()
message = ChatMessage(role="user", content="Hey! how's it going?")
chat_response = llm.chat([message])

# Check if the query_engine is an instance of BaseQueryEngine
assert isinstance(query_engine, BaseQueryEngine)
# Check if the chat response is an instance of ChatResponse
assert isinstance(chat_response, ChatResponse)
tests/test_auto_query_engine.py: 41 changes (35 additions & 6 deletions)
@@ -1,24 +1,53 @@
from llama_index import Document, VectorStoreIndex
import os

from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index.llms import AzureOpenAI
from llama_index.query_engine import BaseQueryEngine

from autollm.auto.query_engine import AutoQueryEngine

# set the environment variables
azure_api_key = os.environ.get("AZURE_API_KEY")
azure_endpoint = os.environ.get("AZURE_API_BASE")
azure_api_version = os.environ.get("AZURE_API_VERSION")

documents = [Document.example()]


def test_auto_query_engine_from_defaults():
vector_store_type = "SimpleVectorStore"
query_engine = AutoQueryEngine.from_defaults(documents=documents, vector_store_type=vector_store_type)
query_engine = AutoQueryEngine.from_defaults(
documents=documents,
vector_store_type=vector_store_type,
llm_model="azure/gpt-35-turbo-1106",
embed_model="azure/text-embedding-ada-002",
)

# Check if the query_engine is an instance of BaseQueryEngine
assert isinstance(query_engine, BaseQueryEngine)


def test_auto_query_engine_from_instances():
vector_store_index = VectorStoreIndex.from_documents(documents=documents)

query_engine = AutoQueryEngine.from_instances(vector_store_index=vector_store_index, service_context=None)

llm = AzureOpenAI(
engine="gpt-35-turbo-1106",
model="gpt-35-turbo-16k",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
vector_store_index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)

query_engine = AutoQueryEngine.from_instances(
vector_store_index=vector_store_index, service_context=service_context)
# Check if the query_engine is an instance of BaseQueryEngine
assert isinstance(query_engine, BaseQueryEngine)

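For reference, both construction paths in this test file (AutoQueryEngine.from_defaults and AutoQueryEngine.from_instances) return a llama-index BaseQueryEngine, so the resulting engines are exercised the same way. A short usage sketch, not part of this commit:

# Usage sketch, not part of this commit: query an engine built as above.
response = query_engine.query("What is this example document about?")
print(response.response)  # the synthesized answer text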
tests/test_auto_service_context.py: 27 changes (25 additions & 2 deletions)
@@ -1,12 +1,35 @@
import os

from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index.llms import AzureOpenAI

from autollm.auto.service_context import AutoServiceContext

# set the environment variables
azure_api_key = os.environ.get("AZURE_API_KEY")
azure_endpoint = os.environ.get("AZURE_API_BASE")
azure_api_version = os.environ.get("AZURE_API_VERSION")


def test_auto_service_context():
document = Document.example()

service_context = AutoServiceContext.from_defaults(enable_cost_calculator=True)
llm = AzureOpenAI(
engine="gpt-35-turbo-1106",
model="gpt-35-turbo-16k",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
service_context = AutoServiceContext.from_defaults(
enable_cost_calculator=True, llm=llm, embed_model=embed_model)

# Check if the service_context is an instance of ServiceContext
assert isinstance(service_context, ServiceContext)
tests/test_auto_vector_store_index.py: 29 changes (26 additions & 3 deletions)
@@ -1,14 +1,37 @@
from llama_index import Document, VectorStoreIndex
import os

from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.embeddings import AzureOpenAIEmbedding
from llama_index.llms import AzureOpenAI
from llama_index.query_engine import BaseQueryEngine

from autollm.auto.vector_store_index import AutoVectorStoreIndex

# set the environment variables
azure_api_key = os.environ.get("AZURE_API_KEY")
azure_endpoint = os.environ.get("AZURE_API_BASE")
azure_api_version = os.environ.get("AZURE_API_VERSION")


def test_auto_vector_store():
documents = [Document.example()]

llm = AzureOpenAI(
engine="gpt-35-turbo-1106",
model="gpt-35-turbo-16k",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
embed_model = AzureOpenAIEmbedding(
model="text-embedding-ada-002",
deployment_name="text-embedding-ada-002",
api_key=azure_api_key,
azure_endpoint=azure_endpoint,
api_version=azure_api_version,
)
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
vector_store_index = AutoVectorStoreIndex.from_defaults(
vector_store_type="SimpleVectorStore", documents=documents)
vector_store_type="SimpleVectorStore", documents=documents, service_context=service_context)

# Check if the vector_store_index is an instance of VectorStoreIndex
assert isinstance(vector_store_index, VectorStoreIndex)
