Skip to content
This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

Commit

Permalink
fix tests
Browse files Browse the repository at this point in the history
  • Loading branch information
SeeknnDestroy committed Oct 19, 2023
1 parent 2a69874 commit b5809de
Show file tree
Hide file tree
Showing 5 changed files with 16 additions and 39 deletions.
7 changes: 3 additions & 4 deletions tests/test_auto_llm.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
from llama_index import Document, ServiceContext, VectorStoreIndex
from llama_index.llms.base import LLM
from llama_index.query_engine import BaseQueryEngine

from autollm.auto.llm import AutoLLM

Expand All @@ -18,7 +19,5 @@ def test_auto_llm():

query_engine = index.as_query_engine()

response = query_engine.query("What is the meaning of life?")

# Check if the response is not None
assert response.response is not None
# Check if the query_engine is an instance of BaseQueryEngine
assert isinstance(query_engine, BaseQueryEngine)
9 changes: 4 additions & 5 deletions tests/test_auto_query_engine.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,13 @@
from llama_index import Document
from llama_index.query_engine import BaseQueryEngine

from autollm.auto.query_engine import AutoQueryEngine


def test_auto_query_engine():
documents = Document.example()
documents = [Document.example()]
vector_store_params = {"vector_store_type": "VectorStoreIndex", "documents": documents}
query_engine = AutoQueryEngine.from_parameters(vector_store_params=vector_store_params)

response = query_engine.query("What is the meaning of life?")

# Check if the response is not None
assert response.response is not None
# Check if the query_engine is an instance of BaseQueryEngine
assert isinstance(query_engine, BaseQueryEngine)
7 changes: 5 additions & 2 deletions tests/test_auto_service_context.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from llama_index import Document, VectorStoreIndex
from llama_index import Document, ServiceContext, VectorStoreIndex

from autollm.auto.service_context import AutoServiceContext

Expand All @@ -8,6 +8,9 @@ def test_auto_service_context():

service_context = AutoServiceContext.from_defaults(enable_cost_calculator=True)

# Check if the service_context is an instance of ServiceContext
assert isinstance(service_context, ServiceContext)

index = VectorStoreIndex.from_documents(documents=[document], service_context=service_context)

query_engine = index.as_query_engine()
Expand All @@ -17,6 +20,6 @@ def test_auto_service_context():
# Check if the response is not None
assert response.response is not None

# Check if the total token cost is greater than 0
# Check if the cost calculating handler is working
cost_caltulator = service_context.callback_manager.handlers[0]
assert cost_caltulator.total_llm_token_cost > 0
9 changes: 4 additions & 5 deletions tests/test_auto_vector_store_index.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
from llama_index import Document, VectorStoreIndex
from llama_index.query_engine import BaseQueryEngine

from autollm.auto.vector_store_index import AutoVectorStoreIndex


def test_auto_vector_store():
documents = Document.example()
documents = [Document.example()]

vector_store_index = AutoVectorStoreIndex.from_defaults(
vector_store_type="VectorStoreIndex", documents=documents)
Expand All @@ -14,7 +15,5 @@ def test_auto_vector_store():

query_engine = vector_store_index.as_query_engine()

response = query_engine.query("What is the meaning of life?")

# Check if the response is not None
assert response.response is not None
# Check if the query_engine is an instance of BaseQueryEngine
assert isinstance(query_engine, BaseQueryEngine)
23 changes: 0 additions & 23 deletions tests/test_cost_calculating_handler.py

This file was deleted.

0 comments on commit b5809de

Please sign in to comment.