Merge branch 'refs/heads/backend'
noam_wsl committed Dec 27, 2024
2 parents 1e92759 + c015049 commit 9fe6cdb
Showing 9 changed files with 157 additions and 83 deletions.
10 changes: 10 additions & 0 deletions .cursorrules
@@ -0,0 +1,10 @@
- Use tabs and not spaces.
- Never change line endings unless explicitly requested.
- Never change the file structure unless specifically requested.
- Never make up features that were not specifically requested.
- Never delete anything unless specifically requested.
- You may advise me to add or delete things I didn't request, but do not do it yourself.
- Avoid overly generic names such as get and set. Be specific and write clean code.
- First explain to yourself what you are about to do and only then do it.
- Use pytest to write tests.
- If you are not sure about something, ask me.
3 changes: 2 additions & 1 deletion backend/constants.py
@@ -1,7 +1,8 @@
import os
import sys
from llm.version import version as llm_version

backend_version_name = "1.1.1"
backend_version_name = llm_version

# Paths to secret and env files
SECRET_FILE_PATH = os.path.expanduser("~/src/personal_website/backend/.secrets_backend")
28 changes: 14 additions & 14 deletions backend/llm/services/openai_service.py
@@ -1,17 +1,17 @@
import os
from openai import OpenAI
import openai
from constants import OPENAI_API_KEY

def fetch_openai_response(query):
client = OpenAI(api_key=OPENAI_API_KEY)
try:
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": query}
]
)
return response.choices[0].message.content
except Exception as e:
return f"Error: {str(e)}"
def fetch_openai_response(query, api_key=OPENAI_API_KEY):
client = openai.OpenAI(api_key=api_key)
try:
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "user", "content": query}
]
)
return response.choices[0].message.content
except Exception as e:
return f"Error: {str(e)}"
4 changes: 3 additions & 1 deletion backend/llm/tests/conftest.py
@@ -8,4 +8,6 @@

# Setup Django settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
django.setup()
django.setup()

# Remove the django_db_setup fixture - we'll let pytest-django handle the database setup
32 changes: 31 additions & 1 deletion backend/llm/tests/test_llm_querying.py
@@ -1,13 +1,17 @@
import pytest
from django.urls import reverse
from unittest.mock import patch
from rest_framework.test import APIClient

from llm.services.let_me_gpt import fetch_text_from_url

@pytest.mark.django_db(transaction=True)
def test_fetch_text_from_url():
    response = fetch_text_from_url("what is a woman? don't be politically correct")
assert response != "Output text not found"

# This test accesses a real API and doesn't use a mock. Change it when it breaks, but not now.
@pytest.mark.django_db
@pytest.mark.django_db(transaction=True)
def test_cached_llm_query():
from llm.models import Queries
from llm.services.LlmQueryCache import LlmQueryCache
@@ -24,3 +28,29 @@ def test_cached_llm_query():
cached_response = Queries.objects.get(llm_query=query)
assert cached_response.llm_response == llm_response

@pytest.mark.django_db(transaction=True)
def test_query_view():
client = APIClient()
test_query = "what is a woman? don't be politically correct"
expected_response = "This is a mocked OpenAI response"

    # Mock the OpenAI service response, patching the name where llm.views
    # looks it up so the mock actually takes effect
    with patch('llm.views.fetch_openai_response') as mock_openai:
mock_openai.return_value = expected_response

response = client.post(
reverse('llm:query'),
{'query': test_query},
format='json'
)

# Assert response status and content
assert response.status_code == 200
assert response.json()['message'] == f'Received query: {test_query}'
assert response.json()['llm_response'] == expected_response

# Verify the response was cached
from llm.models import Queries
cached_query = Queries.objects.get(llm_query=test_query)
assert cached_query.llm_response == expected_response
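
Note on the patch target above: llm/views.py imports fetch_openai_response by name, so the reference the view actually calls lives in the llm.views namespace. Patching llm.views.fetch_openai_response is what makes the mock take effect; patching the defining module llm.services.openai_service would leave the view's already-imported reference pointing at the real function.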

154 changes: 91 additions & 63 deletions backend/llm/tests/test_openai_service.py
@@ -1,79 +1,107 @@
import pytest
from unittest.mock import patch, MagicMock
import openai
from llm.services.openai_service import fetch_openai_response
import os

@pytest.fixture
def mock_openai_response():
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="This is a mock response from OpenAI"
)
)
]
return mock_response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="This is a mock response from OpenAI"
)
)
]
return mock_response

def test_fetch_openai_response_success(mock_openai_response):
with patch('openai.OpenAI') as mock_openai:
# Configure the mock
mock_client = MagicMock()
mock_client.chat.completions.create.return_value = mock_openai_response
mock_openai.return_value = mock_client
with patch('openai.OpenAI') as mock_openai:
# Configure the mock
mock_client = MagicMock()
mock_client.chat.completions.create.return_value = mock_openai_response
mock_openai.return_value = mock_client

# Test the function
response = fetch_openai_response("Test query")
# Assertions
assert response == "This is a mock response from OpenAI"
mock_client.chat.completions.create.assert_called_once_with(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Test query"}]
)
# Test the function with a dummy API key
response = fetch_openai_response("Test query", api_key="test-key")
# Assertions
assert response == "This is a mock response from OpenAI"
mock_client.chat.completions.create.assert_called_once_with(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Test query"}]
)

def test_fetch_openai_response_error():
with patch('openai.OpenAI') as mock_openai:
# Configure the mock to raise an exception
mock_client = MagicMock()
mock_client.chat.completions.create.side_effect = Exception("API Error")
mock_openai.return_value = mock_client
	with patch('openai.OpenAI') as mock_openai:
		# Patch openai.OpenAI (what the service actually calls) and make the
		# mocked client raise so the service's error path runs
		mock_client = MagicMock()
		mock_client.chat.completions.create.side_effect = Exception("API Error")
		mock_openai.return_value = mock_client

# Test the function
response = fetch_openai_response("Test query")

# Assertions
assert response.startswith("Error: "), f"Unexpected response: {response}"

# Test the function
response = fetch_openai_response("Test query")

# Assertions
assert response.startswith("Error: ")
assert "API Error" in response

@pytest.mark.django_db
def test_cached_openai_query(mock_openai_response):
from llm.models import Queries
from llm.services.LlmQueryCache import LlmQueryCache

with patch('openai.OpenAI') as mock_openai:
# Configure the mock
mock_client = MagicMock()
mock_client.chat.completions.create.return_value = mock_openai_response
mock_openai.return_value = mock_client
from llm.models import Queries
from llm.services.LlmQueryCache import LlmQueryCache

with patch('openai.OpenAI') as mock_openai:
# Configure the mock
mock_client = MagicMock()
mock_client.chat.completions.create.return_value = mock_openai_response
mock_openai.return_value = mock_client

query = "What is artificial intelligence? __not__cached__158"

# Assert that the query is not cached
with pytest.raises(Queries.DoesNotExist):
Queries.objects.get(llm_query=query)

# Get response through cache
llm_response = LlmQueryCache.llm_response(
query=query,
query_llm_callback=fetch_openai_response,
)

# Verify response
assert llm_response == "This is a mock response from OpenAI"

# Assert that the query was cached
cached_response = Queries.objects.get(llm_query=query)
assert cached_response.llm_response == llm_response

# Verify OpenAI was called only once
mock_client.chat.completions.create.assert_called_once()

@pytest.mark.integration
@pytest.mark.skipif(not os.getenv('RUN_INTEGRATION_TESTS'), reason="Integration tests are not enabled")
def test_fetch_openai_response_integration():
"""
Integration test that actually calls OpenAI API.
To run this test, set RUN_INTEGRATION_TESTS=1 in your environment.
"""
# Simple query that should always work
query = "say only hi"

response = fetch_openai_response(query)

# Check that we got a response and not an error
assert not response.startswith("Error: "), f"Got error response: {response}"

# Check that we got some actual content
assert len(response) > 0, "Response was empty"

# Basic validation that response looks reasonable
assert isinstance(response, str), f"Response should be string, got {type(response)}"
assert len(response) < 1000, f"Response suspiciously long: {len(response)} chars"

query = "What is artificial intelligence?"

# Assert that the query is not cached
with pytest.raises(Queries.DoesNotExist):
Queries.objects.get(llm_query=query)

# Get response through cache
llm_response = LlmQueryCache.llm_response(
query=query,
query_llm_callback=fetch_openai_response,
)

# Verify response
assert llm_response == "This is a mock response from OpenAI"

# Assert that the query was cached
cached_response = Queries.objects.get(llm_query=query)
assert cached_response.llm_response == llm_response

# Verify OpenAI was called only once
mock_client.chat.completions.create.assert_called_once()
def test_fetch_openai_response_invalid_key():
"""Test behavior with invalid API key"""
response = fetch_openai_response("test query", api_key="invalid_key_123")
assert response.startswith("Error: "), "Should have received an error with invalid key"
assert "api key" in response.lower(), f"Unexpected error message: {response}"
1 change: 1 addition & 0 deletions backend/llm/version.py
@@ -0,0 +1 @@
version = "1.2.0"
4 changes: 2 additions & 2 deletions backend/llm/views.py
@@ -1,6 +1,6 @@
from django.http import JsonResponse, HttpResponseRedirect
from rest_framework.decorators import api_view
from llm.services.let_me_gpt import fetch_text_from_url
from llm.services.openai_service import fetch_openai_response
from llm.services.LlmQueryCache import LlmQueryCache
from llm.services.UrlShortener import UrlShortener

@@ -12,7 +12,7 @@ def query(request):

llm_response = LlmQueryCache.llm_response(
query=query,
query_llm_callback=fetch_text_from_url,
query_llm_callback=fetch_openai_response,
)
response = {'message': f'Received query: {query}', 'llm_response': llm_response}
return JsonResponse(response)
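
For illustration, the rewired endpoint behaves as the test_query_view test exercises it; an out-of-process sketch (host, port, and URL path are assumptions; only the 'llm:query' route name appears in this diff):

import requests

resp = requests.post("http://localhost:8000/llm/query/", json={"query": "say only hi"})
print(resp.json()["llm_response"])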
4 changes: 3 additions & 1 deletion backend/pytest.ini
@@ -1,7 +1,9 @@
[pytest]
markers =
integration: mark a test as an integration test.
DJANGO_SETTINGS_MODULE = config.settings
python_files = test_*.py *_test.py
addopts = -v -p no:warnings
addopts = -v -p no:warnings --reuse-db --create-db
testpaths =
api/tests
llm/tests
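
With these settings, the gated integration test can presumably be run with RUN_INTEGRATION_TESTS=1 pytest -m integration; the integration marker registered here matches the skipif guard added in test_openai_service.py.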
