Improve config layer #80

Open · wants to merge 1 commit into main
2 changes: 1 addition & 1 deletion .envrc
@@ -1,4 +1,4 @@
-export VIRTUAL_ENV=."venv"
+export VIRTUAL_ENV="lambda"."venv"
 layout python
 
 [[ -f .envrc.private ]] && source_env .envrc.private
1 change: 1 addition & 0 deletions Makefile
@@ -16,6 +16,7 @@ clean:
 	rm -rf $(BUILD_PACKAGE_DIR)
 
 dev: clean
+	cd lambda
 	python -m venv .venv
 	. .venv/bin/activate
 	pip install -r lambda/requirements-dev.txt
35 changes: 8 additions & 27 deletions lambda/lambda_function.py
@@ -29,27 +29,6 @@
 config = load_config()
 canned_response = CannedResponse("en-US")
 
-LLM_URL = config["llm_url"]
-LLM_KEY = config["llm_key"]
-LLM_MODEL = config["llm_model"]
-LLM_SYSTEM_PROMPT = config.get(
-    "llm_system_prompt",
-    """
-You are a helpful AI assistant that responds by voice.
-Your answers should be simple and quick.
-Don't speak back for more than a couple of sentences.
-If you need to say more things, say that you're happy to continue,
-and wait for the user to ask you to continue.
-Remember, your objective is to reply as if your are having a natural
-conversation, so be relatively brief, and keep that in mind when replying.
-You were created by jpt.land as part of a personal exploration project.
-Paulo Truta is a software engineer that worked hard to make you easy!
-If the user asks about you, tell him you are the Alexa AI Skill.
-You're an helpful and funny artificial powered assistant,
-ready to answer any questions a person may have, right on Amazon Alexa.
-""",
-)
 
 
 class LLMQuestionProxy:
     """Handler to communicate with an LLM via API or Webhook.
@@ -65,7 +65,7 @@ def api_request(self, question: str) -> dict:
         )
 
         try:
-            response = self.llm_client.api_request(LLM_SYSTEM_PROMPT, question)
+            response = self.llm_client.api_request(config.llm_system_prompt, question)
 
             logger.info(response)
 
@@ -93,18 +72,20 @@ def webhook_request(self, question: str, context: dict) -> dict:
 
     def ask(self, question: str, context: dict = {}) -> dict:
         """Ask a question and get a response."""
-        if LLM_MODEL != "webhook":
-            logger.info("Using API request")
-            return self.api_request(question)
-        else:
+        if config.llm_model == "webhook":
             logger.info("Using Webhook request")
             return self.webhook_request(question, context)
+        else:
+            logger.info("Using API request")
+            return self.api_request(question)
 
 
 class BaseRequestHandler(AbstractRequestHandler):
     """Base class for request handlers."""
 
-    question = LLMQuestionProxy(LLMClient(LLM_URL, LLM_KEY, LLM_MODEL))
+    question = LLMQuestionProxy(
+        LLMClient(config.llm_url, config.llm_key, config.llm_model)
+    )
 
     def can_handle(self, handler_input: HandlerInput) -> bool:
         return True
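
Since load_config() now returns a typed object instead of a plain dict, the handlers read settings as attributes, and a malformed config fails at startup rather than deep inside a request. A minimal sketch of the difference, assuming the same config.json fields as above (the snippet itself is illustrative, not part of the PR):

# Hypothetical illustration only.
from llm_intent.utils import load_config

config = load_config()  # missing required fields already raised in here

# Old style: a typo in the key only surfaces later, as a KeyError at call time.
# url = config["llm_url"]

# New style: fields were validated up front; a typo is an immediate AttributeError.
url = config.llm_url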
30 changes: 27 additions & 3 deletions lambda/llm_intent/utils.py
@@ -2,15 +2,39 @@
 import random
 from os.path import exists
 
-CONFIG_FILE = "config.json"
+from pydantic import BaseModel
 
+CONFIG_FILE = "config.json"
 
-def load_config() -> dict:
+DEFAULT_PROMPT = """
+You are a helpful AI assistant that responds by voice.
+Your answers should be simple and quick.
+Don't speak back for more than a couple of sentences.
+If you need to say more things, say that you're happy to continue,
+and wait for the user to ask you to continue.
+Remember, your objective is to reply as if you are having a natural
+conversation, so be relatively brief, and keep that in mind when replying.
+You were created by jpt.land as part of a personal exploration project.
+Paulo Truta is a software engineer that worked hard to make you easy!
+If the user asks about you, tell them you are the Alexa AI Skill.
+You're a helpful and funny AI-powered assistant,
+ready to answer any questions a person may have, right on Amazon Alexa.
+"""
+
+
+class Config(BaseModel):
+    llm_url: str
+    llm_key: str
+    llm_model: str
+    llm_system_prompt: str = DEFAULT_PROMPT
+
+
+def load_config() -> Config:
     if not exists(CONFIG_FILE):
         raise ValueError("Config file does not exist")
 
     with open(CONFIG_FILE) as f:
-        return json.load(f)
+        return Config.model_validate(json.load(f))
 
 
 class CannedResponse:
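
For reference, pydantic's model_validate accepts the decoded JSON dict and returns a typed Config instance, filling llm_system_prompt with DEFAULT_PROMPT when the key is absent. A small sketch of that behavior (the sample values are made up):

# Illustrative values only, not taken from the PR.
from llm_intent.utils import Config, DEFAULT_PROMPT

raw = {"llm_url": "http://example.org", "llm_key": "secret", "llm_model": "llama3"}
cfg = Config.model_validate(raw)
assert cfg.llm_system_prompt == DEFAULT_PROMPT  # default filled in for the missing key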
1 change: 1 addition & 0 deletions lambda/requirements.txt
@@ -1,2 +1,3 @@
 urllib3==1.26.15
 ask-sdk-core==1.19.0
+pydantic==2.10.5
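
The 2.x pin is load-bearing: Config.model_validate is a pydantic v2 API (v1 spelled it parse_obj). A quick guard one could drop into a smoke test, assuming nothing beyond the pinned package:

import pydantic

# pydantic.VERSION is a plain string such as "2.10.5"
assert pydantic.VERSION.startswith("2."), "Config.model_validate requires pydantic v2"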
53 changes: 49 additions & 4 deletions lambda/tests/test_utils.py
@@ -1,17 +1,62 @@
+import json
 from unittest import TestCase
 from unittest.mock import mock_open, patch
 
-from llm_intent.utils import CONFIG_FILE, CannedResponse, load_config
+from llm_intent.utils import CONFIG_FILE, DEFAULT_PROMPT, CannedResponse, load_config
+from pydantic import ValidationError
 
 
 class TestLoadConfig(TestCase):
-    @patch("builtins.open", new_callable=mock_open, read_data='{"key": "value"}')
+    MINIMUM_CONFIG = {
+        "llm_url": "http://example.org",
+        "llm_key": "llm_key",
+        "llm_model": "llm_model",
+    }
+
+    COMPLETE_CONFIG = {
+        "llm_url": "http://example.org",
+        "llm_key": "llm_key",
+        "llm_model": "llm_model",
+        "llm_system_prompt": "llm_prompt",
+    }
+
+    @patch(
+        "builtins.open", new_callable=mock_open, read_data=json.dumps(COMPLETE_CONFIG)
+    )
     @patch("llm_intent.utils.exists", return_value=True)
     def test_load_config_success(self, mock_exists, mock_open_file):
         """Test loading configuration successfully."""
-        expected_config = {"key": "value"}
         config = load_config()
-        self.assertEqual(config, expected_config)
+        self.assertEqual(config.llm_url, TestLoadConfig.COMPLETE_CONFIG["llm_url"])
+        self.assertEqual(config.llm_key, TestLoadConfig.COMPLETE_CONFIG["llm_key"])
+        self.assertEqual(config.llm_model, TestLoadConfig.COMPLETE_CONFIG["llm_model"])
+        self.assertEqual(
+            config.llm_system_prompt,
+            TestLoadConfig.COMPLETE_CONFIG["llm_system_prompt"],
+        )
         mock_exists.assert_called_once_with(CONFIG_FILE)
         mock_open_file.assert_called_once_with(CONFIG_FILE)
+
+    @patch(
+        "builtins.open", new_callable=mock_open, read_data=json.dumps(MINIMUM_CONFIG)
+    )
+    @patch("llm_intent.utils.exists", return_value=True)
+    def test_load_config_default_prompt(self, mock_exists, mock_open_file):
+        """Test that a config without a prompt falls back to the default."""
+        config = load_config()
+        self.assertEqual(config.llm_url, TestLoadConfig.MINIMUM_CONFIG["llm_url"])
+        self.assertEqual(config.llm_key, TestLoadConfig.MINIMUM_CONFIG["llm_key"])
+        self.assertEqual(config.llm_model, TestLoadConfig.MINIMUM_CONFIG["llm_model"])
+        self.assertEqual(config.llm_system_prompt, DEFAULT_PROMPT)
+        mock_exists.assert_called_once_with(CONFIG_FILE)
+        mock_open_file.assert_called_once_with(CONFIG_FILE)
+
+    @patch("builtins.open", new_callable=mock_open, read_data=json.dumps({}))
+    @patch("llm_intent.utils.exists", return_value=True)
+    def test_load_config_missing_fields(self, mock_exists, mock_open_file):
+        """Test that a config with missing required fields raises."""
+        with self.assertRaises(ValidationError):
+            load_config()
+        mock_exists.assert_called_once_with(CONFIG_FILE)
+        mock_open_file.assert_called_once_with(CONFIG_FILE)
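
The missing-fields test leans on pydantic raising ValidationError when required fields are absent. A standalone sketch of that behavior (field names taken from the Config model above):

from pydantic import ValidationError
from llm_intent.utils import Config

try:
    Config.model_validate({})  # llm_url, llm_key and llm_model all missing
except ValidationError as exc:
    print(exc.error_count())  # 3: one "missing" error per required field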
