Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Flag options #200

Merged
merged 19 commits into from
Nov 16, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion src/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,7 @@ lint:
black . --check

test:
pytest .
echo "Running tests for sherpa"
pytest tests
echo "Running tests for sherpa slackapp"
cd apps/slackapp && pytest tests
3 changes: 3 additions & 0 deletions src/apps/slackapp/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,9 @@ build-backend = "poetry.core.masonry.api"
pythonpath = [
"."
]
markers = [
"external_api: this test calls 3rd party APIs"
]

[tool.black]
line-length = 88
Expand Down
166 changes: 99 additions & 67 deletions src/apps/slackapp/slackapp/bolt_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,14 @@
from typing import Dict, List

from flask import Flask, request
from langchain.schema import AIMessage, BaseMessage, HumanMessage
from loguru import logger
from slack_bolt import App
from slack_bolt.adapter.flask import SlackRequestHandler
from slackapp.routes.whitelist import whitelist_blueprint

import sherpa_ai.config as cfg
from sherpa_ai.config import AgentConfig
from sherpa_ai.connectors.vectorstores import get_vectordb
from sherpa_ai.database.user_usage_tracker import UserUsageTracker
from sherpa_ai.error_handling import AgentErrorHandler
Expand Down Expand Up @@ -47,23 +49,48 @@ def hello_command(ack, body):
ack(f"Hi, <@{user_id}>!")


def contains_verbose(query: str) -> bool:
"""looks for -verbose in the question and returns True or False"""
return "-verbose" in query.lower()
def convert_thread_history_messages(
    messages: List[dict],
    ai_id: str = "",
    ai_name: str = "Sherpa",
) -> List[BaseMessage]:
    """Convert raw Slack thread messages into LangChain chat messages.

    Args:
        messages (List[dict]): raw Slack message payloads, e.g. from the
            ``conversations.replies`` API.
        ai_id (str): Slack user id of the bot. Messages authored by this id
            become ``AIMessage``; all others become ``HumanMessage``. The
            original code read ``self.ai_id`` here, which is undefined in a
            module-level function (NameError at runtime), so the id is now an
            explicit, optional parameter.
        ai_name (str): display name substituted for the bot's ``@<id>``
            mention inside the message text.

    Returns:
        List[BaseMessage]: converted messages, preserving the input order.
    """
    results: List[BaseMessage] = []

    for message in messages:
        logger.info(message)
        # Slack threads may contain non-message events; keep only plain
        # message/text entries.
        if message["type"] not in ("message", "text"):
            continue

        # NOTE(review): messages without a "user" key (e.g. some bot posts)
        # are treated as human messages rather than raising KeyError.
        message_cls = AIMessage if message.get("user") == ai_id else HumanMessage

        text = message["text"]
        if ai_id:
            # Replace the bot's raw @<user_id> mention with a readable name.
            text = text.replace(f"@{ai_id}", f"@{ai_name}")

        text = text.split("#verbose", 1)[0]  # remove everything after #verbose
        text = text.replace("-verbose", "")  # remove -verbose if it exists
        results.append(message_cls(content=text))

    return results


def get_response(
20001LastOrder marked this conversation as resolved.
Show resolved Hide resolved
question: str,
previous_messages: List[Dict],
verbose_logger: BaseVerboseLogger,
previous_messages: List[BaseMessage],
user_id: str,
team_id: str,
):
verbose_logger: BaseVerboseLogger,
bot_info: Dict[str, str],
) -> str:
"""
Get response from the task agent for the question

Args:
question (str): question to be answered
previous_messages (List[BaseMessage]): previous messages in the thread
user_id (str): user id of the user who asked the question
team_id (str): team id of the workspace
verbose_logger (BaseVerboseLogger): verbose logger to be used
bot_info (Dict[str, str]): information of the Slack bot

Returns:
str: response from the task agent
"""

llm = SherpaChatOpenAI(
openai_api_key=cfg.OPENAI_API_KEY,
request_timeout=120,
Expand All @@ -74,84 +101,73 @@ def get_response(

memory = get_vectordb()

tools = get_tools(memory)
question, agent_config = AgentConfig.from_input(question)
verbose_logger = verbose_logger if agent_config.verbose else DummyVerboseLogger()

tools = get_tools(memory, agent_config)
ai_name = "Sherpa"
ai_id = bot["user_id"]
ai_id = bot_info["user_id"]

task_agent = TaskAgent.from_llm_and_tools(
ai_name="Sherpa",
ai_role="assistant",
ai_id=bot["user_id"],
ai_id=bot_info["user_id"],
memory=memory,
tools=tools,
previous_messages=previous_messages,
llm=llm,
verbose_logger=verbose_logger,
agent_config=agent_config,
)
error_handler = AgentErrorHandler()

question = question.replace(f"@{ai_id}", f"@{ai_name}")
if contains_verbosex(query=question):
logger.info("Verbose mode is on, show all")
question = question.replace("-verbose", "")
response = error_handler.run_with_error_handling(task_agent.run, task=question)
agent_log = task_agent.logger # logger is updated after running task_agent.run
try: # in case log_formatter fails
verbose_message = log_formatter(agent_log)
except KeyError:
verbose_message = str(agent_log)
return response, verbose_message

elif contains_verbose(query=question):
logger.info("Verbose mode is on, commands only")
question = question.replace("-verbose", "")
response = error_handler.run_with_error_handling(task_agent.run, task=question)

agent_log = task_agent.logger # logger is updated after running task_agent.run
try: # in case log_formatter fails
verbose_message = show_commands_only(agent_log)
except KeyError:
verbose_message = str(agent_log)
return response, verbose_message
response = error_handler.run_with_error_handling(task_agent.run, task=question)

else:
logger.info("Verbose mode is off")
response = error_handler.run_with_error_handling(task_agent.run, task=question)
return response, None

def file_event_handler(say , files , team_id ,user_id , thread_ts , question):
if files[0]['size'] > cfg.FILE_SIZE_LIMIT:
say("Sorry, the file you attached is larger than 2mb. Please try again with a smaller file" , thread_ts=thread_ts)
return { "status":"error" }
file_prompt = QuestionWithFileHandler( question=question , team_id=team_id , user_id=user_id, files=files , token=cfg.SLACK_OAUTH_TOKEN )
return response


def file_event_handler(say, files, team_id, user_id, thread_ts, question):
    """Handle a Slack message that carries file attachments.

    Rejects files over the configured size limit, otherwise reconstructs the
    question text using the attached file's content.

    Args:
        say: Slack Bolt ``say`` callable used to reply in the thread.
        files: list of Slack file payloads attached to the message.
        team_id: Slack workspace (team) id.
        user_id: Slack user id of the author.
        thread_ts: timestamp of the thread to reply into.
        question: original question text from the message.

    Returns:
        dict: ``{"status": "success", "question": <rewritten question>}`` on
        success, or ``{"status": "error"}`` after replying with an error
        message in the thread.
    """
    # Only the first attachment's size is checked — assumes a single file per
    # message; TODO confirm multi-file behavior.
    if files[0]["size"] > cfg.FILE_SIZE_LIMIT:
        say(
            "Sorry, the file you attached is larger than 2mb. Please try again with a smaller file",
            thread_ts=thread_ts,
        )
        return {"status": "error"}
    file_prompt = QuestionWithFileHandler(
        question=question,
        team_id=team_id,
        user_id=user_id,
        files=files,
        token=cfg.SLACK_OAUTH_TOKEN,
    )
    file_prompt_data = file_prompt.reconstruct_prompt_with_file()
    if file_prompt_data["status"] == "success":
        return {"status": "success", "question": file_prompt_data["data"]}
    else:
        # Surface the handler's error message to the user in-thread.
        say(file_prompt_data["message"], thread_ts=thread_ts)
        return {"status": "error"}


@app.event("app_mention")
def event_test(client, say, event):
question = event["text"]
thread_ts = event.get("thread_ts", None) or event["ts"]
replies = client.conversations_replies(channel=event["channel"], ts=thread_ts)
previous_messages = replies["messages"][:-1]


# check if the verbose is on
verbose_on = contains_verbose(question)
verbose_logger = (
SlackVerboseLogger(say, thread_ts) if verbose_on else DummyVerboseLogger()
)
previous_messages = convert_thread_history_messages(previous_messages)

input_message = replies["messages"][-1]
user_id = input_message["user"]
# teamid is found on different places depending on the message from slack
user_id = input_message["user"]

# teamid is found on different places depending on the message from slack
# if file exist it will be inside one of the files other wise on the parent message
team_id = input_message['files'][0]["user_team"] if 'files' in input_message else input_message["team"]
team_id = (
input_message["files"][0]["user_team"]
if "files" in input_message
else input_message["team"]
)
combined_id = user_id + "_" + team_id

if cfg.FLASK_DEBUG:
Expand All @@ -173,26 +189,42 @@ def event_test(client, say, event):

if can_excute:
if "files" in event:
files = event['files']
file_event = file_event_handler( files=files ,say=say ,team_id=team_id , thread_ts=thread_ts , user_id=user_id , question=question)
if file_event['status']=="error":
files = event["files"]
file_event = file_event_handler(
files=files,
say=say,
team_id=team_id,
thread_ts=thread_ts,
user_id=user_id,
question=question,
)
if file_event["status"] == "error":
return
else:
question = file_event['question']
question = file_event["question"]
else:
# used to reconstruct the question. if the question contains a link recreate
# them so that they contain scraped and summarized content of the link
reconstructor = PromptReconstructor(
question=question, slack_message=[replies["messages"][-1]]
)
question = reconstructor.reconstruct_prompt()
results, _ = get_response(
question, previous_messages, verbose_logger, user_id, team_id

results = get_response(
question,
previous_messages,
user_id,
team_id,
verbose_logger=SlackVerboseLogger(say, thread_ts),
bot_info=bot,
)

say(results, thread_ts=thread_ts)
else:
say(f"""I'm sorry for any inconvenience, but it appears you've gone over your daily token limit. Don't worry, you'll be able to use our service again in approximately {usage_cheker['time_left']}.Thank you for your patience and understanding.""", thread_ts=thread_ts)
say(
f"""I'm sorry for any inconvenience, but it appears you've gone over your daily token limit. Don't worry, you'll be able to use our service again in approximately {usage_cheker['time_left']}.Thank you for your patience and understanding.""",
thread_ts=thread_ts,
)


@app.event("app_home_opened")
Expand Down
57 changes: 57 additions & 0 deletions src/apps/slackapp/tests/test_get_response.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
from datetime import datetime

import pytest
from slackapp.bolt_app import get_response

import sherpa_ai.config as cfg
from sherpa_ai.verbose_loggers import DummyVerboseLogger


@pytest.mark.external_api
def test_get_response_contains_todays_date():
    """Ask the agent for today's date and check the reply contains it."""
    # Requires a live search backend; skip when the key is not configured.
    if cfg.SERPER_API_KEY is None:
        pytest.skip(
            "SERPER_API_KEY not found in environment variables, skipping this test"
        )

    question = "What is the date today, using the following format: YYYY-MM-DD?"
    expected_date = datetime.now().strftime("%Y-%m-%d")

    response = get_response(
        question=question,
        previous_messages=[],
        user_id="",
        team_id="",
        verbose_logger=DummyVerboseLogger(),
        bot_info={"user_id": "Sherpa"},
    )
    assert expected_date in response, "Today's date not found in response"


@pytest.mark.external_api
def test_response_contains_correct_info():
    """Ask about AutoGPT vs MetaGPT and check both names appear in the reply.

    Calls a live search backend; skipped when SERPER_API_KEY is unset.
    """
    question = "What is AutoGPT and how does it compare with MetaGPT"

    if cfg.SERPER_API_KEY is None:
        pytest.skip(
            "SERPER_API_KEY not found in environment variables, skipping this test"
        )

    verbose_logger = DummyVerboseLogger()

    response = get_response(
        question=question,
        previous_messages=[],
        user_id="",
        team_id="",
        verbose_logger=verbose_logger,
        bot_info={"user_id": "Sherpa"},
    )

    print(response)
    assert response is not None
    assert response != ""
    assert "AutoGPT" in response
    assert "MetaGPT" in response
2 changes: 1 addition & 1 deletion src/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ pythonpath = [
"."
]
markers = [
"real: this test calls 3rd party APIs"
"external_api: this test calls 3rd party APIs"
]

[tool.black]
Expand Down
4 changes: 3 additions & 1 deletion src/sherpa_ai/actions/google_search.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
from loguru import logger

from sherpa_ai.actions.base import BaseAction
from sherpa_ai.config.task_config import AgentConfig
from sherpa_ai.tools import SearchTool

SEARCH_SUMMARY_DESCRIPTION = """Role Description: {role_description}
Expand All @@ -23,6 +24,7 @@ def __init__(
task: str,
llm: BaseLanguageModel,
description: str = SEARCH_SUMMARY_DESCRIPTION,
config: AgentConfig = AgentConfig(),
n: int = 5,
):
self.role_description = role_description
Expand All @@ -32,7 +34,7 @@ def __init__(
self.llm = llm
self.n = n

self.search_tool = SearchTool()
self.search_tool = SearchTool(config=config)

def execute(self, query) -> str:
result = self.search_tool._run(query)
Expand Down
4 changes: 2 additions & 2 deletions src/sherpa_ai/actions/planning.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ def execute(
self,
task: str,
agent_pool_description: str,
last_plan: Optional[str],
feedback: Optional[str],
last_plan: Optional[str] = None,
feedback: Optional[str] = None,
) -> Plan:
"""
Execute the action
Expand Down
6 changes: 6 additions & 0 deletions src/sherpa_ai/config.py → src/sherpa_ai/config/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
from dotenv import find_dotenv, load_dotenv
from loguru import logger

from sherpa_ai.config.task_config import AgentConfig

env_path = find_dotenv(usecwd=True)
load_dotenv(env_path)

Expand Down Expand Up @@ -109,3 +111,7 @@ def check_vectordb_setting():
logger.info("Config: OpenAI environment variables are set")

check_vectordb_setting()

__all__ = [
"AgentConfig",
]
Loading