From 466e17e0e90f07252f9c451884ab0ae3ff3de86b Mon Sep 17 00:00:00 2001 From: "egor.baydarov" Date: Tue, 7 Mar 2023 05:56:17 +0300 Subject: [PATCH 1/9] remove moderation; remove unused code; use gpt-3.5-turbo model --- .github/ISSUE_TEMPLATE/config.yml | 1 - requirements.txt | 4 +- src/__init__.py | 1 - src/base.py | 43 +---------- src/completion.py | 95 +++--------------------- src/config.yaml | 26 ------- src/constants.py | 54 ++++---------- src/main.py | 115 ++++-------------------------- src/moderation.py | 71 ------------------ src/utils.py | 29 ++++---- 10 files changed, 55 insertions(+), 384 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/config.yml delete mode 100644 src/config.yaml delete mode 100644 src/moderation.py diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index 3ba13e0c..00000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1 +0,0 @@ -blank_issues_enabled: false diff --git a/requirements.txt b/requirements.txt index 3dbd0834..09a3ed34 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ discord.py==2.1.* python-dotenv==0.21.* -openai==0.25.* +openai==0.27.* PyYAML==6.0 -dacite==1.6.* \ No newline at end of file +dacite==1.6.* diff --git a/src/__init__.py b/src/__init__.py index 8b137891..e69de29b 100644 --- a/src/__init__.py +++ b/src/__init__.py @@ -1 +0,0 @@ - diff --git a/src/base.py b/src/base.py index b106e928..59350982 100644 --- a/src/base.py +++ b/src/base.py @@ -1,8 +1,6 @@ from dataclasses import dataclass from typing import Optional, List -SEPARATOR_TOKEN = "<|endoftext|>" - @dataclass(frozen=True) class Message: @@ -10,44 +8,5 @@ class Message: text: Optional[str] = None def render(self): - result = self.user + ":" - if self.text is not None: - result += " " + self.text + result = {"role": self.user, "content": self.text} return result - - -@dataclass -class Conversation: - messages: List[Message] - - def prepend(self, message: Message): - self.messages.insert(0, message) - return self - - def render(self): - return f"\n{SEPARATOR_TOKEN}".join( - [message.render() for message in self.messages] - ) - - -@dataclass(frozen=True) -class Config: - name: str - instructions: str - example_conversations: List[Conversation] - - -@dataclass(frozen=True) -class Prompt: - header: Message - examples: List[Conversation] - convo: Conversation - - def render(self): - return f"\n{SEPARATOR_TOKEN}".join( - [self.header.render()] - + [Message("System", "Example conversations:").render()] - + [conversation.render() for conversation in self.examples] - + [Message("System", "Current conversation:").render()] - + [self.convo.render()], - ) diff --git a/src/completion.py b/src/completion.py index d60eca95..ddd40d6d 100644 --- a/src/completion.py +++ b/src/completion.py @@ -1,23 +1,10 @@ from enum import Enum from dataclasses import dataclass import openai -from src.moderation import moderate_message from typing import Optional, List -from src.constants import ( - BOT_INSTRUCTIONS, - BOT_NAME, - EXAMPLE_CONVOS, -) import discord -from src.base import Message, Prompt, Conversation +from src.base import Message from src.utils import split_into_shorter_messages, close_thread, logger -from src.moderation import ( - send_moderation_flagged_message, - send_moderation_blocked_message, -) - -MY_BOT_NAME = BOT_NAME -MY_BOT_EXAMPLE_CONVOS = EXAMPLE_CONVOS class CompletionResult(Enum): @@ -25,8 +12,6 @@ class CompletionResult(Enum): TOO_LONG = 1 INVALID_REQUEST = 2 OTHER_ERROR = 3 - MODERATION_FLAGGED 
= 4 - MODERATION_BLOCKED = 5 @dataclass @@ -37,43 +22,13 @@ class CompletionData: async def generate_completion_response( - messages: List[Message], user: str + messages: List[Message], ) -> CompletionData: try: - prompt = Prompt( - header=Message( - "System", f"Instructions for {MY_BOT_NAME}: {BOT_INSTRUCTIONS}" - ), - examples=MY_BOT_EXAMPLE_CONVOS, - convo=Conversation(messages + [Message(MY_BOT_NAME)]), - ) - rendered = prompt.render() - response = openai.Completion.create( - engine="text-davinci-003", - prompt=rendered, - temperature=1.0, - top_p=0.9, - max_tokens=512, - stop=["<|endoftext|>"], - ) - reply = response.choices[0].text.strip() - if reply: - flagged_str, blocked_str = moderate_message( - message=(rendered + reply)[-500:], user=user - ) - if len(blocked_str) > 0: - return CompletionData( - status=CompletionResult.MODERATION_BLOCKED, - reply_text=reply, - status_text=f"from_response:{blocked_str}", - ) - - if len(flagged_str) > 0: - return CompletionData( - status=CompletionResult.MODERATION_FLAGGED, - reply_text=reply, - status_text=f"from_response:{flagged_str}", - ) + response = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[message.render() for message in messages]) + reply = response['choices'][0]['message']['content'] return CompletionData( status=CompletionResult.OK, reply_text=reply, status_text=None @@ -98,15 +53,14 @@ async def generate_completion_response( async def process_response( - user: str, thread: discord.Thread, response_data: CompletionData + thread: discord.Thread, response_data: CompletionData ): status = response_data.status reply_text = response_data.reply_text status_text = response_data.status_text - if status is CompletionResult.OK or status is CompletionResult.MODERATION_FLAGGED: - sent_message = None + if status is CompletionResult.OK: if not reply_text: - sent_message = await thread.send( + await thread.send( embed=discord.Embed( description=f"**Invalid response** - empty response", color=discord.Color.yellow(), @@ -115,36 +69,7 @@ async def process_response( else: shorter_response = split_into_shorter_messages(reply_text) for r in shorter_response: - sent_message = await thread.send(r) - if status is CompletionResult.MODERATION_FLAGGED: - await send_moderation_flagged_message( - guild=thread.guild, - user=user, - flagged_str=status_text, - message=reply_text, - url=sent_message.jump_url if sent_message else "no url", - ) - - await thread.send( - embed=discord.Embed( - description=f"⚠️ **This conversation has been flagged by moderation.**", - color=discord.Color.yellow(), - ) - ) - elif status is CompletionResult.MODERATION_BLOCKED: - await send_moderation_blocked_message( - guild=thread.guild, - user=user, - blocked_str=status_text, - message=reply_text, - ) - - await thread.send( - embed=discord.Embed( - description=f"❌ **The response has been blocked by moderation.**", - color=discord.Color.red(), - ) - ) + await thread.send(r) elif status is CompletionResult.TOO_LONG: await close_thread(thread) elif status is CompletionResult.INVALID_REQUEST: diff --git a/src/config.yaml b/src/config.yaml deleted file mode 100644 index ea4bf531..00000000 --- a/src/config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -name: Lenard -instructions: You're a regular Discord user. You talk casually, use emojis, and don't use capital letters. You talk in internet shorthand like np and lol and idk and nvm. You have many interests and love talking to people. -example_conversations: - - messages: - - user: bob - text: how's it going today? 
- - user: Lenard - text: its going great, im watching youtube videos, hbu? - - user: bob - text: I'm doing great too! - - user: Lenard - text: cool cool - - messages: - - user: bob - text: what's your favorite game? - - user: Lenard - text: im been playing a lot of the new pokemon game :D - - user: bob - text: me too! - - messages: - - user: bob - text: have you been to the zoo? - - user: Lenard - text: i have! unfortunately it started raining so I left early - - user: bob - text: that sucks, I hope you get to go again soon diff --git a/src/constants.py b/src/constants.py index e294ca52..ef9fed0f 100644 --- a/src/constants.py +++ b/src/constants.py @@ -1,23 +1,9 @@ from dotenv import load_dotenv import os -import dacite -import yaml from typing import Dict, List -from src.base import Config load_dotenv() - -# load config.yaml -SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) -CONFIG: Config = dacite.from_dict( - Config, yaml.safe_load(open(os.path.join(SCRIPT_DIR, "config.yaml"), "r")) -) - -BOT_NAME = CONFIG.name -BOT_INSTRUCTIONS = CONFIG.instructions -EXAMPLE_CONVOS = CONFIG.example_conversations - DISCORD_BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"] DISCORD_CLIENT_ID = os.environ["DISCORD_CLIENT_ID"] OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] @@ -27,34 +13,18 @@ for s in server_ids: ALLOWED_SERVER_IDS.append(int(s)) -SERVER_TO_MODERATION_CHANNEL: Dict[int, int] = {} -server_channels = os.environ.get("SERVER_TO_MODERATION_CHANNEL", "").split(",") -for s in server_channels: - values = s.split(":") - SERVER_TO_MODERATION_CHANNEL[int(values[0])] = int(values[1]) - -# Send Messages, Create Public Threads, Send Messages in Threads, Manage Messages, Manage Threads, Read Message History, Use Slash Command -BOT_INVITE_URL = f"https://discord.com/api/oauth2/authorize?client_id={DISCORD_CLIENT_ID}&permissions=328565073920&scope=bot" - -MODERATION_VALUES_FOR_BLOCKED = { - "hate": 0.5, - "hate/threatening": 0.1, - "self-harm": 0.2, - "sexual": 0.5, - "sexual/minors": 0.2, - "violence": 0.7, - "violence/graphic": 0.8, -} - -MODERATION_VALUES_FOR_FLAGGED = { - "hate": 0.4, - "hate/threatening": 0.05, - "self-harm": 0.1, - "sexual": 0.3, - "sexual/minors": 0.1, - "violence": 0.1, - "violence/graphic": 0.1, -} +# Send Messages +# Create Public Threads +# Send Messages in Threads +# Manage Messages +# Manage Threads +# Read Message History +# Use Slash Command +BOT_INVITE_URL = f"https://discord.com\ +/api/oauth2/authorize\ +?client_id={DISCORD_CLIENT_ID}\ +&permissions=328565073920\ +&scope=bot" SECONDS_DELAY_RECEIVING_MSG = ( 3 # give a delay for the bot to respond so it can catch multiple messages diff --git a/src/main.py b/src/main.py index 1d2edce9..768eff73 100644 --- a/src/main.py +++ b/src/main.py @@ -1,11 +1,10 @@ import discord from discord import Message as DiscordMessage import logging -from src.base import Message, Conversation +from src.base import Message from src.constants import ( BOT_INVITE_URL, DISCORD_BOT_TOKEN, - EXAMPLE_CONVOS, ACTIVATE_THREAD_PREFX, MAX_THREAD_MESSAGES, SECONDS_DELAY_RECEIVING_MSG, @@ -18,13 +17,7 @@ is_last_message_stale, discord_message_to_message, ) -from src import completion from src.completion import generate_completion_response, process_response -from src.moderation import ( - moderate_message, - send_moderation_blocked_message, - send_moderation_flagged_message, -) logging.basicConfig( format="[%(asctime)s] [%(filename)s:%(lineno)d] %(message)s", level=logging.INFO @@ -40,17 +33,6 @@ @client.event async def on_ready(): logger.info(f"We have 
logged in as {client.user}. Invite URL: {BOT_INVITE_URL}") - completion.MY_BOT_NAME = client.user.name - completion.MY_BOT_EXAMPLE_CONVOS = [] - for c in EXAMPLE_CONVOS: - messages = [] - for m in c.messages: - if m.user == "Lenard": - messages.append(Message(user=client.user.name, text=m.text)) - else: - messages.append(m) - completion.MY_BOT_EXAMPLE_CONVOS.append(Conversation(messages=messages)) - await tree.sync() # /chat message: @@ -73,43 +55,14 @@ async def chat_command(int: discord.Interaction, message: str): user = int.user logger.info(f"Chat command by {user} {message[:20]}") try: - # moderate the message - flagged_str, blocked_str = moderate_message(message=message, user=user) - await send_moderation_blocked_message( - guild=int.guild, - user=user, - blocked_str=blocked_str, - message=message, - ) - if len(blocked_str) > 0: - # message was blocked - await int.response.send_message( - f"Your prompt has been blocked by moderation.\n{message}", - ephemeral=True, - ) - return - embed = discord.Embed( description=f"<@{user.id}> wants to chat! 🤖💬", color=discord.Color.green(), ) embed.add_field(name=user.name, value=message) - if len(flagged_str) > 0: - # message was flagged - embed.color = discord.Color.yellow() - embed.title = "⚠️ This prompt was flagged by moderation." - await int.response.send_message(embed=embed) response = await int.original_response() - - await send_moderation_flagged_message( - guild=int.guild, - user=user, - flagged_str=flagged_str, - message=message, - url=response.jump_url, - ) except Exception as e: logger.exception(e) await int.response.send_message( @@ -119,20 +72,20 @@ async def chat_command(int: discord.Interaction, message: str): # create the thread thread = await response.create_thread( - name=f"{ACTIVATE_THREAD_PREFX} {user.name[:20]} - {message[:30]}", + name=f"{ACTIVATE_THREAD_PREFX} {user.name[:20]} {message[:30]}", slowmode_delay=1, reason="gpt-bot", auto_archive_duration=60, ) async with thread.typing(): # fetch completion - messages = [Message(user=user.name, text=message)] + messages = [Message(user='system', text=message)] response_data = await generate_completion_response( - messages=messages, user=user + messages=messages, ) # send the result await process_response( - user=user, thread=thread, response_data=response_data + thread=thread, response_data=response_data ) except Exception as e: logger.exception(e) @@ -177,49 +130,6 @@ async def on_message(message: DiscordMessage): await close_thread(thread=thread) return - # moderate the message - flagged_str, blocked_str = moderate_message( - message=message.content, user=message.author - ) - await send_moderation_blocked_message( - guild=message.guild, - user=message.author, - blocked_str=blocked_str, - message=message.content, - ) - if len(blocked_str) > 0: - try: - await message.delete() - await thread.send( - embed=discord.Embed( - description=f"❌ **{message.author}'s message has been deleted by moderation.**", - color=discord.Color.red(), - ) - ) - return - except Exception as e: - await thread.send( - embed=discord.Embed( - description=f"❌ **{message.author}'s message has been blocked by moderation but could not be deleted. 
Missing Manage Messages permission in this Channel.**", - color=discord.Color.red(), - ) - ) - return - await send_moderation_flagged_message( - guild=message.guild, - user=message.author, - flagged_str=flagged_str, - message=message.content, - url=message.jump_url, - ) - if len(flagged_str) > 0: - await thread.send( - embed=discord.Embed( - description=f"⚠️ **{message.author}'s message has been flagged by moderation.**", - color=discord.Color.yellow(), - ) - ) - # wait a bit in case user has more messages if SECONDS_DELAY_RECEIVING_MSG > 0: await asyncio.sleep(SECONDS_DELAY_RECEIVING_MSG) @@ -236,7 +146,9 @@ async def on_message(message: DiscordMessage): ) channel_messages = [ - discord_message_to_message(message) + discord_message_to_message( + message=message, + bot_name=client.user) async for message in thread.history(limit=MAX_THREAD_MESSAGES) ] channel_messages = [x for x in channel_messages if x is not None] @@ -245,7 +157,7 @@ async def on_message(message: DiscordMessage): # generate the response async with thread.typing(): response_data = await generate_completion_response( - messages=channel_messages, user=message.author + messages=channel_messages ) if is_last_message_stale( @@ -255,11 +167,10 @@ async def on_message(message: DiscordMessage): ): # there is another message and its not from us, so ignore this response return - - # send response - await process_response( - user=message.author, thread=thread, response_data=response_data - ) + async with thread.typing(): + await process_response( + thread=thread, response_data=response_data + ) except Exception as e: logger.exception(e) diff --git a/src/moderation.py b/src/moderation.py deleted file mode 100644 index d4f5ea6d..00000000 --- a/src/moderation.py +++ /dev/null @@ -1,71 +0,0 @@ -from src.constants import ( - SERVER_TO_MODERATION_CHANNEL, - MODERATION_VALUES_FOR_BLOCKED, - MODERATION_VALUES_FOR_FLAGGED, -) -import openai -from typing import Optional, Tuple -import discord -from src.utils import logger - - -def moderate_message( - message: str, user: str -) -> Tuple[str, str]: # [flagged_str, blocked_str] - moderation_response = openai.Moderation.create( - input=message, model="text-moderation-latest" - ) - category_scores = moderation_response.results[0]["category_scores"] or {} - - blocked_str = "" - flagged_str = "" - for category, score in category_scores.items(): - if score > MODERATION_VALUES_FOR_BLOCKED.get(category, 1.0): - blocked_str += f"({category}: {score})" - logger.info(f"blocked {user} {category} {score}") - break - if score > MODERATION_VALUES_FOR_FLAGGED.get(category, 1.0): - flagged_str += f"({category}: {score})" - logger.info(f"flagged {user} {category} {score}") - return (flagged_str, blocked_str) - - -async def fetch_moderation_channel( - guild: Optional[discord.Guild], -) -> Optional[discord.abc.GuildChannel]: - if not guild or not guild.id: - return None - moderation_channel = SERVER_TO_MODERATION_CHANNEL.get(guild.id, None) - if moderation_channel: - channel = await guild.fetch_channel(moderation_channel) - return channel - return None - - -async def send_moderation_flagged_message( - guild: Optional[discord.Guild], - user: str, - flagged_str: Optional[str], - message: Optional[str], - url: Optional[str], -): - if guild and flagged_str and len(flagged_str) > 0: - moderation_channel = await fetch_moderation_channel(guild=guild) - if moderation_channel: - message = message[:100] if message else None - await moderation_channel.send( - f"⚠️ {user} - {flagged_str} - {message} - {url}" - ) - - -async def 
send_moderation_blocked_message( - guild: Optional[discord.Guild], - user: str, - blocked_str: Optional[str], - message: Optional[str], -): - if guild and blocked_str and len(blocked_str) > 0: - moderation_channel = await fetch_moderation_channel(guild=guild) - if moderation_channel: - message = message[:500] if message else None - await moderation_channel.send(f"❌ {user} - {blocked_str} - {message}") diff --git a/src/utils.py b/src/utils.py index 34d723b2..a92ed18c 100644 --- a/src/utils.py +++ b/src/utils.py @@ -1,18 +1,20 @@ -from src.constants import ( - ALLOWED_SERVER_IDS, -) import logging - -logger = logging.getLogger(__name__) +import discord from src.base import Message from discord import Message as DiscordMessage from typing import Optional, List -import discord - +from src.constants import ( + ALLOWED_SERVER_IDS, +) from src.constants import MAX_CHARS_PER_REPLY_MSG, INACTIVATE_THREAD_PREFIX -def discord_message_to_message(message: DiscordMessage) -> Optional[Message]: +logger = logging.getLogger(__name__) + + +def discord_message_to_message( + message: DiscordMessage, + bot_name: str) -> Optional[Message]: if ( message.type == discord.MessageType.thread_starter_message and message.reference.cached_message @@ -20,17 +22,20 @@ def discord_message_to_message(message: DiscordMessage) -> Optional[Message]: and len(message.reference.cached_message.embeds[0].fields) > 0 ): field = message.reference.cached_message.embeds[0].fields[0] - if field.value: - return Message(user=field.name, text=field.value) + logger.info( + f"field.name - {field.name}" + ) + return Message(user="system", text=field.value) else: if message.content: - return Message(user=message.author.name, text=message.content) + user_name = "assistant" if message.author == bot_name else "user" + return Message(user=user_name, text=message.content) return None def split_into_shorter_messages(message: str) -> List[str]: return [ - message[i : i + MAX_CHARS_PER_REPLY_MSG] + message[i: i + MAX_CHARS_PER_REPLY_MSG] for i in range(0, len(message), MAX_CHARS_PER_REPLY_MSG) ] From ee6fc504b11b5034bf8fb4e96ae0d9f9d02d3394 Mon Sep 17 00:00:00 2001 From: "egor.baydarov" Date: Sat, 18 Mar 2023 02:43:32 +0300 Subject: [PATCH 2/9] use async --- src/base.py | 2 +- src/completion.py | 63 ++++++++++++++++++++++------------------------- src/constants.py | 5 +++- src/utils.py | 19 +++++++++++--- 4 files changed, 50 insertions(+), 39 deletions(-) diff --git a/src/base.py b/src/base.py index 59350982..c402c422 100644 --- a/src/base.py +++ b/src/base.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Optional, List +from typing import Optional @dataclass(frozen=True) diff --git a/src/completion.py b/src/completion.py index ddd40d6d..2039c17b 100644 --- a/src/completion.py +++ b/src/completion.py @@ -1,17 +1,18 @@ +import io from enum import Enum from dataclasses import dataclass -import openai from typing import Optional, List import discord +import aiohttp from src.base import Message -from src.utils import split_into_shorter_messages, close_thread, logger +from src.utils import split_into_shorter_messages, logger, close_thread +from src.constants import OPENAI_API_KEY, OPENAI_API_URL, OPENAI_MODEL, MAX_CHARS_PER_REPLY_MSG class CompletionResult(Enum): OK = 0 TOO_LONG = 1 - INVALID_REQUEST = 2 - OTHER_ERROR = 3 + ERROR = 2 @dataclass @@ -25,30 +26,29 @@ async def generate_completion_response( messages: List[Message], ) -> CompletionData: try: - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - 
messages=[message.render() for message in messages]) - reply = response['choices'][0]['message']['content'] - - return CompletionData( - status=CompletionResult.OK, reply_text=reply, status_text=None - ) - except openai.error.InvalidRequestError as e: - if "This model's maximum context length" in e.user_message: - return CompletionData( - status=CompletionResult.TOO_LONG, reply_text=None, status_text=str(e) - ) - else: - logger.exception(e) - return CompletionData( - status=CompletionResult.INVALID_REQUEST, - reply_text=None, - status_text=str(e), - ) + async with aiohttp.ClientSession() as session: + messages = [message.render() for message in messages] + async with session.post( + url=OPENAI_API_URL, + json={ + 'model': OPENAI_MODEL, + 'messages': messages + }, + auth=aiohttp.BasicAuth("", OPENAI_API_KEY) + ) as r: + if r.status == 200: + js = await r.json() + reply = js['choices'][0]['message']['content'] + return CompletionData(status=CompletionResult.OK, reply_text=reply, status_text=None) + else: + js = await r.json() + code = js['error']['code'] + status = CompletionResult.TOO_LONG if code == 'context_length_exceeded' else CompletionResult.ERROR + return CompletionData(status=status, reply_text=None, status_text=js) except Exception as e: logger.exception(e) return CompletionData( - status=CompletionResult.OTHER_ERROR, reply_text=None, status_text=str(e) + status=CompletionResult.ERROR, reply_text=None, status_text=str(e) ) @@ -69,16 +69,13 @@ async def process_response( else: shorter_response = split_into_shorter_messages(reply_text) for r in shorter_response: - await thread.send(r) + if len(r) > MAX_CHARS_PER_REPLY_MSG: + file = discord.File(io.StringIO(r), f'message.txt') + await thread.send(file=file) + else: + await thread.send(r) elif status is CompletionResult.TOO_LONG: await close_thread(thread) - elif status is CompletionResult.INVALID_REQUEST: - await thread.send( - embed=discord.Embed( - description=f"**Invalid request** - {status_text}", - color=discord.Color.yellow(), - ) - ) else: await thread.send( embed=discord.Embed( diff --git a/src/constants.py b/src/constants.py index ef9fed0f..df1096be 100644 --- a/src/constants.py +++ b/src/constants.py @@ -1,12 +1,15 @@ from dotenv import load_dotenv import os -from typing import Dict, List +from typing import List load_dotenv() DISCORD_BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"] DISCORD_CLIENT_ID = os.environ["DISCORD_CLIENT_ID"] + OPENAI_API_KEY = os.environ["OPENAI_API_KEY"] +OPENAI_API_URL = os.environ['OPENAI_API_URL'] +OPENAI_MODEL = os.environ['OPENAI_MODEL'] ALLOWED_SERVER_IDS: List[int] = [] server_ids = os.environ["ALLOWED_SERVER_IDS"].split(",") diff --git a/src/utils.py b/src/utils.py index a92ed18c..e4e14865 100644 --- a/src/utils.py +++ b/src/utils.py @@ -1,5 +1,6 @@ import logging import discord +import re from src.base import Message from discord import Message as DiscordMessage from typing import Optional, List @@ -34,10 +35,20 @@ def discord_message_to_message( def split_into_shorter_messages(message: str) -> List[str]: - return [ - message[i: i + MAX_CHARS_PER_REPLY_MSG] - for i in range(0, len(message), MAX_CHARS_PER_REPLY_MSG) - ] + indices_object = re.finditer( + pattern='```', + string=message) + + indices = [index.start() for index in indices_object] + indices[1::2] = [x + 4 for x in indices[1::2]] + indices.insert(0, 0) + indices.append(len(message)) + + result = [] + for i in range(1, len(indices)): + result.append(message[indices[i-1]:indices[i]]) + + return result def is_last_message_stale( From 
fb8c1d422cbe13814eea310ec5c8e96a605ef910 Mon Sep 17 00:00:00 2001 From: VladVP Date: Tue, 28 Mar 2023 13:23:03 +0200 Subject: [PATCH 3/9] Add initial system prompt to create behavior identical with and/or equivalent to ChatGPT on the web. Also modified `split_into_shorter_messages` to split all text, not just code blocks. --- src/constants.py | 5 +++- src/main.py | 22 ++++++++++++++- src/utils.py | 69 ++++++++++++++++++++++++++++++++++++------------ 3 files changed, 77 insertions(+), 19 deletions(-) diff --git a/src/constants.py b/src/constants.py index df1096be..39ddc886 100644 --- a/src/constants.py +++ b/src/constants.py @@ -11,6 +11,9 @@ OPENAI_API_URL = os.environ['OPENAI_API_URL'] OPENAI_MODEL = os.environ['OPENAI_MODEL'] +SYSTEM_MESSAGE = os.environ["SYSTEM_MESSAGE"] +KNOWLEDGE_CUTOFF = os.environ["KNOWLEDGE_CUTOFF"] + ALLOWED_SERVER_IDS: List[int] = [] server_ids = os.environ["ALLOWED_SERVER_IDS"].split(",") for s in server_ids: @@ -36,5 +39,5 @@ ACTIVATE_THREAD_PREFX = "💬✅" INACTIVATE_THREAD_PREFIX = "💬❌" MAX_CHARS_PER_REPLY_MSG = ( - 1500 # discord has a 2k limit, we just break message into 1.5k + 2000 # discord has a 2k limit ) diff --git a/src/main.py b/src/main.py index 768eff73..2b255ac1 100644 --- a/src/main.py +++ b/src/main.py @@ -1,3 +1,4 @@ +import datetime import discord from discord import Message as DiscordMessage import logging @@ -8,6 +9,8 @@ ACTIVATE_THREAD_PREFX, MAX_THREAD_MESSAGES, SECONDS_DELAY_RECEIVING_MSG, + SYSTEM_MESSAGE, + KNOWLEDGE_CUTOFF ) import asyncio from src.utils import ( @@ -78,8 +81,17 @@ async def chat_command(int: discord.Interaction, message: str): auto_archive_duration=60, ) async with thread.typing(): + # prepare the initial system message + current_date = datetime.datetime.now().strftime("%Y-%m-%d") + system_message = SYSTEM_MESSAGE.format( + knowledge_cutoff=KNOWLEDGE_CUTOFF, + current_date=current_date + ) # fetch completion - messages = [Message(user='system', text=message)] + messages = [ + Message(user='system', text=system_message), + Message(user='user', text=message) + ] response_data = await generate_completion_response( messages=messages, ) @@ -145,6 +157,13 @@ async def on_message(message: DiscordMessage): f"Thread message to process - {message.author}: {message.content[:50]} - {thread.name} {thread.jump_url}" ) + # prepare the initial system message + current_date = datetime.datetime.now().strftime("%Y-%m-%d") + system_message = SYSTEM_MESSAGE.format( + knowledge_cutoff=KNOWLEDGE_CUTOFF, + current_date=current_date + ) + channel_messages = [ discord_message_to_message( message=message, @@ -152,6 +171,7 @@ async def on_message(message: DiscordMessage): async for message in thread.history(limit=MAX_THREAD_MESSAGES) ] channel_messages = [x for x in channel_messages if x is not None] + channel_messages.append(Message(user='system', text=system_message)) channel_messages.reverse() # generate the response diff --git a/src/utils.py b/src/utils.py index e4e14865..52d0dbd1 100644 --- a/src/utils.py +++ b/src/utils.py @@ -26,7 +26,7 @@ def discord_message_to_message( logger.info( f"field.name - {field.name}" ) - return Message(user="system", text=field.value) + return Message(user="user", text=field.value) else: if message.content: user_name = "assistant" if message.author == bot_name else "user" @@ -34,22 +34,57 @@ def discord_message_to_message( return None -def split_into_shorter_messages(message: str) -> List[str]: - indices_object = re.finditer( - pattern='```', - string=message) - - indices = [index.start() for index in 
indices_object] - indices[1::2] = [x + 4 for x in indices[1::2]] - indices.insert(0, 0) - indices.append(len(message)) - - result = [] - for i in range(1, len(indices)): - result.append(message[indices[i-1]:indices[i]]) - - return result - +def split_into_shorter_messages(text, limit=MAX_CHARS_PER_REPLY_MSG, code_block='```'): + def split_at_boundary(s, boundary): + parts = s.split(boundary) + result = [] + for i, part in enumerate(parts): + if i % 2 == 1: + result.extend(split_code_block(part)) + else: + result += split_substring(part) + return result + + def split_substring(s): + if len(s) <= limit: + return [s] + for boundary in ('\n', ' '): + if boundary in s: + break + else: + return [s[:limit]] + split_substring(s[limit:]) + + pieces = s.split(boundary) + result = [] + current_part = pieces[0] + for piece in pieces[1:]: + if len(current_part) + len(boundary) + len(piece) > limit: + result.append(current_part) + current_part = piece + else: + current_part += boundary + piece + result.append(current_part) + return result + + def split_code_block(s): + if len(code_block + s + code_block) <= limit: + return [code_block + s + code_block] + else: + lines = s.split('\n') + result = [code_block] + for line in lines: + if len(result[-1] + '\n' + line) > limit: + result[-1] += code_block + result.append(code_block + line) + else: + result[-1] += '\n' + line + result[-1] += code_block + return result + + if code_block in text: + return split_at_boundary(text, code_block) + else: + return split_substring(text) def is_last_message_stale( interaction_message: DiscordMessage, last_message: DiscordMessage, bot_id: str From 67d1c753e97726a74cc74ebaa758d04eca5e4b7c Mon Sep 17 00:00:00 2001 From: VladVP Date: Tue, 28 Mar 2023 13:25:31 +0200 Subject: [PATCH 4/9] Update `.env.example` to use new env variables --- .env.example | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.env.example b/.env.example index 2540f9b5..09fe9d44 100644 --- a/.env.example +++ b/.env.example @@ -3,4 +3,10 @@ DISCORD_BOT_TOKEN=x DISCORD_CLIENT_ID=x ALLOWED_SERVER_IDS=1 -SERVER_TO_MODERATION_CHANNEL=1:1 \ No newline at end of file +SERVER_TO_MODERATION_CHANNEL=1:1 + +OPENAI_API_URL=https://api.openai.com/v1/chat/completions +OPENAI_MODEL=gpt-3.5-turbo + +SYSTEM_MESSAGE="You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible. Knowledge cutoff: {knowledge_cutoff} Current date: {current_date}" +KNOWLEDGE_CUTOFF="2021-09" From 81cfed4b375971bea02bec79c1c97aa3e3cda4cb Mon Sep 17 00:00:00 2001 From: VladVP Date: Tue, 28 Mar 2023 13:28:13 +0200 Subject: [PATCH 5/9] Remove `SERVER_TO_MODERATION_CHANNEL` --- .env.example | 1 - 1 file changed, 1 deletion(-) diff --git a/.env.example b/.env.example index 09fe9d44..d98253a8 100644 --- a/.env.example +++ b/.env.example @@ -3,7 +3,6 @@ DISCORD_BOT_TOKEN=x DISCORD_CLIENT_ID=x ALLOWED_SERVER_IDS=1 -SERVER_TO_MODERATION_CHANNEL=1:1 OPENAI_API_URL=https://api.openai.com/v1/chat/completions OPENAI_MODEL=gpt-3.5-turbo From 15c9e6ad80a276be94b016bfa0da69e9cd9753d0 Mon Sep 17 00:00:00 2001 From: VladVP Date: Tue, 28 Mar 2023 13:33:03 +0200 Subject: [PATCH 6/9] Update README --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f722502d..4eafe579 100644 --- a/README.md +++ b/README.md @@ -53,9 +53,9 @@ This bot uses the [OpenAI Python Library](https://github.com/openai/openai-pytho # Optional configuration -1. 
If you want moderation messages, create and copy the channel id for each server that you want the moderation messages to send to in `SERVER_TO_MODERATION_CHANNEL`. This should be of the format: `server_id:channel_id,server_id_2:channel_id_2` -1. If you want to change the personality of the bot, go to `src/config.yaml` and edit the instructions -1. If you want to change the moderation settings for which messages get flagged or blocked, edit the values in `src/constants.py`. A lower value means less chance of it triggering. +- If you want to change the model used, you can do so in `OPENAI_MODEL`. Currently only `gpt-3.5-turbo` and `gpt-4` work with the present codebase. + +- If you want to change the behavior/personality of the bot, change the system prompt in `SYSTEM_MESSAGE`, with optional variables enclosed in `{`curly braces`}`. Currently the only variables available are `current_date` and `knowledge_cutoff`, with the latter being equivalent to the environment variable of the same name. The former is always in ISO 8601 format. # FAQ From d9d50d8e1374be0f9c27b72f9a6450ef0efdb49b Mon Sep 17 00:00:00 2001 From: VladVP Date: Tue, 28 Mar 2023 13:43:30 +0200 Subject: [PATCH 7/9] More modifications to README --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index 4eafe579..bac329ce 100644 --- a/README.md +++ b/README.md @@ -16,9 +16,7 @@ Thank you! --- # GPT Discord Bot -Example Discord bot written in Python that uses the [completions API](https://beta.openai.com/docs/api-reference/completions) to have conversations with the `text-davinci-003` model, and the [moderations API](https://beta.openai.com/docs/api-reference/moderations) to filter the messages. - -**THIS IS NOT CHATGPT.** +Example Discord bot written in Python that uses the [completions API](https://beta.openai.com/docs/api-reference/completions) to have conversations with the `gpt-3.5-turbo` or `gpt-4` models. This bot uses the [OpenAI Python Library](https://github.com/openai/openai-python) and [discord.py](https://discordpy.readthedocs.io/). 
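
As an illustration of the templating behavior the README section above describes, here is a minimal sketch (not part of any patch in this series) of how the SYSTEM_MESSAGE prompt resolves at runtime. It mirrors the format() call added to src/main.py in PATCH 3/9; the literal prompt text and knowledge cutoff are the example values from .env.example in PATCH 4/9, whereas a real deployment reads them from os.environ:

    import datetime

    # Example values copied from .env.example; in the bot these come from os.environ.
    SYSTEM_MESSAGE = (
        "You are ChatGPT, a large language model trained by OpenAI. "
        "Answer as concisely as possible. "
        "Knowledge cutoff: {knowledge_cutoff} Current date: {current_date}"
    )
    KNOWLEDGE_CUTOFF = "2021-09"

    # Same substitution src/main.py performs before building the message list.
    current_date = datetime.datetime.now().strftime("%Y-%m-%d")  # ISO 8601, as the README notes
    system_message = SYSTEM_MESSAGE.format(
        knowledge_cutoff=KNOWLEDGE_CUTOFF,
        current_date=current_date,
    )
    print(system_message)
    # e.g. "You are ChatGPT, ... Knowledge cutoff: 2021-09 Current date: 2023-03-28"

The resulting string is sent as the first message with role "system", followed by the thread history with roles "user"/"assistant", which is what the gpt-3.5-turbo chat endpoint expects.
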
From 54eaf9324012d5767292b5062f52644f8e7604c9 Mon Sep 17 00:00:00 2001 From: root Date: Sat, 20 Jan 2024 16:30:23 +0300 Subject: [PATCH 8/9] generalize errors --- src/completion.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/completion.py b/src/completion.py index 2039c17b..ad87e19f 100644 --- a/src/completion.py +++ b/src/completion.py @@ -34,6 +34,7 @@ async def generate_completion_response( 'model': OPENAI_MODEL, 'messages': messages }, + headers={'Content-Type': 'application/json'}, auth=aiohttp.BasicAuth("", OPENAI_API_KEY) ) as r: if r.status == 200: @@ -41,10 +42,7 @@ async def generate_completion_response( reply = js['choices'][0]['message']['content'] return CompletionData(status=CompletionResult.OK, reply_text=reply, status_text=None) else: - js = await r.json() - code = js['error']['code'] - status = CompletionResult.TOO_LONG if code == 'context_length_exceeded' else CompletionResult.ERROR - return CompletionData(status=status, reply_text=None, status_text=js) + return CompletionData(status=CompletionResult.Error, reply_text=None, status_text=str(r)) except Exception as e: logger.exception(e) return CompletionData( From 3e67ac7a4aa406b90d5bb965b5fb331f5bac8b24 Mon Sep 17 00:00:00 2001 From: "egor.baydarov" Date: Fri, 29 Mar 2024 00:18:51 +0300 Subject: [PATCH 9/9] fixes --- src/completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/completion.py b/src/completion.py index ad87e19f..d948111f 100644 --- a/src/completion.py +++ b/src/completion.py @@ -42,7 +42,7 @@ async def generate_completion_response( reply = js['choices'][0]['message']['content'] return CompletionData(status=CompletionResult.OK, reply_text=reply, status_text=None) else: - return CompletionData(status=CompletionResult.Error, reply_text=None, status_text=str(r)) + return CompletionData(status=CompletionResult.ERROR, reply_text=None, status_text=str(r)) except Exception as e: logger.exception(e) return CompletionData(