Commit

Merge pull request #15 from DON2604/master
Update main.py
DON2604 authored Apr 26, 2024
2 parents 9623aa9 + e01815b commit e9f01b4
Showing 1 changed file with 42 additions and 20 deletions.
62 changes: 42 additions & 20 deletions bot/main.py
@@ -1,42 +1,64 @@
-import re
 import sys
 sys.dont_write_bytecode = True

+import os
 import nextcord
 from nextcord import Interaction
 from nextcord.ext import commands
-import os
 from dotenv import load_dotenv
-from prompter import suggest_foods

+import google.generativeai as genai
+
+# Load environment variables
 load_dotenv()
 dtoken = os.getenv("DKEY")
+api_key = os.getenv("GKEY")
+
+# Configure generative AI API
+genai.configure(api_key=api_key)
+
+# Set up the model
+generation_config = {
+    "temperature": 0.9,
+    "top_p": 1,
+    "top_k": 1,
+    "max_output_tokens": 1950,
+}
+
+safety_settings = [
+    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
+]
+
+model = genai.GenerativeModel(
+    model_name="gemini-1.0-pro",
+    generation_config=generation_config,
+    safety_settings=safety_settings
+)
+
+# Initialize bot
 intents = nextcord.Intents.default()
 intents.members = True

 client = commands.Bot(command_prefix="!", intents=intents)

 @client.event
 async def on_ready():
     print(f"Logged on as {client.user}!")

-res=""
-@client.slash_command(name="test" , description="gives you food according to ingredients")
+@client.slash_command(name="test", description="Send your input to Generative AI")
 async def test(interaction: Interaction, question: str):
-    print(question)
-    cleanedmsg = re.sub(r'<.*?>', '', question)
-    user_ingredients = [ingredient.strip() for ingredient in cleanedmsg.split(',')]
-    suggested_foods = suggest_foods(user_ingredients)
-    res = ""
-    for food in suggested_foods:
-        res += "- " + food + "\n"
-    await interaction.response.send_message("Here are some foods you can make with those ingredients:\n" + res)
+    # Send initial response to acknowledge the command
+    await interaction.response.send_message("Generating response...", ephemeral=True)
+
+    testserverId = 1106837576964390994
+    # Send user input directly to the generative model
+    convo = model.start_chat(history=[])
+    convo.send_message(question)
+    response = convo.last.text
+
+    # Truncate the response if it exceeds the character limit
+    if len(response) > 2000:
+        response = response[:1997] + "..."
+
+    # Send the generative AI response
+    await interaction.followup.send(f"Generative AI response:\n{response}")

 client.run(dtoken)
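Note: the script reads the Discord bot token (DKEY) and the Gemini API key (GKEY) via python-dotenv, and client.run(dtoken) will fail with an unhelpful error if either variable is missing. A small startup guard along these lines (a sketch, not part of this commit) makes that failure explicit:

import os
import sys

def require_env(name: str) -> str:
    # Fail fast with a clear message instead of passing None to the client
    value = os.getenv(name)
    if not value:
        sys.exit(f"Missing required environment variable: {name}")
    return value

dtoken = require_env("DKEY")   # Discord bot token
api_key = require_env("GKEY")  # Gemini API key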

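Note: the new handler assigns testserverId but never uses it. If the intention was to register the command only on that test server while developing, nextcord's slash_command decorator accepts a guild_ids parameter; a sketch under that assumption (not part of this commit):

TEST_GUILD_ID = 1106837576964390994  # the ID stored in testserverId above

@client.slash_command(
    name="test",
    description="Send your input to Generative AI",
    guild_ids=[TEST_GUILD_ID],  # guild commands update immediately, unlike global ones
)
async def test(interaction: Interaction, question: str):
    await interaction.response.send_message("Generating response...", ephemeral=True)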
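Note: the handler truncates Gemini replies at Discord's 2000-character message limit. If truncation discards too much, the reply can instead be split across several follow-up messages; a rough sketch with a hypothetical send_long helper (not part of this commit):

async def send_long(interaction: Interaction, text: str, limit: int = 2000):
    # Discord rejects messages longer than 2000 characters, so send the text in chunks
    for start in range(0, len(text), limit):
        await interaction.followup.send(text[start:start + limit])

# inside the slash command, instead of truncating:
# await send_long(interaction, f"Generative AI response:\n{response}")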