-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathhandler.py
66 lines (51 loc) · 2.02 KB
/
handler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import requests
from aiogram import types
from aiogram.filters import Command, CommandStart
from aiogram.types import Message
from aiogram import Router
from dotenv import load_dotenv
import os
import json
import aiohttp
# Load environment variables from a local .env file into os.environ
load_dotenv()
# Ollama endpoint and model name come from .env; both are None if unset —
# NOTE(review): no validation here, a missing value only surfaces at request time
OLLAMA_API_URL = os.getenv('OLLAMA_API_URL')
LLAMA_MODEL = os.getenv('LLAMA_MODEL')
# Module-level chat transcript sent with every request; seeded with the
# system prompt that scopes the assistant to psychology topics.
# NOTE(review): shared across all chats/users and grows without bound.
conversation_history = [{"role": "system",
"content": "You are Osuda AI - psychology assistant, which only answers question related to psychology"}]
# Router collecting this module's handlers; the importing bot wires it up.
router = Router()
@router.message(CommandStart())
async def cmd_start(message: Message):
    """Reply to the /start command with a short greeting."""
    greeting = "Hello. I am Osuda AI"
    await message.answer(greeting)
@router.message()
async def handle_message(message: Message):
    """Forward an incoming text message to the LLaMA backend and reply.

    Catch-all handler: runs for every update that is not /start. Non-text
    updates (photos, stickers, etc.) have ``message.text is None``, so we
    guard before calling the model instead of sending ``None`` as a prompt.
    """
    user_prompt = message.text
    if not user_prompt:
        # Nothing to forward to the model for non-text updates.
        await message.answer("Please send me a text message.")
        return
    llama_response = await send_to_llama(user_prompt)
    await message.answer(llama_response)
# Function to send a prompt to LLaMA via Ollama API
async def send_to_llama(user_prompt):
    """Send *user_prompt* to the LLaMA model through the Ollama chat API.

    The user turn is appended to the module-level ``conversation_history``
    before the request, and the assistant turn is appended only on success.
    On any failure (non-200 status, network error, unexpected payload) the
    pending user turn is popped again so the shared history is not left in
    an inconsistent state for subsequent requests.

    Returns the assistant's reply text, or a human-readable error string.
    """
    # No ``global`` needed: the list is mutated in place, never rebound.
    conversation_history.append({"role": "user", "content": user_prompt})
    payload = {
        "model": LLAMA_MODEL,
        "messages": conversation_history,
        "stream": False,  # one complete JSON response instead of a token stream
    }
    try:
        async with aiohttp.ClientSession() as session:
            async with session.post(OLLAMA_API_URL, json=payload) as response:
                if response.status != 200:
                    conversation_history.pop()
                    return "Error communicating with the LLaMA model."
                json_data = await response.json()
    except aiohttp.ClientError as exc:
        # Connection refused / DNS failure / timeout — previously this
        # propagated and crashed the message handler.
        conversation_history.pop()
        return f"Error communicating with the LLaMA model: {exc}"
    # Ollama's /api/chat (non-streaming) puts the reply in the 'message' field.
    if 'message' in json_data:
        assistant_reply = json_data['message']['content']
        conversation_history.append({"role": "assistant", "content": assistant_reply})
        return assistant_reply
    # Unexpected shape: drop the user turn that received no answer.
    conversation_history.pop()
    return f"Unexpected response format: {json_data}"
# Handlers above are registered on `router`; the bot includes this router at startup.