app.py
import os

import streamlit as st
from dotenv import load_dotenv
import google.generativeai as genai

st.set_page_config(page_title='Gemini AI Assistant',
                   page_icon="images/gemini_avatar.png",
                   initial_sidebar_state='auto')
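
# Gemini client setup: the API key is loaded once from a .env file (or the
# environment) and used to configure google.generativeai. Run the app locally
# with `streamlit run app.py`.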
@st.cache_data
def initialize_model():
    """
    Configure google.generativeai with the GEMINI_API_KEY loaded from the
    environment (or a .env file).
    """
    load_dotenv()
    GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
    if not GEMINI_API_KEY:
        raise RuntimeError("GEMINI_API_KEY is not set; add it to your environment or a .env file.")
    genai.configure(api_key=GEMINI_API_KEY)


initialize_model()

background_color = "#252740"
avatars = {
    "assistant": "images/gemini_avatar.png",
    "user": "images/user_avatar.png",
}

st.markdown("<h2 style='text-align: center; color: #3184a0;'>Gemini AI Assistant</h2>", unsafe_allow_html=True)

with st.sidebar:
    st.image("images/gemini_avatar.png")

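# Chat history is kept in st.session_state so it survives Streamlit's reruns;
# it is seeded with an assistant greeting on first load.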
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]

# Replay the stored conversation so the full history stays visible after each rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=avatars[message["role"]]):
        st.write(message["content"])


def clear_chat_history():
    """Reset the conversation to the initial assistant greeting."""
    st.session_state.messages = [
        {"role": "assistant", "content": "How may I assist you today?"}
    ]


st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
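
# run_query flattens the stored conversation into a plain-text transcript and
# sends it to Gemini in a single generate_content call, so the model sees the
# earlier turns as context for the new prompt.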
def run_query(input_text):
    """
    Query the Gemini model with the conversation history plus the new prompt.

    Args:
        input_text (str): the user's latest prompt.

    Returns:
        str: the model's response text, or "Error" if the request fails.
    """
    try:
        generation_config = {
            "max_output_tokens": 8192,
            "temperature": 0.7,
            "top_p": 0.95,
        }
        model = genai.GenerativeModel("gemini-1.5-pro-001")

        # Build a plain-text transcript of the conversation so far.
        conversation_context = []
        for message in st.session_state.messages:
            role = "User" if message["role"] == "user" else "Gemini"
            conversation_context.append(f"{role}: {message['content']}")
        conversation_context = "\n".join(conversation_context)

        prompt = f"{conversation_context}\nYou: {input_text}\nGemini:"
        response = model.generate_content(prompt,
                                          generation_config=generation_config)
        if response:
            return response.text
        return "Error"
    except Exception as ex:
        print(ex)
        return "Error"
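

# Hedged alternative (not used by the UI below): google.generativeai can also
# stream partial results from the server via generate_content(..., stream=True),
# instead of revealing an already-complete answer character by character.
# A minimal sketch; run_query_stream is an illustrative helper name, assuming
# the same "gemini-1.5-pro-001" model as above.
def run_query_stream(input_text):
    """Yield response text chunks as Gemini produces them (illustrative sketch)."""
    model = genai.GenerativeModel("gemini-1.5-pro-001")
    streamed = model.generate_content(input_text, stream=True)
    for chunk in streamed:
        # Each chunk carries a piece of the generated text.
        yield chunk.text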
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user", avatar=avatars["user"]):
        st.write(prompt)

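# Generate a reply only when the last stored message came from the user,
# i.e. immediately after new input was submitted above.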
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant", avatar=avatars["assistant"]):
        with st.spinner("Thinking..."):
            response = run_query(prompt)
            placeholder = st.empty()
            # Simulated typing effect: reveal the already-complete response
            # character by character in the placeholder.
            full_response = ""
            for item in response:
                full_response += item
                placeholder.markdown(full_response, unsafe_allow_html=True)
    message = {"role": "assistant", "content": response}
    st.session_state.messages.append(message)