GPTResponder.py
import time

import openai

from keys import OPENAI_API_KEY
from prompts import create_suggestion, create_summarization

openai.api_key = OPENAI_API_KEY

# Earlier version of the responder, kept for reference (create_prompt is no longer imported here):
# def generate_response_from_transcript(transcript):
#     try:
#         response = openai.ChatCompletion.create(
#             model="gpt-3.5-turbo",
#             messages=[{"role": "system", "content": create_prompt(transcript)}],
#             temperature=0.0
#         )
#     except Exception as e:
#         print(e)
#         return ''
#     full_response = response.choices[0].message.content
#     print(response.choices[0])
#     try:
#         return full_response.split('[')[1].split(']')[0]
#     except:
#         return ''


def generate_suggestion_from_transcript(transcript):
    """Return a model-generated suggestion for the current transcript, or '' on failure."""
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "system", "content": create_suggestion(transcript)}],
            temperature=0.0,
        )
    except Exception as e:
        print(e)
        return ''
    full_response = response.choices[0].message.content
    print("suggestion: " + full_response)
    return full_response


def generate_summarization_from_transcript(transcript):
    """Return a model-generated summary of the current transcript, or '' on failure."""
    try:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "system", "content": create_summarization(transcript)}],
            temperature=0.0,
        )
    except Exception as e:
        print(e)
        return ''
    full_response = response.choices[0].message.content
    print("summarization: " + full_response)
    return full_response


class GPTResponder:
    def __init__(self):
        self.responses = []
        self.response_interval = 30        # seconds between suggestion requests
        self.summarization_interval = 90   # seconds between summarization requests
        self.summarization = ''
        self.summarizations = []

    def respond_to_transcriber(self, transcriber):
        """Loop forever, generating a fresh suggestion each time the transcript changes."""
        while True:
            if transcriber.transcript_changed_event.is_set():
                start_time = time.time()
                transcriber.transcript_changed_event.clear()
                transcript_string = transcriber.get_transcript()
                response = generate_suggestion_from_transcript(transcript_string)
                end_time = time.time()
                execution_time = end_time - start_time  # time spent generating the suggestion
                if response != '' and response not in self.responses:
                    self.responses.append(response)
                # Wait out whatever is left of the response interval before polling again.
                remaining_time = self.response_interval - execution_time
                if remaining_time > 0:
                    time.sleep(remaining_time)
            else:
                time.sleep(0.3)

    def summarize_to_transcriber(self, transcriber):
        """Loop forever, refreshing the running summary each time the transcript changes."""
        while True:
            if transcriber.transcript_changed_event.is_set():
                start_time = time.time()
                transcriber.transcript_changed_event.clear()
                transcript_string = transcriber.get_transcript()
                response = generate_summarization_from_transcript(transcript_string)
                end_time = time.time()
                execution_time = end_time - start_time  # time spent generating the summary
                if response != '':
                    self.summarization = response
                    self.summarizations.append(response)
                # Wait out whatever is left of the summarization interval before polling again.
                remaining_time = self.summarization_interval - execution_time
                if remaining_time > 0:
                    time.sleep(remaining_time)
            else:
                time.sleep(0.3)

    def update_response_interval(self, interval):
        """Update how often suggestions are requested, in seconds."""
        self.response_interval = interval
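

# --- Usage sketch (not part of the original module) ---------------------------
# A minimal sketch of how these loops are meant to be driven: both methods block
# forever, so the application is expected to run them on background threads
# against a transcriber object that exposes a `transcript_changed_event`
# (a threading.Event) and a `get_transcript()` method. `FakeTranscriber` below
# is a hypothetical stand-in used only to show the wiring; with a real
# OPENAI_API_KEY configured, this would issue live API requests.
if __name__ == "__main__":
    import threading

    class FakeTranscriber:
        """Hypothetical stand-in exposing the two members GPTResponder relies on."""

        def __init__(self):
            self.transcript_changed_event = threading.Event()

        def get_transcript(self):
            return "Speaker 1: Hello, can you hear me?"

    transcriber = FakeTranscriber()
    responder = GPTResponder()
    responder.update_response_interval(10)  # ask for a new suggestion at most every 10 s

    # Both loops run forever, so they are started as daemon threads.
    threading.Thread(target=responder.respond_to_transcriber,
                     args=(transcriber,), daemon=True).start()
    threading.Thread(target=responder.summarize_to_transcriber,
                     args=(transcriber,), daemon=True).start()

    # Signal that new transcript text is available, then give the threads a moment to react.
    transcriber.transcript_changed_event.set()
    time.sleep(15)
    print("suggestions so far:", responder.responses)
    print("latest summary:", responder.summarization)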