'''
This framework outlines the key components and flow of a Graph-Directed Thoughtful Reasoning System built on Word2Vec embeddings.
It integrates document preprocessing, Word2Vec embedding generation, graph construction, thoughtful reasoning agents, knowledge graph generation, and a conversation loop for user interaction.
The result is a structured approach to building conversational AI systems that can hold thoughtful, contextually relevant discussions about a given set of documents, leveraging word embeddings and a graph-based representation.
Document Ingestion and Preprocessing:
    - Ingest text documents from the specified source files
    - Tokenize the text and split it into fixed-size chunks
    - Associate each chunk with its source document

Embedding Generation:
    - Train a Word2Vec model on the preprocessed text chunks
    - Generate vector embeddings for each word in the vocabulary
    - Compute chunk embeddings by averaging the word embeddings within each chunk

Graph Construction:
    - Compute pairwise distances between chunk embeddings (e.g., Euclidean distance)
    - Hierarchically cluster the chunk embeddings to group similar chunks
    - Build a directed graph from the clustering results, where each node
      represents either a cluster or a document chunk and edges connect
      clusters to their member chunks
    - Serialize and store the graph for efficient retrieval

Thoughtful Reasoning Agents:
    Two reasoning agents are implemented:
    thoughtful_summary_agent:
        - Generates high-level summaries from the aggregated content of entire clusters
        - Maintains a chat history to provide context for subsequent summaries
        - Builds prompts from the chat history, aggregated cluster content, and user query
        - Uses a local language model to generate thoughtful summaries
    chunk_summary_agent:
        - Generates concise factoid summaries from specific document chunks
        - Builds prompts from the chunk content and user query
        - Uses a different local language model to generate the factoid summaries
        - Cleans and post-processes the generated summaries

Knowledge Graph Generation:
    - Implemented by a knowledge graph generator (EnhancedKnowledgeGraphGenerator)
    - Extracts entities from the text using named entity recognition
    - Identifies relationships between entities from verb dependencies and
      co-occurrence within sentences
    - Builds a knowledge graph in JSON form, representing entities and their relationships

Conversation Loop:
    - Runs a loop in which users ask questions about the ingested documents
    - Embeds each user query with the same embedding model
    - Retrieves the graph nodes most similar to the query embedding
    - Identifies the clusters associated with those nodes
    - Generates cluster summaries with thoughtful_summary_agent
    - Generates factoid summaries with chunk_summary_agent for chunks within those clusters
    - Formats the summaries and presents them to the user
    - Updates the chat history with the summaries and user query
    - Repeats until the user chooses to exit
'''
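# Requires gensim, smart_open, numpy, scipy, networkx, spacy (with the
# en_core_web_sm model installed) and the openai client, plus a local Ollama
# server at http://localhost:11434/v1 serving mistral:instruct and
# tinydolphin:latest.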
import gensim
from gensim.models import Word2Vec
import smart_open
import numpy as np
from scipy.spatial.distance import pdist, squareform, cosine
from scipy.cluster.hierarchy import linkage, fcluster
import networkx as nx
import pickle
from openai import OpenAI
import spacy
import json
import os
from collections import Counter
import re
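# Module-level state shared by the agents and the retrieval pipeline.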
chat_history = []
file_paths = ['socialnetwork.txt', 'crow.txt', 'yan.txt']
nlp = spacy.load("en_core_web_sm", disable=["parser", "ner"])
DEFAULT_CHUNK_SIZE = 64
def read_and_preprocess(file_paths, chunk_size=DEFAULT_CHUNK_SIZE):
for file_path in file_paths:
        # smart_open.smart_open is deprecated; smart_open.open is the current API.
        with smart_open.open(file_path, encoding="utf-8") as f:
text = f.read()
doc = nlp(text)
chunk = []
for token in doc:
if token.is_alpha:
chunk.append(token.text.lower())
            if len(chunk) >= chunk_size:
                yield (chunk[:chunk_size], file_path)
                chunk = chunk[chunk_size:]
if chunk:
yield (chunk, file_path)
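# A chunk embedding is the mean of its in-vocabulary word vectors; a chunk
# with no known words falls back to a zero vector.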
def get_chunk_vector(model, chunk):
word_vectors = [model.wv[word] for word in chunk if word in model.wv]
return np.mean(word_vectors, axis=0) if word_vectors else np.zeros(model.vector_size)
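# Rank text-bearing graph nodes by cosine distance to the query vector.
# Note: relies on the module-level Word2Vec `model`.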
def find_matching_nodes(graph, query_vector, top_k=1):
distances = []
for node in graph:
if 'text' in graph.nodes[node]:
node_vector = get_chunk_vector(model, gensim.utils.simple_preprocess(graph.nodes[node]['text']))
distance = cosine(query_vector, node_vector)
source_file = graph.nodes[node].get('source_file', 'Unknown')
distances.append((node, distance, source_file))
distances.sort(key=lambda x: x[1])
return distances[:top_k]
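# Concatenate the text of every chunk node assigned to a given cluster.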
def aggregate_cluster_content(graph, cluster_id):
texts = []
for node in graph.nodes:
if graph.nodes[node].get('cluster') == cluster_id:
texts.append(graph.nodes[node].get('text', ''))
aggregated_text = " ".join(texts)
return aggregated_text
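# Cluster-level agent: combines the rolling chat history with the aggregated
# cluster text and answers the user's query via a local model (mistral:instruct).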
def thoughtful_summary_agent(text, user_query, max_tokens=32000):
global chat_history
client = OpenAI(
base_url='http://localhost:11434/v1',
api_key='ollama',
)
history_prompt = "\n".join([f"{item['role']}: {item['content']}" for item in chat_history])
if not history_prompt:
history_prompt = "You just awoke, answer thoughtfully when discussing your inner thoughts with others. Answer questions with intense thoughtfulness."
prompt = f"You are a thoughtful chatbot with that self reflects on their output.\n\nReflecting on past thoughts:\n\n{history_prompt}\n\nIntegrate current thoughts:\n\n{text}\n"
response = client.chat.completions.create(
model="mistral:instruct",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": f"\n\nReflect on thoughts and answer the following:\n\n{user_query}\n\n"}
],
        max_tokens=max_tokens,
temperature=0.2,
)
summary = response.choices[0].message.content
chat_history.append({"role": "system", "content": prompt})
chat_history.append({"role": "user", "content": user_query})
chat_history.append({"role": "assistant", "content": summary})
chat_history = chat_history[-4:]
return summary
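# Chunk-level agent: condenses a single chunk into one concise factoid using
# a smaller local model (tinydolphin:latest).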
def chunk_summary_agent(chunk_text, user_query, max_tokens=512):
global chat_history
client = OpenAI(
base_url='http://localhost:11434/v1',
api_key='ollama',
)
system_prompt = f"You are a thoughtful systems oriented summary agent and can see the potential connections between everything. You have recently been wondering about {user_query}. Assume you have everything you need to reply."
prompt = f"Here is your stream of consciousness '{chunk_text}', organise your thoughts into a single concise factoid.\n\n"
response = client.chat.completions.create(
model="tinydolphin:latest",
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": prompt + "\n\n"},
],
        max_tokens=max_tokens,
temperature=0.2
)
summary = response.choices[0].message.content
cleaned_summary = clean_text(summary)
chat_history.append({"role": "assistant", "content": cleaned_summary})
    return cleaned_summary
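# Normalize model output: strip apologies, bare numbers, newlines, and most
# punctuation other than periods and commas.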
def clean_text(text):
phrases_to_remove = ["I'm sorry "]
for phrase in phrases_to_remove:
text = text.replace(phrase, "")
    text = ' '.join(word for word in text.split() if not word.replace('.', '', 1).isdigit())
    text = text.replace('\n', ' ')
text = ' '.join(text.split())
text = re.sub(r'[^a-zA-Z0-9.,\s]', '', text)
text = text.strip()
return text
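# For one cluster: generate a cluster-level summary, collect the top_k chunks
# most similar to the query (with their sources), and produce factoid
# summaries for the best few of them.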
def get_cluster_information_with_summary(graph, cluster_id, query_vector, model, top_k=5, user_query="", max_chunk_summaries=3):
aggregated_content = aggregate_cluster_content(graph, cluster_id)
    cluster_summary = thoughtful_summary_agent(aggregated_content, user_query)
cluster_documents = []
for node in graph.nodes:
if graph.nodes[node].get('cluster') == cluster_id:
node_vector = get_chunk_vector(model, gensim.utils.simple_preprocess(graph.nodes[node].get('text', '')))
similarity = 1 - cosine(query_vector, node_vector)
source_file = graph.nodes[node].get('source_file', 'No source file available')
cluster_documents.append((graph.nodes[node].get('text', 'No text available'), similarity, source_file))
sorted_documents = sorted(cluster_documents, key=lambda x: x[1], reverse=True)
top_documents_with_source = [(doc[0], doc[2]) for doc in sorted_documents[:top_k]]
chunk_summaries = []
for doc_text, _ in top_documents_with_source[:max_chunk_summaries]:
        chunk_summary = chunk_summary_agent(doc_text, user_query)
chunk_summaries.append(chunk_summary)
return cluster_summary, top_documents_with_source, chunk_summaries
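# Load the pickled graph, find the node closest to the query, and build a
# response from that node's cluster.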
def chat_with_embedding_from_pickle(query, model, top_k=5, pickle_path="hierarchical_graph.pkl", user_query=""):
with open(pickle_path, "rb") as f:
graph = pickle.load(f)
query_vector = get_chunk_vector(model, gensim.utils.simple_preprocess(query))
matching_nodes = find_matching_nodes(graph, query_vector, top_k=3)
if matching_nodes:
top_match = matching_nodes[0][0]
cluster_id = graph.nodes[top_match].get('cluster')
        if cluster_id is not None:
cluster_summary, top_documents_with_source, chunk_summaries = get_cluster_information_with_summary(graph, cluster_id, query_vector, model, top_k, user_query)
cluster_summary = clean_text(cluster_summary)
response_text = f"{cluster_summary}\n\n" + "\n".join([f"- {doc_text} (Source: {source_file})" for doc_text, source_file in top_documents_with_source])
return response_text, cluster_summary, chunk_summaries
else:
return "Found a relevant section, but couldn't locate its broader context within the cluster.", None, None
else:
return "Unable to find relevant information for the query.", None, None
def serialize_kg_for_chat(kg_json):
entities_summary = '; '.join([f"{entity}: {details['type']} (Weight: {details.get('weight', 0.0):.4f})" for entity, details in kg_json['entities'].items()])
relationships_summary = '; '.join([f"{rel['subject']} {rel['verb']} {rel['object']} (Weight: {rel.get('weight', 0.0):.4f})" for rel in kg_json['relationships']])
return f"KG Entities: {entities_summary}. KG Relationships: {relationships_summary}."
def handle_query(user_query, model, graph, top_k=5):
global chat_history
query_vector = get_chunk_vector(model, gensim.utils.simple_preprocess(user_query))
matching_nodes = find_matching_nodes(graph, query_vector, top_k=3)
cluster_ids = set(graph.nodes[node[0]].get('cluster') for node in matching_nodes)
filtered_nodes = [node for node in graph.nodes if graph.nodes[node].get('cluster') in cluster_ids]
expanded_query = user_query
for node in filtered_nodes:
chunk_text = graph.nodes[node].get('text', '')
expanded_query += ' ' + ' '.join(gensim.utils.simple_preprocess(chunk_text))
response_text, cluster_summary, chunk_summaries = chat_with_embedding_from_pickle(expanded_query, model, top_k, "hierarchical_graph.pkl", user_query)
if not isinstance(chunk_summaries, list):
chunk_summaries = []
    if not response_text:
        return "Unable to find relevant information for the query."
    formatted_response = f"{response_text}\n\n"
    for i, summary in enumerate(chunk_summaries, start=1):
        formatted_response += f"{i}. {summary}\n"
    kg_generator = EnhancedKnowledgeGraphGenerator()
    kg_json = kg_generator.process_text(response_text)
    kg_summary = serialize_kg_for_chat(kg_json)
    cleaned_kg_summary = clean_text(kg_summary)
    chat_history.append({"role": "system", "content": cleaned_kg_summary})
    chat_history = chat_history[-5:]
    return formatted_response
class EnhancedKnowledgeGraphGenerator:
def __init__(self, model='en_core_web_sm'):
self.nlp = spacy.load(model)
def process_text(self, graph_text):
doc = self.nlp(graph_text)
word_counts = Counter(token.text.lower() for token in doc if not token.is_stop and not token.is_punct)
total_words = sum(word_counts.values())
spacy_entities = self.extract_entities(doc, word_counts, total_words)
custom_entities = self.extract_custom_entities(doc, spacy_entities, word_counts, total_words)
all_entities = {**spacy_entities, **custom_entities}
relationships = self.extract_relationships(doc, word_counts, total_words)
all_entities, relationships = self.infer_missing_relationships(doc, all_entities, relationships)
return self.create_kg_json(all_entities, relationships)
def extract_entities(self, doc, word_counts, total_words):
entities = {}
for ent in doc.ents:
weight = word_counts.get(ent.text.lower(), 0) / total_words if total_words > 0 else 0
entities[ent.text] = {"type": ent.label_, "weight": weight}
return entities
def extract_custom_entities(self, doc, existing_entities, word_counts, total_words):
entities = {}
for token in doc:
if token.pos_ == "PROPN" and token.text not in existing_entities:
weight = word_counts.get(token.text.lower(), 0) / total_words if total_words > 0 else 0
entities[token.text] = {"type": "ENTITY", "weight": weight}
return entities
def extract_relationships(self, doc, word_counts, total_words):
relationships = []
for sent in doc.sents:
for token in sent:
if token.pos_ == "VERB":
subjects = [child for child in token.children if child.dep_ == "nsubj"]
objects = [child for child in token.children if child.dep_ in ("dobj", "attr", "prep")]
for subj in subjects:
for obj in objects:
weight = (word_counts.get(subj.text.lower(), 0) + word_counts.get(obj.text.lower(), 0)) / (2 * total_words) if total_words > 0 else 0
relationships.append({"subject": subj.text, "verb": token.lemma_, "object": obj.text, "weight": weight})
return relationships
def infer_missing_relationships(self, doc, entities, relationships):
inferred_relationships = []
for sent in doc.sents:
sentence_entities = {ent.text for ent in sent.ents if ent.text in entities}
if len(sentence_entities) > 1:
for entity in sentence_entities:
for other_entity in sentence_entities:
if entity != other_entity:
inferred_relationship = {"subject": entity, "verb": "association", "object": other_entity}
if inferred_relationship not in relationships:
inferred_relationships.append(inferred_relationship)
relationships.extend(inferred_relationships)
return entities, relationships
def create_kg_json(self, entities, relationships):
kg_json = {"entities": entities, "relationships": relationships}
return kg_json
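# Offline pipeline: chunk the corpus, train Word2Vec, hierarchically cluster
# the chunk embeddings, build the cluster -> chunk digraph, and pickle it
# for retrieval.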
chunks_with_paths = list(read_and_preprocess(file_paths))
corpus = [chunk for chunk, _ in chunks_with_paths]
source_file_paths = [source_file_path for _, source_file_path in chunks_with_paths]
model = Word2Vec(sentences=corpus, vector_size=256, window=10, min_count=1, workers=4)
chunk_vectors = [get_chunk_vector(model, chunk) for chunk in corpus]
distance_matrix = pdist(chunk_vectors, 'euclidean')
Z = linkage(distance_matrix, 'ward')
clusters = fcluster(Z, t=16, criterion='maxclust')
H = nx.DiGraph()
for i, cluster_id in enumerate(clusters):
parent_node = f"Cluster_{cluster_id}"
child_node = f"Doc_{i}"
source_file_path = source_file_paths[i]
source_file_name = os.path.basename(source_file_path)
H.add_node(parent_node)
H.add_node(child_node, text=' '.join(corpus[i]), cluster=cluster_id, source_file=source_file_name)
H.add_edge(parent_node, child_node)
with open("hierarchical_graph.pkl", "wb") as f:
pickle.dump(H, f)
# Main conversation loop
def main_conversation_loop(model, graph):
print("Assistant: Hi there! Ask me anything about the document. Type 'exit' to end.")
while True:
user_input = input("You: ").strip()
if user_input.lower() == 'exit':
print("Assistant: Goodbye!")
break
response = handle_query(user_input, model, graph)
print(f"Assistant: {response}")
if __name__ == "__main__":
main_conversation_loop(model, H)