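# test.py: smoke test for the MultiLayerPerceptronAttention layer. Builds the
# graph with small hand-written index inputs and a shared 5x5 embedding
# matrix, then prints the resulting attention-weighted sums and weights.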
import tensorflow as tf

from attention import MultiLayerPerceptronAttention
from config import MultiLayerPerceptronAttentionConfig

# v1-style placeholders and sessions require graph mode when this script is
# run under TensorFlow 2.x.
tf.compat.v1.disable_eager_execution()


def init_session():
    """Creates a session capped at half of the available GPU memory and runs
    global variable initialization for the already-built graph."""
    init_op = tf.compat.v1.global_variables_initializer()
    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.5)
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
    sess.run(init_op)
    return sess


def feed(attention, data):
    """Maps raw input arrays onto the attention model's input placeholders."""
    assert isinstance(attention, MultiLayerPerceptronAttention)
    return {
        attention.Input[attention.I_x]: data['x'],
        attention.Input[attention.I_pos]: data['pos'],
        attention.Input[attention.I_dist_obj]: data['dist_obj'],
        attention.Input[attention.I_dist_subj]: data['dist_subj'],
        attention.Input[attention.I_entities]: data['entities']
    }


if __name__ == "__main__":

    # Toy batch of three contexts, five terms each; the same index grid is
    # reused for terms, part-of-speech tags, and entity distances.
    feed_dict = {
        "x": [
            [1, 0, 1, 4, 2],
            [1, 2, 0, 3, 0],
            [1, 2, 1, 3, 3]
        ],
        "dist_subj": [
            [1, 0, 1, 4, 2],
            [1, 2, 0, 3, 0],
            [1, 2, 1, 3, 3]
        ],
        "dist_obj": [
            [1, 0, 1, 4, 2],
            [1, 2, 0, 3, 0],
            [1, 2, 1, 3, 3]
        ],
        "pos": [
            [1, 0, 1, 4, 2],
            [1, 2, 0, 3, 0],
            [1, 2, 1, 3, 3]
        ],
        # Per-context term positions of the entity pair.
        "entities": [
            [0, 4],
            [3, 1],
            [3, 1]
        ],
    }

    # A single 5x5 matrix stands in for all three embedding lookups
    # (term, part-of-speech, and distance).
    term_embedding = [
        [1.0, 5.0, 1.0, 3.0, 0.0],
        [2.0, 0.0, 2.0, 2.0, 1.0],
        [3.0, 3.0, 1.0, 3.0, 3.0],
        [4.0, 2.0, 4.0, 2.0, 4.0],
        [5.0, 6.0, 1.0, 5.0, 0.0]
    ]
    pos_embedding = term_embedding
    dist_embedding = term_embedding

    term_embedding_tensor = tf.constant(
        value=term_embedding,
        dtype=tf.float32,
        shape=[len(term_embedding), len(term_embedding[0])])
    pos_embedding_tensor = tf.constant(
        value=pos_embedding,
        dtype=tf.float32,
        shape=[len(pos_embedding), len(pos_embedding[0])])
    dist_embedding_tensor = tf.constant(
        value=dist_embedding,
        dtype=tf.float32,
        shape=[len(dist_embedding), len(dist_embedding[0])])

    # Build the attention graph; all sizes are derived from the toy inputs.
    attention = MultiLayerPerceptronAttention(
        cfg=MultiLayerPerceptronAttentionConfig(),
        batch_size=len(feed_dict['x']),
        terms_per_context=len(feed_dict['x'][0]),
        term_embedding_size=len(term_embedding),
        pos_embedding_size=len(pos_embedding),
        dist_embedding_size=len(dist_embedding))
    attention.init_input()
    attention.init_hidden()
    e_sum, weights = attention.init_body(
        term_embedding=term_embedding_tensor,
        pos_embedding=pos_embedding_tensor,
        dist_embedding=dist_embedding_tensor)

    with init_session() as sess:
        r_sum, r_weights = sess.run([e_sum, weights],
                                    feed_dict=feed(attention, feed_dict))
        print("att_sum:")
        print(r_sum[0].shape)
        print(r_sum[0])
        print("att_weights:")
        print(r_weights[0].shape)
        print(r_weights[0])
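# Expected console output: for the first context, the shape and values of the
# attention-weighted embedding sum, followed by the attention weight vector.
# Exact numbers depend on how init_hidden() initializes the MLP parameters.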