-
Notifications
You must be signed in to change notification settings - Fork 15
/
net.py
101 lines (75 loc) · 2.98 KB
/
net.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def generate_convolutional_block(inp, filters, length=2, pool=True, stride=1):
    """Build a residual-style convolutional block on top of ``inp``.

    The main path applies ``length`` rounds of 3x3 conv -> batch norm ->
    ReLU.  A parallel shortcut path projects ``inp`` through a single
    1x1 conv (stride ``stride**length``, so its spatial size matches the
    main path) followed by batch norm and ReLU.  The two paths are merged
    by averaging, and an optional 3x3 / stride-2 max-pool finishes the
    block.

    Args:
        inp: 4-D input tensor.
        filters: number of output channels for every convolution.
        length: how many conv/BN/ReLU stages the main path contains.
        pool: when True, append a max-pooling layer to the result.
        stride: stride of each 3x3 convolution in the main path.

    Returns:
        The block's output tensor.
    """
    def conv_bn_relu(x, kernel, step):
        # Shared conv -> batch-norm -> ReLU stage used by both paths.
        # NOTE(review): batch_normalization is called without a `training`
        # flag, so it always runs with its default mode — confirm intended.
        x = tf.layers.Conv2D(
            filters=filters,
            kernel_size=kernel,
            strides=step,
            padding='same',
            kernel_initializer=tf.keras.initializers.he_normal(),
        )(x)
        x = tf.layers.batch_normalization(x)
        return tf.nn.relu(x)

    # Main path: `length` stacked 3x3 stages.
    main = inp
    for _ in range(length):
        main = conv_bn_relu(main, 3, stride)

    # Shortcut path: one 1x1 conv whose stride equals the cumulative
    # downsampling of the main path.
    shortcut = conv_bn_relu(inp, 1, stride ** length)

    # Merge the two paths by averaging.
    merged = (main + shortcut) / 2

    if not pool:
        return merged
    return tf.layers.MaxPooling2D(
        pool_size=3,
        strides=2,
    )(merged)
def generate_network(size=512, width=1):
    """Build the TensorFlow graph for the network and return its endpoints.

    Args:
        size: spatial height/width of the single-channel input images.
        width: channel-count multiplier applied to every block.

    Returns:
        A tuple ``(inp, labels, outputs)`` where ``inp`` and ``labels``
        are the input placeholders and ``outputs`` is a dict holding the
        'logits', 'probabilities' and 'classes' tensors.
    """
    inp = tf.placeholder(tf.float32, [None, size, size, 1], name='input')
    labels = tf.placeholder(tf.int32, [None], name='labels')

    # Stem block: downsample immediately with stride-2 convolutions.
    net = generate_convolutional_block(inp, filters=16 * width, stride=2)

    # Three regular convolutional blocks, widening as we go.
    for f in (32, 48, 64):
        net = generate_convolutional_block(net, filters=f * width)

    # Last convolutional block keeps its spatial resolution (no pooling).
    net = generate_convolutional_block(net, filters=80 * width, pool=False)

    # Global average pooling collapses each feature map to one scalar.
    net = tf.reduce_mean(net, axis=[1, 2], name='gap')

    # Two-way classification head; softmax gives class probabilities.
    logits = tf.layers.Dense(
        units=2,  # 2 outputs
        kernel_initializer=tf.keras.initializers.he_normal(),
        name='logits',
    )(net)
    probabilities = tf.nn.softmax(logits, name='probabilities')
    classes = tf.argmax(logits, axis=1, name='classes')

    return inp, labels, {
        'logits': logits,
        'probabilities': probabilities,
        'classes': classes,
    }
def generate_functions(inp, labels, output, learning_rate=8e-5):
    """Build the loss, train op and evaluation metrics for the network.

    Args:
        inp: the network's input placeholder (unused here; kept so the
            signature mirrors the other graph builders).
        labels: int32 placeholder of ground-truth class ids.
        output: dict of network endpoints as produced by
            ``generate_network`` ('logits', 'probabilities', 'classes').
        learning_rate: Adam step size; defaults to the original 8e-5.

    Returns:
        A tuple ``(error, train, metrics)``: the scalar cross-entropy
        loss, the training op, and a dict with 'accuracy' and 'AUC'
        metric tensors.
    """
    error = tf.losses.sparse_softmax_cross_entropy(labels, output['logits'])
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    # FIX: tf.layers.batch_normalization registers its moving-average
    # updates in the UPDATE_OPS collection, and TF1 requires them to run
    # alongside the train step — otherwise the BN statistics are never
    # refreshed.  This is a no-op when the collection is empty.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train = optimizer.minimize(error)
    metrics = {
        'accuracy': tf.metrics.accuracy(labels, output['classes']),
        # AUC uses the probability of the positive class (column 1).
        'AUC': tf.metrics.auc(labels, output['probabilities'][:, 1]),
    }
    return error, train, metrics