""" Build an Image Dataset in TensorFlow.
For this example, you need to make your own set of images (JPEG).
We will show 2 different ways to build that dataset:
- From a root folder, that will have a sub-folder containing images for each class
```
ROOT_FOLDER
|-------- SUBFOLDER (CLASS 0)
| |
| | ----- image1.jpg
| | ----- image2.jpg
| | ----- etc...
|
|-------- SUBFOLDER (CLASS 1)
| |
| | ----- image1.jpg
| | ----- image2.jpg
| | ----- etc...
```
- From a plain text file that lists every image path along with its class ID:
```
/path/to/image/1.jpg CLASS_ID
/path/to/image/2.jpg CLASS_ID
/path/to/image/3.jpg CLASS_ID
/path/to/image/4.jpg CLASS_ID
etc...
```
Below are a few parameters that you need to change (marked 'CHANGE HERE'),
such as the dataset path.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import print_function
import tensorflow as tf
import os
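# Optional helper (not part of the original example): a minimal sketch of how the
# plain text listing described in the docstring above could be generated from the
# folder layout. The function name and default output path are illustrative
# assumptions; the script below does not call it.
def write_listing_file(root_folder, output_file='dataset_listing.txt'):
    # One class per sub-folder, IDs assigned in alphabetical order
    classes = sorted(next(os.walk(root_folder))[1])
    with open(output_file, 'w') as out:
        for class_id, c in enumerate(classes):
            c_dir = os.path.join(root_folder, c)
            for sample in next(os.walk(c_dir))[2]:
                # Only keep JPEG images, one '<image_path> <class_id>' line each
                if sample.endswith('.jpg') or sample.endswith('.jpeg'):
                    out.write('%s %d\n' % (os.path.join(c_dir, sample), class_id))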
# Dataset Parameters - CHANGE HERE
MODE = 'folder' # or 'file', if you choose a plain text file (see above).
DATASET_PATH = '/path/to/dataset/' # the dataset file or root folder path.
# Image Parameters
N_CLASSES = 2 # CHANGE HERE, total number of classes
IMG_HEIGHT = 64 # CHANGE HERE, the image height to be resized to
IMG_WIDTH = 64 # CHANGE HERE, the image width to be resized to
CHANNELS = 3 # The 3 color channels, change to 1 if grayscale
# Reading the dataset
# 2 modes: 'file' or 'folder'
def read_images(dataset_path, mode, batch_size):
imagepaths, labels = list(), list()
if mode == 'file':
# Read dataset file
with open(dataset_path) as f:
data = f.read().splitlines()
for d in data:
imagepaths.append(d.split(' ')[0])
labels.append(int(d.split(' ')[1]))
elif mode == 'folder':
        # An ID will be assigned to each sub-folder, in alphabetical order
label = 0
# List the directory
        classes = sorted(next(os.walk(dataset_path))[1])  # works on Python 2 and 3
# List each sub-directory (the classes)
for c in classes:
c_dir = os.path.join(dataset_path, c)
            walk = next(os.walk(c_dir))  # works on Python 2 and 3
# Add each image to the training set
for sample in walk[2]:
                # Only keep JPEG images
if sample.endswith('.jpg') or sample.endswith('.jpeg'):
imagepaths.append(os.path.join(c_dir, sample))
labels.append(label)
label += 1
else:
raise Exception("Unknown mode.")
# Convert to Tensor
imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.int32)
# Build a TF Queue, shuffle data
image, label = tf.train.slice_input_producer([imagepaths, labels],
shuffle=True)
# Read images from disk
image = tf.read_file(image)
image = tf.image.decode_jpeg(image, channels=CHANNELS)
# Resize images to a common size
image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])
    # Normalize pixel values from [0, 255] to [-1, 1]
image = image * 1.0/127.5 - 1.0
# Create batches
X, Y = tf.train.batch([image, label], batch_size=batch_size,
capacity=batch_size * 8,
num_threads=4)
return X, Y
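# Optional alternative (not part of the original example): a rough sketch of an
# equivalent input pipeline built with the tf.data API, which later TF 1.x
# releases recommend over queue runners. It expects plain Python lists of image
# paths and integer labels (as built at the top of read_images); the function
# name is an illustrative assumption and the script below does not use it.
def read_images_tf_data(imagepaths, labels, batch_size):
    def _parse(path, label):
        # Read, decode, resize and normalize a single image to [-1, 1]
        image = tf.read_file(path)
        image = tf.image.decode_jpeg(image, channels=CHANNELS)
        image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])
        image = image * 1.0/127.5 - 1.0
        return image, label
    dataset = tf.data.Dataset.from_tensor_slices((imagepaths, labels))
    dataset = dataset.shuffle(buffer_size=len(imagepaths))
    dataset = dataset.map(_parse).batch(batch_size).repeat()
    return dataset.make_one_shot_iterator().get_next()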
# -----------------------------------------------
# THIS IS A CLASSIC CNN (see examples, section 3)
# -----------------------------------------------
# Note that a few elements have changed (usage of queues).
# Parameters
learning_rate = 0.001
num_steps = 10000
batch_size = 128
display_step = 100
# Network Parameters
dropout = 0.75 # Dropout, probability to drop a unit (the 'rate' argument of tf.layers.dropout)
# Build the data input
X, Y = read_images(DATASET_PATH, MODE, batch_size)
# Create model
def conv_net(x, n_classes, dropout, reuse, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet', reuse=reuse):
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
        # Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
        # Fully connected layer
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes)
        # Because 'sparse_softmax_cross_entropy_with_logits' already applies
        # softmax, we only apply softmax to the testing network
out = tf.nn.softmax(out) if not is_training else out
return out
# Because Dropout has different behavior at training and prediction time, we
# need to create 2 distinct computation graphs that share the same weights.
# Create a graph for training
logits_train = conv_net(X, N_CLASSES, dropout, reuse=False, is_training=True)
# Create another graph for testing that reuses the same weights
logits_test = conv_net(X, N_CLASSES, dropout, reuse=True, is_training=False)
# Define loss and optimizer (with train logits, for dropout to take effect)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_train, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(logits_test, 1), tf.cast(Y, tf.int64))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Saver object
saver = tf.train.Saver()
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
# Start the data queue
tf.train.start_queue_runners()
# Training cycle
for step in range(1, num_steps+1):
if step % display_step == 0:
# Run optimization and calculate batch loss and accuracy
_, loss, acc = sess.run([train_op, loss_op, accuracy])
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
else:
# Only run the optimization op (backprop)
sess.run(train_op)
print("Optimization Finished!")
# Save your model
saver.save(sess, 'my_tf_model')
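# Optional follow-up (not part of the original example): a minimal sketch showing
# how the checkpoint saved above could be restored in a fresh session and used to
# evaluate a few batches with the queue-fed 'accuracy' op defined above.
with tf.Session() as sess:
    # Restore all variables from the checkpoint written by saver.save() above
    saver.restore(sess, 'my_tf_model')
    # Re-start the input queue threads for this new session
    tf.train.start_queue_runners()
    for _ in range(10):
        print("Batch accuracy:", sess.run(accuracy))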