
Commit 19a551b

Greg authored and committed Sep 24, 2018
first real commit
1 parent b343a14 commit 19a551b

72 files changed: +3510 −0 lines changed
 

‎NN_shape_test.sh

+12
@@ -0,0 +1,12 @@
echo "----------- NN Size Experiment ---------------"

echo 'zero. Zero indexed by mistake, but its staying this way'
#python batch_mnistNN.py --error-file 'train_error_online_NN0.pickle' --test-error-on 1 --test-error-file 'test_error_online_NN0.pickle' --alpha .02 --batch-size 1 --NN-setup 0 --num-epochs 3
echo 'one'
python batch_mnistNN.py --error-file 'pickles/train_error_online_NN1.pickle' --test-error-on 1 --test-error-file 'pickles/test_error_online_NN1.pickle' --alpha .02 --batch-size 1 --NN-setup 1 --num-epochs 3
echo 'two'
python batch_mnistNN.py --error-file 'pickles/train_error_online_NN2.pickle' --test-error-on 1 --test-error-file 'pickles/test_error_online_NN2.pickle' --alpha .02 --batch-size 1 --NN-setup 2 --num-epochs 3
echo 'three'
python batch_mnistNN.py --error-file 'pickles/train_error_online_NN3.pickle' --test-error-on 1 --test-error-file 'pickles/test_error_online_NN3.pickle' --alpha .02 --batch-size 1 --NN-setup 3 --num-epochs 3
echo 'four'
python batch_mnistNN.py --error-file 'pickles/train_error_online_NN4.pickle' --test-error-on 1 --test-error-file 'pickles/test_error_online_NN4.pickle' --alpha .02 --batch-size 1 --NN-setup 4 --num-epochs 3
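The sweep above trains setups 1 through 4 from hiddenLayerDimensionList in batch_mnistNN.py (setup 0 is commented out) and writes one training-error curve, plus a test-error curve since --test-error-on is 1, into pickles/. A minimal sketch for reading those curves back, assuming the sweep has already been run and that the pickles/ directory exists (this commit does not create it):

    import pickle

    # Error curves logged by batch_mnistNN.py every 1000 training samples (percent error).
    # Paths assume the sweep in NN_shape_test.sh has already been run.
    for setup in range(1, 5):
        path = 'pickles/train_error_online_NN%d.pickle' % setup
        with open(path, 'rb') as f:
            # If the pickle was written under Python 2 and read under Python 3,
            # pickle.load may need encoding='latin1'.
            errors = pickle.load(f)
        print('NN-setup %d: %d checkpoints, final train error %.1f%%'
              % (setup, len(errors), errors[-1]))

The test-error curves follow the same naming pattern, pickles/test_error_online_NN<id>.pickle.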

‎batch_mnistNN.py

+152
@@ -0,0 +1,152 @@
from batch_neural_network import *
import numpy as np
import itertools
import pickle
import argparse

def reshapeInstance(t_input, t_label, numElements):

    #reshape and scale // make it a list
    t_input = np.reshape(t_input,(numElements,1))
    t_input = (t_input + 1)/257.
    t_input = t_input.tolist()
    t_input = list(itertools.chain.from_iterable(t_input))

    #make it a list
    t_label = t_label.tolist()
    t_label = list(itertools.chain.from_iterable(t_label))

    return (t_input, t_label)

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--error-file', type=str, default='error_batch.pickle')
    parser.add_argument('--test-error-file', type=str, default='test_error_batch.pickle')
    parser.add_argument('--test-error-on', type=int, default=0);
    parser.add_argument('--NN-setup', type=int, default = 0)

    parser.add_argument('--num-epochs', type=int, default=3, help='no help for you!')
    parser.add_argument('--alpha', type=float, default=.03)
    parser.add_argument('--batch-size', type=int, default=2, help='number of examples per batch, -1 is full batch')
    args = parser.parse_args()
    error_file = args.error_file
    test_error_file = args.test_error_file;
    test_error_on = args.test_error_on;
    nn_setup = args.NN_setup;

    #config parameters
    numEpochs = args.num_epochs
    alpha = args.alpha
    batchSize = args.batch_size

    # loading in that good good data
    trainImages = np.load('data/train_images.npy')
    trainLabels = np.load('data/train_labels.npy')

    testImages = np.load('data/test_images.npy')
    testLabels = np.load('data/test_labels.npy')

    imSize = np.shape(trainImages[0])
    inputSize = imSize[0]*imSize[1]
    numLabels = len(trainLabels[0])

    #print '# features in:',inputSize
    #print '# possibe labels out:',numLabels

    #Neural Network Definition
    hiddenLayerDimensionList = [[350,175,85,40,20,numLabels], [300,150,75,40,numLabels], [280,130,60,numLabels], [250,100,numLabels], [175,numLabels]];
    hiddenLayerDimensions = hiddenLayerDimensionList[nn_setup];
    # hiddenLayerDimensions = [ 300, 30, numLabels ]

    NN = BatchNeuralNetwork(hiddenLayerDimensions, inputSize,alpha)
    correctTrain = 0.0
    error_over_iters = np.array([], dtype='float64')
    test_error_over_iters = np.array([], dtype='float64')

    if batchSize == -1: batchSize = len(trainLabels)

    for j in range(0,numEpochs):
        i_last = 0
        for i in range(0,len(trainLabels),batchSize):

            upperBound = i+batchSize
            if upperBound > len(trainLabels) - 1:
                upperBound = len(trainLabels) - 1

            inputs = trainImages[i:upperBound]
            labels = trainLabels[i:upperBound]

            train_inputs = [];
            train_labels = [];
            #in case the last batch isn't full size
            bsBatchSize = upperBound - i
            for k in range(0, bsBatchSize):
                (train_input, train_label) = reshapeInstance(inputs[k], labels[k], inputSize)
                train_inputs.append(train_input)
                train_labels.append(train_label)

            y_hats = NN.feedForward(train_inputs)
            outputDeltas = NN.computeDError(train_labels, y_hats)
            NN.feedBack(outputDeltas)

            # print y_hats
            for a in range(0,len(y_hats)):
                y_hat = y_hats[a]
                train_label = train_labels[a]
                same = (np.argmax(np.asarray(y_hat)) == np.argmax(np.asarray(train_label)))
                correctTrain += float(same)

            if (i % 1000 == 0 and i != 0):
                correctTest = 0.0;
                denom = i - i_last
                i_last = i
                train_accuracy = 100 - (100 * (float(correctTrain)/float(denom)))
                print 'Epoch//Sample:',j,'//',i
                print 'Train Error:', int(train_accuracy),'%'
                correctTrain = 0
                error_over_iters = np.append(error_over_iters, train_accuracy)

                if test_error_on:
                    correct = 0;
                    for idx in range(0,len(testLabels),batchSize):

                        upperBound = idx+batchSize
                        if upperBound > len(testLabels) - 1:
                            upperBound = len(testLabels) - 1

                        inputs = testImages[idx:upperBound]
                        labels = testLabels[idx:upperBound]

                        # print labels

                        test_inputs = [];
                        test_labels = [];
                        #in case the last batch isn't full size
                        bsBatchSize = upperBound - idx
                        for k in range(0, bsBatchSize):
                            (test_input, test_label) = reshapeInstance(inputs[k], labels[k], inputSize)
                            test_inputs.append(test_input)
                            test_labels.append(test_label)

                        y_hats = NN.feedForward(test_inputs)

                        for a in range(0,len(y_hats)):
                            y_hat = y_hats[a]
                            test_label = test_labels[a]

                            same = (np.argmax(np.asarray(y_hat)) == np.argmax(np.asarray(test_label)))
                            correctTest += float(same)

                    test_error = 100 - (100 * (float(correctTest)/float(idx+1)));
                    test_error_over_iters = np.append(test_error_over_iters, test_error);
                    print 'Test Error:', int(test_error), '%'

    with open(error_file, 'wb') as f:
        pickle.dump(error_over_iters, f)

    if test_error_on:
        with open(test_error_file, 'wb') as f:
            pickle.dump(test_error_over_iters, f)

if __name__ == '__main__':
    main()
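For reference, reshapeInstance flattens each image to a column vector, shifts every pixel value by one, and divides by 257, so raw values in [0, 255] land strictly inside (0, 1) before being converted to a plain Python list. A standalone sketch of that transform on a made-up 2x2 array (the real inputs come from data/train_images.npy, which is not shown in this section of the diff):

    import itertools
    import numpy as np

    # Dummy 2x2 "image"; stands in for one entry of data/train_images.npy.
    img = np.array([[0, 128],
                    [255, 64]])
    flat = np.reshape(img, (4, 1))      # column vector, as in reshapeInstance
    scaled = (flat + 1) / 257.          # 0 -> ~0.0039, 128 -> ~0.502, 255 -> ~0.996
    as_list = list(itertools.chain.from_iterable(scaled.tolist()))
    print(as_list)                      # [0.00389..., 0.50194..., 0.99610..., 0.25291...]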
