# NeuralNetwork0.py
from copy import deepcopy
import pprint
import random
try:
    from functions import sigmoid as activate  # repo-local helper module
except ImportError:  # fallback: assumes functions.sigmoid is the standard logistic function
    from math import exp
    def activate(x): return 1 / (1 + exp(-x))
pp = pprint.PrettyPrinter(width=40)
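# NeuralNetwork0 is a pure-Python multilayer perceptron. shape gives the
# neuron count per layer, e.g. (4, 5, 6, 3) = 4 inputs, hidden layers of
# 5 and 6 neurons, and 3 outputs. calc() runs the forward pass, backprop()
# one gradient-descent step, and train() loops both over a dataset.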
class NeuralNetwork0:
    def __init__(self, shape=(4, 5, 6, 3), init_w=1, use_bias=True):
        self.shape = shape
        self.use_bias = use_bias
        layers = list()
        for n in shape:
            layers.append([0] * n)  # one zeroed activation list per layer
        self.layers = layers
        bias = deepcopy(layers)
        # weights[i][m][n] connects neuron n in layer i to neuron m in layer i+1
        weights = list()
        j = 1  # layers[1] is the first hidden layer
        while j < len(layers):
            wj = list()
            for m in range(len(layers[j])):
                wm = list()
                bias[j][m] = random.uniform(-init_w, init_w) if use_bias else 0
                for n in range(len(layers[j - 1])):
                    w = random.uniform(-init_w, init_w)  # initial weights matter
                    wm.append(round(w, 2))
                wj.append(wm)
            weights.append(wj)
            j += 1
        self.weights = weights
        self.bias = bias
        #pp.pprint(bias)
    def calc(self, inpt):
        # forward pass: store all layer activations in self.output
        layers = deepcopy(self.layers)
        layers[0] = list(inpt)
        bias = self.bias
        weights = self.weights  # read-only here, no copy needed
        j = 1
        while j < len(layers):
            i = j - 1
            for m in range(len(layers[j])):
                for n in range(len(layers[i])):
                    layers[j][m] += layers[i][n] * weights[i][m][n]
                b = bias[j][m] if self.use_bias else 0
                #print(j, m, b)
                layers[j][m] = activate(layers[j][m] + b)
            j += 1
        self.output = layers
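    # Forward rule implemented above, for neuron m of layer j (i = j-1):
    #   a[j][m] = sigmoid(sum over n of w[i][m][n] * a[i][n] + b[j][m])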
    def backprop(self, target, gamma=1):
        # one gradient-descent step on the squared-error loss
        output = self.output
        layers = deepcopy(self.layers)  # reused here to hold the deltas
        for m, o in enumerate(output[-1]):
            layers[-1][m] = (o - target[m]) * o * (1 - o)
        bias = self.bias
        weights = self.weights
        # propagate the deltas backwards through the hidden layers
        j = len(layers) - 2
        while j > 0:
            l = j + 1
            for m in range(len(layers[j])):
                o = output[j][m]
                for n in range(len(layers[l])):
                    dl = layers[l][n]
                    wjl = weights[j][n][m]  # weight from m in layer j to n in layer l
                    layers[j][m] += dl * wjl * o * (1 - o)
                    #print('j:%d, m:%d, n:%d => %.3f' % (j, m, n, wjl))
            j -= 1
        # update every weight and bias against its delta
        j = 1
        while j < len(layers):
            i = j - 1
            for m in range(len(layers[j])):
                d = layers[j][m]
                for n in range(len(output[i])):
                    o = output[i][n]
                    weights[i][m][n] -= gamma * o * d
                if self.use_bias:
                    bias[j][m] -= gamma * d
                    #print(bias[j][m])
            j += 1
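    # Update rules implemented above (o * (1 - o) is the sigmoid derivative):
    #   output delta:  d = (o - t) * o * (1 - o)
    #   hidden delta:  d_m = (sum over n of d_n * w[m->n]) * o_m * (1 - o_m)
    #   updates:       w[n->m] -= gamma * o_n * d_m,  b_m -= gamma * d_m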
    def train(self, train_set, e_max=0.001, i_max=1000, gamma=1):
        # train_set is a sequence of (input, target) pairs
        inputs = [p[0] for p in train_set]
        targets = [p[1] for p in train_set]
        error = 0
        for i in range(i_max + 1):
            outputs = list()
            error = 0
            for l in range(len(inputs)):
                self.calc(inputs[l])
                self.backprop(targets[l], gamma)
                outputs.append(list(self.output[-1]))
            # mean squared error over every sample and output neuron
            for l in range(len(outputs)):
                for n in range(len(outputs[l])):
                    error += ((targets[l][n] - outputs[l][n]) ** 2) / 2
            error /= len(outputs) * len(outputs[0])
            if error <= e_max:
                break
            #print('i:%d, e:%.3f' % (i, error))
        return error, i
    def out(self):
        # last forward-pass output, rounded for display
        o = self.output[-1]
        return [round(n, 3) for n in o]
if __name__ == '__main__':
    net = NeuralNetwork0()
    net.calc((0, 0, 0, 1))
    net.backprop((1, 0, 0))
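    # Minimal usage sketch: the toy dataset and the hyperparameter values
    # below are illustrative choices, picked only to match shape=(4, 5, 6, 3).
    train_set = [
        ((0, 0, 0, 1), (1, 0, 0)),
        ((0, 0, 1, 0), (0, 1, 0)),
        ((0, 1, 0, 0), (0, 0, 1)),
    ]
    error, iterations = net.train(train_set, e_max=0.001, i_max=1000, gamma=1)
    print('error: %.5f after %d iteration(s)' % (error, iterations))
    for inpt, target in train_set:
        net.calc(inpt)
        print(inpt, '->', net.out(), 'target:', target)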