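"""
nn.py

Trains a small single-neuron network (sigmoid activation) on the chromosomes
and fitness values of the individuals stored by Phaenotyp, then predicts the
fitness of the shape-key values currently set in the GUI sliders.
The network code is adapted from Déborah Mesquita's Real Python tutorial
(see the link above the neural_network class).
"""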
import bpy
import numpy as np

def highest_fitness(individuals, name):
    '''
    Return the highest value of the given fitness function across all individuals.
    Used to normalize the training targets to the range 0 to 1.
    '''
    highest = 0
    for frame, individual in individuals.items():
        v = individual["fitness"][name]
        if v > highest:
            highest = v

    return highest

# based on Déborah Mesquita
# https://realpython.com/python-ai-neural-network/
class neural_network:
    '''
    Minimal network with a single sigmoid neuron:
    prediction = sigmoid(dot(input, weights) + bias).
    '''
    def __init__(self, learning_rate, matrix_size):
        # random initial weights, one per entry of the chromosome
        weights = []
        for i in range(matrix_size):
            weights.append(np.random.randn())

        self.weights = np.array(weights)
        self.bias = np.random.randn()
        self.learning_rate = learning_rate

    def _sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def _sigmoid_deriv(self, x):
        # sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x))
        return self._sigmoid(x) * (1 - self._sigmoid(x))

    def train(self, input_vectors, targets, iterations):
        cumulative_errors = []
        for current_iteration in range(iterations):
            # Pick a data instance at random
            random_data_index = np.random.randint(len(input_vectors))
            input_vector = input_vectors[random_data_index]
            target = targets[random_data_index]

            # Compute the gradients and update the weights
            derror_dbias, derror_dweights = self._compute_gradients(
                input_vector, target
            )
            self._update_parameters(derror_dbias, derror_dweights)

            # Measure the cumulative error for all the instances
            if current_iteration % 100 == 0:
                cumulative_error = 0
                # Loop through all the instances to measure the error
                for data_instance_index in range(len(input_vectors)):
                    data_point = input_vectors[data_instance_index]
                    target = targets[data_instance_index]

                    prediction = self.predict(data_point)
                    error = np.square(prediction - target)
                    cumulative_error = cumulative_error + error

                cumulative_errors.append(cumulative_error)

        return cumulative_errors

    def predict(self, input_vector):
        layer_1 = np.dot(input_vector, self.weights) + self.bias
        layer_2 = self._sigmoid(layer_1)
        prediction = layer_2
        return prediction

    def _compute_gradients(self, input_vector, target):
        # forward pass
        layer_1 = np.dot(input_vector, self.weights) + self.bias
        layer_2 = self._sigmoid(layer_1)
        prediction = layer_2

        # backward pass: chain rule for the squared error (prediction - target)**2
        derror_dprediction = 2 * (prediction - target)
        dprediction_dlayer1 = self._sigmoid_deriv(layer_1)
        dlayer1_dbias = 1
        dlayer1_dweights = (0 * self.weights) + (1 * input_vector)

        derror_dbias = (
            derror_dprediction * dprediction_dlayer1 * dlayer1_dbias
        )
        derror_dweights = (
            derror_dprediction * dprediction_dlayer1 * dlayer1_dweights
        )

        return derror_dbias, derror_dweights

    def _update_parameters(self, derror_dbias, derror_dweights):
        # plain gradient descent step
        self.bias = self.bias - (derror_dbias * self.learning_rate)
        self.weights = self.weights - (
            derror_dweights * self.learning_rate
        )
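
# Minimal usage sketch (not part of the add-on flow, for illustration only):
# the class works on any 2D array of input vectors and scalar targets scaled
# to the range 0 to 1, for example:
#   nn = neural_network(learning_rate=0.1, matrix_size=3)
#   errors = nn.train(np.array([[0.1, 0.7, 0.2], [0.3, 0.2, 0.9]]),
#                     np.array([0.8, 0.4]), 1000)
#   print(nn.predict([0.1, 0.7, 0.2]))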

def start():
    '''
    Main function to run the neural network.
    Trains one network per fitness function on the stored individuals and
    predicts the fitness of the shape-key values currently set in the GUI.
    '''
    scene = bpy.context.scene
    data = scene["<Phaenotyp>"]
    obj = data["structure"]
    shape_keys = obj.data.shape_keys.key_blocks
    phaenotyp = scene.phaenotyp
    environment = data["environment"]
    individuals = data["individuals"]

    # create / recreate results
    data["results"] = {}
    results = data["results"]

    # get data from gui
    learning_rate = phaenotyp.nn_learning_rate
    epochs = phaenotyp.nn_epochs

    # get input from sliders (skip the first shape key, the basis)
    to_predict = []
    for id, key in enumerate(shape_keys):
        if id > 0:
            v = key.value
            to_predict.append(v)

    fitness_functions = individuals["0"]["fitness"]
    for fitness_function, fitness in fitness_functions.items():
        # lists for this fitness
        try:
            chromosomes = []
            targets = []

            # get scale of target for normalization
            scale = highest_fitness(individuals, fitness_function)
            if scale != 0:
                for frame, individual in individuals.items():
                    chromosome = individual["chromosome"]
                    chromosomes.append(chromosome)

                    target = individual["fitness"][fitness_function]
                    targets.append(target / scale)

                input_vectors = np.array(chromosomes)
                targets = np.array(targets)

                matrix_size = len(individuals["0"]["chromosome"])
                nn = neural_network(learning_rate, matrix_size)
                training_error = nn.train(input_vectors, targets, epochs)

                #print(nn.predict([0.1, 0.7, 0.2])*scale, "should be 32.498")
                #print(nn.weights)

                # rescale the normalized prediction back to fitness units
                result = nn.predict(to_predict) * scale
                results[fitness_function] = result

        except Exception:
            pass
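
# start() is presumably triggered from the add-on's operator / GUI code once a
# set of individuals (chromosomes with evaluated fitness) has been generated.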