-
Notifications
You must be signed in to change notification settings - Fork 0
/
brkga.py
171 lines (141 loc) · 6.72 KB
/
brkga.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
"""
File: brkga.py
Created: 2019-06-16
Copyright 2019 Peter Caven, peter@sparseinference.com
Description:
An implementation of the "Biased Random-Key Genetic Algorithm".
[1] Random-key genetic algorithms, by José Fernando Gonçalves, Mauricio G. C. Resende,
August 18, 2014, http://mauricio.resende.info/
"""
import torch
#===============================================================================
# Wrapped Optimizers
#===============================================================================
def optSGD(lr=1.0e-3, momentum=0.0, dampening=0, weight_decay=0, nesterov=False):
    """
    Build a factory for ``torch.optim.SGD`` with the given hyperparameters.

    The returned callable takes a list of parameter tensors and returns a
    configured SGD optimizer instance, so the hyperparameters can be chosen
    up front and the parameters supplied later (see the BRKGA constructor).
    """
    settings = dict(
        lr=lr,
        momentum=momentum,
        dampening=dampening,
        weight_decay=weight_decay,
        nesterov=nesterov,
    )

    def opt(params):
        return torch.optim.SGD(params, **settings)

    return opt
def optAdam(lr=1.0e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False):
    """
    Build a factory for ``torch.optim.Adam`` with the given hyperparameters.

    The returned callable maps a list of parameter tensors to a configured
    Adam optimizer instance.
    """
    settings = {
        "lr": lr,
        "betas": betas,
        "eps": eps,
        "weight_decay": weight_decay,
        "amsgrad": amsgrad,
    }

    def opt(params):
        return torch.optim.Adam(params, **settings)

    return opt
def optAdagrad(lr=1.0e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0):
    """
    Build a factory for ``torch.optim.Adagrad`` with the given hyperparameters.

    The returned callable maps a list of parameter tensors to a configured
    Adagrad optimizer instance.
    """
    def opt(params):
        return torch.optim.Adagrad(
            params,
            lr=lr,
            lr_decay=lr_decay,
            weight_decay=weight_decay,
            initial_accumulator_value=initial_accumulator_value,
        )

    return opt
def optAdadelta(lr=1.0, rho=0.9, eps=1e-6, weight_decay=0):
    """
    Build a factory for ``torch.optim.Adadelta`` with the given hyperparameters.

    The returned callable maps a list of parameter tensors to a configured
    Adadelta optimizer instance.
    """
    return lambda params: torch.optim.Adadelta(
        params, lr=lr, rho=rho, eps=eps, weight_decay=weight_decay
    )
def optRMSprop(lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False):
    """
    Build a factory for ``torch.optim.RMSprop`` with the given hyperparameters.

    The returned callable maps a list of parameter tensors to a configured
    RMSprop optimizer instance.
    """
    config = {
        "lr": lr,
        "alpha": alpha,
        "eps": eps,
        "weight_decay": weight_decay,
        "momentum": momentum,
        "centered": centered,
    }

    def opt(params):
        return torch.optim.RMSprop(params, **config)

    return opt
#===============================================================================
class BRKGA():
    """
    Biased Random-Key Genetic Algorithm over a population of random keys.

    Each row of ``self.keys`` is one chromosome: a tensor of keys clamped to
    [0, 1].  The population is evolved with the usual BRKGA operators
    (elitism, fresh mutants, elite/non-elite crossover) via ``evolve``, and
    can additionally be nudged by gradients via ``optimize`` when the
    objective is differentiable in the keys.
    """
    def __init__(self, populationShape, elites=1, mutants=1, optimizer=optSGD(), dtype=torch.float64):
        """
        populationShape: (populationSize, *keyShape) — the first dimension
            indexes individuals; the remaining dimensions are one key's shape.
        elites:  number of best rows preserved each generation (clamped to >= 1).
        mutants: number of fresh random rows injected each generation (clamped to >= 0).
        optimizer: a factory (e.g. ``optSGD()``) mapping a parameter list to a
            torch optimizer; it is applied to ``[self.keys]``.
        dtype: dtype of the key population.
        """
        super().__init__()
        self.dtype = dtype
        # Uniform random keys in [0, 1); requires_grad so an objective computed
        # from the keys can be back-propagated into them (see 'optimize').
        self.keys = torch.rand(*populationShape, requires_grad=True, dtype=dtype)
        # Fitness-sorted row indexes; set by 'orderBy', consumed by the
        # 'elites'/'nonelites' properties and by 'evolve'.
        self.indexes = None
        self.eliteCount = max(1, elites)
        self.mutantCount = max(0, mutants)
        # Rows that survive into crossover (all rows except the mutant slots).
        self.nonMutantCount = len(self.keys) - self.mutantCount
        self.optimizer = optimizer([self.keys])
    #------------------------------------------------------
    def orderBy(self, results, descending=False):
        """
        Sort the population of keys by the 'results' of mapping an objective function to each random key.
        The best result and its random key are returned as a (value, key) pair.
        results: a tensor of objective function values (1D tensor), one value per row in 'self.keys'.
        descending: sort order; False means lower objective values are better.
        """
        # Only the permutation is stored; the rows of self.keys are not moved.
        values,self.indexes = results.sort(descending=descending)
        return values[0],self.keys[self.indexes[0]]
    #------------------------------------------------------
    @property
    def elites(self):
        """
        Return a tensor of the best random keys in the population.
        PRECONDITION: The indexes of the random keys are sorted
        by the most recent call to 'self.orderBy'.
        """
        return self.keys[self.indexes[:self.eliteCount]]
    #------------------------------------------------------
    @property
    def nonelites(self):
        """
        Return a tensor of the non-elite random keys in the population.
        PRECONDITION: The indexes of the random keys are sorted
        by the most recent call to 'self.orderBy'.
        """
        return self.keys[self.indexes[self.eliteCount:]]
    #------------------------------------------------------
    def optimize(self):
        """
        Use the optimizer passed to the constructor to adjust the random keys
        using the gradients computed by a call to 'results.backward()'.
        Note: the elites may not be the best random keys after this call,
        but the entire population of random keys will (stochastically)
        move toward the optimum.
        """
        with torch.no_grad():
            self.optimizer.step()
            self.optimizer.zero_grad()
            # Keep every key inside the BRKGA unit hypercube after the step.
            self.keys.data.clamp_(min=0, max=1)
    #------------------------------------------------------
    def map(self, func):
        """
        Compute a results tensor by applying 'func' to
        each random key in the population.
        func: callable taking one key tensor and returning a scalar tensor
        (the rows are stacked, so each result must have the same shape).
        """
        return torch.stack([func(x) for x in self.keys])
    #------------------------------------------------------
    def evolve(self):
        """
        One iteration of the "Biased Random-Key Genetic Algorithm".
        Replaces the current population of random keys with a new population.
        PRECONDITION: The indexes of the random keys are already sorted
        by a call to 'self.orderBy'.
        POSTCONDITION: The shape of the population of random keys is unchanged.
        The first 'self.eliteCount' random keys are still the best keys from
        the most recent call to 'self.orderBy'.
        """
        with torch.no_grad():
            keyShape = self.keys.shape[1:]
            #----
            # Crossover pool: the sorted non-elite survivors, with the worst
            # 'mutantCount' rows replaced by fresh uniform-random mutants.
            if self.mutantCount > 0:
                mutants = torch.rand(self.mutantCount, *keyShape, dtype=self.dtype)
                nonElites = torch.cat([self.keys[self.indexes[self.eliteCount : self.nonMutantCount]], mutants], 0)
            else:
                nonElites = self.keys[self.indexes[self.eliteCount : self.nonMutantCount]]
            nonEliteCount = len(nonElites)
            #----
            # One elite parent per offspring, chosen uniformly at random
            # (equal multinomial weights; the 0.5 values are unnormalized).
            elites = self.elites
            eliteSelectors = torch.multinomial(torch.full((self.eliteCount,), 0.5), nonEliteCount, replacement=True)
            selectedElites = elites[eliteSelectors]
            #----
            # One non-elite parent per offspring, also uniformly at random.
            nonEliteSelectors = torch.multinomial(torch.full((nonEliteCount,), 0.5), nonEliteCount, replacement=True)
            selectedNonElites = nonElites[nonEliteSelectors]
            #----
            # Elementwise crossover mask: 1 takes the elite gene, 0 the non-elite.
            # NOTE(review): p=0.5 is an unbiased mix; classic BRKGA [1] biases
            # toward the elite parent with p > 0.5 — confirm this is intended.
            offspringSelectors = torch.empty((nonEliteCount, *keyShape), dtype=self.dtype).bernoulli_(p=0.5)
            offspring = (offspringSelectors * selectedElites) + ((1.0 - offspringSelectors) * selectedNonElites)
            #----
            # In-place overwrite keeps self.keys (and the optimizer's reference
            # to it) alive; elites occupy the first rows of the new population.
            # NOTE(review): cat with out= onto a requires_grad leaf relies on
            # being inside no_grad — verify against the installed torch version.
            torch.cat([elites, offspring], 0, out=self.keys)
    #------------------------------------------------------
#------------------------------------------------------
##===================================================
# Intentionally empty entry point: this module is a library and is meant to
# be imported; there is no demo/CLI behavior when run as a script.
if __name__ == '__main__':
    pass