-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathenvironment.py
62 lines (49 loc) · 1.97 KB
/
environment.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
import numpy as np
import neat
from indiv import Indiv
from renderer import Renderer
class Environment:
    """Simulated 2-D world in which NEAT-evolved agents forage for food.

    Acts as the NEAT fitness function: `evaluate_genomes` builds one agent
    per genome, simulates a fixed number of time steps, and assigns each
    genome a fitness equal to the energy its agent collected.
    """

    # World configuration (class-level constants).
    grid_size = 64    # Side length of the square world
    pop_size = 60     # Initial population size
    num_food = 120    # Initial amount of food
    nutrition = 200   # Energy gained per food item eaten
    steps = 300       # Number of time steps per generation

    def __init__(self, seed):
        """Create a world with a seeded RNG and an attached renderer.

        seed: seed for numpy's random Generator, making runs reproducible.
        """
        self.rand = np.random.default_rng(seed)
        self.state = 0  # Generation counter, incremented per evaluation
        self.renderer = Renderer(self.grid_size)
        # Per-instance simulation state. These were mutable CLASS attributes
        # in the original, silently shared by every Environment instance;
        # binding them here avoids that aliasing bug.
        self.agents = []
        self.foods = []

    def evaluate_genomes(self, genomes, config):
        """NEAT fitness function: simulate one generation of agents.

        genomes: iterable of (genome_id, genome) pairs, as passed by
            neat-python's Population.run.
        config: neat-python Config used to build the networks.
        Side effects: sets `genome.fitness` on every genome, rebuilds
        `self.agents`/`self.foods`, and renders every 100th generation.
        """
        self.state += 1
        self.agents = []
        self.foods = []
        # Initialize: one agent, driven by a freshly built network, per genome.
        for genome_id, genome in genomes:
            genome.fitness = 0
            net = neat.nn.FeedForwardNetwork.create(genome, config)
            # Random position inside the grid and random heading in [-pi, pi).
            x = self.rand.random() * (self.grid_size - 1) + 0.5
            y = self.rand.random() * (self.grid_size - 1) + 0.5
            a = self.rand.random() * 2 * np.pi - np.pi
            self.agents.append(Indiv(x, y, a, net))
        # Spawn food at random positions, kept 2 units away from the walls.
        self.foods = self.rand.random((self.num_food, 2)) * (self.grid_size - 4) + 2
        # Simulate the generation.
        for _ in range(self.steps):
            for agent in self.agents:
                agent.step(self)
                # Food within a 0.5-unit box around the agent is eaten.
                eaten = np.where(
                    (np.abs(self.foods[:, 0] - agent.x) < 0.5) &
                    (np.abs(self.foods[:, 1] - agent.y) < 0.5))[0]
                # Only rebuild the food array when something was actually
                # eaten (the original ran np.delete unconditionally,
                # copying the whole array every agent-step).
                if eaten.size:
                    agent.energy += self.nutrition * eaten.size
                    self.foods = np.delete(self.foods, eaten, axis=0)
            # Visualize every 100th generation only, to keep evolution fast.
            if self.state % 100 == 0:
                self.renderer.render(self)
        # Fitness = total energy the agent accumulated over its lifetime.
        for (genome_id, genome), agent in zip(genomes, self.agents):
            genome.fitness = agent.energy