# agent.py
import random
from collections import deque

import numpy as np
import torch

from model import Linear_QNet, QTrainer
from helper import plot, add_flatten_lists
from run import GameController

MAX_MEMORY = 100_000  # replay buffer capacity
BATCH_SIZE = 1000     # mini-batch size for long-memory training
LR = 0.001            # learning rate
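
# Linear_QNet, QTrainer, plot, and add_flatten_lists come from model.py and
# helper.py (not shown here). A sketch of the interfaces this file assumes:
#   Linear_QNet(input_size, hidden_size, output_size) -- a small feed-forward
#       Q-network, callable on a float tensor, with a save() checkpoint method
#   QTrainer(model, lr, gamma).train_step(state, action, reward, next_state, done)
#       -- one Q-learning (Bellman) update; takes a single transition or a batch
#   add_flatten_lists(lists) -- flattens nested lists into one flat list
#   plot(scores, mean_scores) -- live plot of per-game and running-mean scores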


class Agent:

    def __init__(self):
        self.n_games = 0
        self.epsilon = 0  # randomness; decays as games are played (see get_action)
        self.gamma = 0.95  # discount rate
        self.memory = deque(maxlen=MAX_MEMORY)  # oldest transitions dropped first
        self.model = Linear_QNet(2036, 1356, 5)  # input size matches the flattened state; 5 outputs, one per action
        self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)

    def get_state(self, game):
        # Build one integer feature vector: Pac-Man's position and direction,
        # every ghost's position and direction, and the maze layout, flattened.
        maze_coords = game.maze.maze_coords
        pacman_position = game.pacman.position.asInt()
        pacman_direction = game.pacman.direction.asInt()
        ghost_positions = [ghost.position.asInt() for ghost in game.ghosts]
        ghost_directions = [ghost.direction.asInt() for ghost in game.ghosts]
        state = [pacman_position, pacman_direction, ghost_positions, ghost_directions, maze_coords]
        state = add_flatten_lists(state)
        return np.array(state, dtype=int)
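
    # Note: the flattened length must equal the Q-network's input size (2036 in
    # __init__ above); changing the maze or the number of ghosts changes this
    # length, and Linear_QNet would need to be resized to match.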

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))  # deque pops the oldest once MAX_MEMORY is reached

    def train_long_memory(self):
        if len(self.memory) > BATCH_SIZE:
            mini_sample = random.sample(self.memory, BATCH_SIZE)  # list of transition tuples
        else:
            mini_sample = self.memory
        states, actions, rewards, next_states, dones = zip(*mini_sample)
        self.trainer.train_step(states, actions, rewards, next_states, dones)

    def train_short_memory(self, state, action, reward, next_state, done):
        self.trainer.train_step(state, action, reward, next_state, done)
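
    # The agent trains on two time scales: train_short_memory runs an online
    # update on the single latest transition every frame, while
    # train_long_memory replays a random batch from the whole buffer after each
    # game, breaking up the correlation between consecutive frames.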

    def get_action(self, state):
        # epsilon-greedy: trade off exploration vs. exploitation; epsilon
        # shrinks linearly over the first 80 games
        self.epsilon = 80 - self.n_games
        final_move = [0, 0, 0, 0, 0]  # one-hot over the 5 actions (up, down, left, right, stop)
        if random.randint(0, 200) < self.epsilon:
            move = random.randint(0, 4)
            final_move[move] = 1
            print('random')
        else:
            state0 = torch.tensor(state, dtype=torch.float)
            prediction = self.model(state0)
            move = torch.argmax(prediction).item()  # action with the highest predicted Q-value
            final_move[move] = 1
            print('predicted')
        return final_move
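
    # Worked example of the schedule above: epsilon = 80 - n_games is compared
    # against randint(0, 200), so a random move is chosen with probability
    # roughly 80/201 = 40% in game 0, 40/201 = 20% around game 40, and 0 from
    # game 80 onward.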


def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    record = 0
    game = GameController()
    game.startGame()
    agent = Agent()
    while True:
        # get old state
        state_old = agent.get_state(game)

        # choose move
        final_move = agent.get_action(state_old)

        # perform move and get new state
        reward, done, score = game.update(final_move)
        state_new = agent.get_state(game)

        # train short memory on this single transition
        agent.train_short_memory(state_old, final_move, reward, state_new, done)

        # remember the transition for replay
        agent.remember(state_old, final_move, reward, state_new, done)

        if done:
            # train long memory on a replay batch, then plot results
            game.startGame()
            agent.n_games += 1
            agent.train_long_memory()

            if score > record:
                record = score
                agent.model.save()

            print('Game', agent.n_games, 'Score', score, 'Record:', record)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)


if __name__ == '__main__':
    train()
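
# Assuming run.py's GameController opens the usual pygame window, training is
# started from the repo root with: python agent.py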