box_world_env.py (forked from nathangrinsztajn/Box-World)
import time
import numpy as np
import matplotlib.pyplot as plt
import gym
from gym.utils import seeding
from gym.spaces.discrete import Discrete
from gym.spaces import Box
from boxworld_gen import *
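# world_gen, update_color, is_empty, grid_color, goal_color and ACTION-related
# helpers used below are provided by the star import from boxworld_gen.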
class boxworld(gym.Env):
"""Boxworld representation
Args:
n: specify the size of the field (n x n)
goal_length
num_distractor
distractor_length
world: an existing world data. If this is given, use this data.
If None, generate a new data by calling world_gen() function
"""
def __init__(self, n, goal_length, num_distractor, distractor_length,
viewport_size=5, max_steps=300, world=None, silence=False):
self.goal_length = goal_length
self.num_distractor = num_distractor
self.distractor_length = distractor_length
self.viewport_size = viewport_size
self.n = n
self.num_pairs = goal_length - 1 + distractor_length * num_distractor
# Penalties and Rewards
self.step_cost = 0.1
self.reward_gem = 10
self.reward_key = 0
# Other Settings
self.viewer = None
self.max_steps = max_steps
self.action_space = Discrete(len(ACTION_LOOKUP))
self.observation_space = Box(low=0, high=255, shape=(n, n, 3), dtype=np.uint8)
self.silence = silence
# Game initialization
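        # Holding no key is encoded by owned_key == grid_color (the background colour);
        # step() resets owned_key to this value after a matching lock is opened.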
self.owned_key = np.array(grid_color, dtype=np.float64)
self.np_random_seed = None
self.world = None
self.reset(world)
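        # Matplotlib blitting setup for fast interactive rendering (see render()).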
self.fig = plt.figure()
self.ax = self.fig.add_subplot(1, 1, 1)
self.img = self.ax.imshow(self.world, vmin=0, vmax=255, interpolation='none')
self.fig.canvas.draw()
self.axbackground = self.fig.canvas.copy_from_bbox(self.ax.bbox)
plt.show(block=False)
def seed(self, seed=None):
self.np_random_seed = seed
return [seed]
def save(self):
np.save('box_world.npy', self.world)
def step(self, action):
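        """Apply a movement action; returns (state, reward, done, info) in the classic gym API."""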
change = CHANGE_COORDINATES[action]
new_position = self.player_position + change
current_position = self.player_position.copy()
self.num_env_steps += 1
reward = -self.step_cost
done = self.num_env_steps == self.max_steps
        # Move the player only if the target cell is reachable: inside the board,
        # not the blocked [0, 0] cell, and either empty, a pickable key,
        # or a lock that matches the currently owned key.
if np.any(new_position < 0) or np.any(new_position >= self.n):
possible_move = False
elif np.array_equal(new_position, [0, 0]):
possible_move = False
elif is_empty(self.world[new_position[0], new_position[1]]):
# No key, no lock
possible_move = True
elif new_position[1] == 0 or is_empty(self.world[new_position[0], new_position[1]-1]):
# It is a key
if is_empty(self.world[new_position[0], new_position[1]+1]):
# Key is not locked
possible_move = True
self.owned_key = self.world[new_position[0], new_position[1]].copy()
# self.world[0, 0] = self.owned_key
if np.array_equal(self.world[new_position[0], new_position[1]], goal_color):
# Goal reached
reward += self.reward_gem
done = True
else:
reward += self.reward_key
else:
possible_move = False
else:
# It is a lock
if np.array_equal(self.world[new_position[0], new_position[1]], self.owned_key):
# The lock matches the key
self.owned_key = np.array(grid_color, dtype=np.float64)
possible_move = True
else:
possible_move = False
if not self.silence:
print("lock color is {}, but owned key is {}".format(
self.world[new_position[0], new_position[1]], self.owned_key))
if possible_move:
self.player_position = new_position
update_color(self.world, previous_agent_loc=current_position, new_agent_loc=new_position)
info = {
"action.name": ACTION_LOOKUP[action],
"action.moved_player": possible_move,
}
return self.state(), reward, done, info
def reset(self, world=None):
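        """Reset the episode, generating a new world unless an existing (world, player_position) pair is given."""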
if world is None:
self.world, self.player_position = world_gen(n=self.n, goal_length=self.goal_length,
num_distractor=self.num_distractor,
distractor_length=self.distractor_length,
seed=self.np_random_seed,
silence=self.silence)
else:
self.world, self.player_position = world
self.num_env_steps = 0
return self.state()
def render(self, mode='window'):
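        """Draw the fog-of-war view in the matplotlib window, or return it as an array when mode='return'."""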
img = self.world_fog_map()
if mode == 'return':
return img
else:
self.img.set_data(img)
self.fig.canvas.restore_region(self.axbackground)
self.ax.draw_artist(self.img)
self.fig.canvas.blit(self.ax.bbox)
plt.pause(0.001)
def get_action_lookup(self):
return ACTION_LOOKUP
def viewport_mask(self):
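        """Boolean (n, n) mask marking the viewport_size x viewport_size window around the player, clipped to the board."""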
        mask = np.zeros(shape=(self.n, self.n), dtype=bool)  # np.bool was removed in NumPy 1.24+
k = self.viewport_size // 2
t = max(self.player_position[0] - k, 0)
b = min(self.player_position[0] + k + 1, self.n)
l = max(self.player_position[1] - k, 0)
r = min(self.player_position[1] + k + 1, self.n)
mask[t:b, l:r] = True
return mask
def viewport_map(self):
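        """Crop of the zero-padded world centred on the player, of shape (viewport_size, viewport_size, 3)."""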
k = self.viewport_size // 2
world = np.pad(self.world, ((k, k), (k, k), (0, 0)),
mode='constant', constant_values=0)
        # In the k-padded world the player's original index i maps to i + k,
        # so the crop spans [i, i + 2k + 1) along each axis.
        t = self.player_position[0]
        b = self.player_position[0] + 2 * k + 1
        l = self.player_position[1]
        r = self.player_position[1] + 2 * k + 1
return world[t:b, l:r, :]
def world_fog_map(self):
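        """Full-board image in which background cells outside the viewport are blacked out; coloured cells stay visible."""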
img = self.world.copy()
mask_out_of_viewport = np.logical_not(self.viewport_mask())
mask_grid_color = np.ma.masked_equal(self.world, grid_color).mask[:, :, 0]
mask = np.logical_and(mask_out_of_viewport, mask_grid_color)
img[mask] = 0
img = img.astype(np.uint8)
return img
def state(self):
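        """Return the observation tuple: (viewport crop / 255, owned key colour / 255, player position / n).

        Note: this tuple does not match the (n, n, 3) Box declared as observation_space in __init__.
        """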
return self.viewport_map() / 255.0,\
self.owned_key / 255.0,\
self.player_position / self.n
ACTION_LOOKUP = {
0: 'move up',
1: 'move down',
2: 'move left',
3: 'move right',
}
CHANGE_COORDINATES = {
0: (-1, 0),
1: (1, 0),
2: (0, -1),
3: (0, 1)
}
if __name__ == "__main__":
# execute only if run as a script
env = boxworld(12, 3, 2, 1)
# env.seed(1)
env.reset()
env.render()
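    # A minimal random-rollout sketch to exercise the API; the action loop and
    # step count below are illustrative assumptions, not part of the original script.
    for _ in range(50):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        env.render()
        if done:
            env.reset()
    time.sleep(1)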