test_submission_code.py
import gym


class EpisodeDone(Exception):
    """Raised by Episode.step() once the wrapped environment signals done."""


class Episode(gym.Env):
    """A class for a single episode."""

    def __init__(self, env):
        self.env = env
        self.action_space = env.action_space
        self.observation_space = env.observation_space
        self._done = False

    def reset(self):
        if not self._done:
            return self.env.reset()

    def step(self, action):
        s, r, d, i = self.env.step(action)
        if d:
            # Signal the end of the episode with an exception instead of
            # returning the terminal transition.
            self._done = True
            raise EpisodeDone()
        else:
            return s, r, d, i
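
# Illustrative usage of the Episode wrapper above (not part of the submission
# template; "MineRLObtainDiamond-v0" is just an example environment id):
#
#     episode = Episode(gym.make("MineRLObtainDiamond-v0"))
#     obs = episode.reset()
#     try:
#         while True:
#             obs, reward, done, info = episode.step(episode.action_space.sample())
#     except EpisodeDone:
#         pass  # the episode has ended
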
class MineRLAgent():
    """
    To compete in the competition, you are required to implement the two
    functions in this class:
    - load_agent: a function that loads e.g. network models
    - run_agent_on_episode: a function that plays one game of MineRL

    By default this agent behaves like a random agent: it picks a random
    action at each step.

    NOTE:
    This class enables the evaluator to run your agent in parallel in threads,
    which means anything loaded in load_agent will be shared among the
    parallel agents. Take care when tracking e.g. hidden state (this should
    go in run_agent_on_episode).
    """

    def load_agent(self):
        """
        This method is called at the beginning of the evaluation.
        You should load your model and do any preprocessing here.

        THIS METHOD IS ONLY CALLED ONCE AT THE BEGINNING OF THE EVALUATION.
        DO NOT LOAD YOUR MODEL ANYWHERE ELSE.
        """
        # This is a random agent, so there is nothing to load.
        # YOUR CODE GOES HERE
        pass

    def run_agent_on_episode(self, single_episode_env: Episode):
        """This method runs your agent on a SINGLE episode.

        You should just implement the standard environment interaction loop
        here:
            obs = env.reset()
            while not done:
                env.step(self.agent.act(obs))
                ...

        NOTE:
        This method will be called in PARALLEL during evaluation.
        So, only store state in LOCAL variables.
        For example, if using an LSTM, don't store the hidden state in the
        class but as a local variable of the method.

        Args:
            single_episode_env (Episode): The environment your agent should
                interact with.
        """
        # An implementation of a random agent.
        # Note that `done` is never set to True here: Episode.step() raises
        # EpisodeDone on the final step, which terminates this loop and is
        # caught by the evaluator.
        # YOUR CODE GOES HERE
        _ = single_episode_env.reset()
        done = False
        while not done:
            random_act = single_episode_env.action_space.sample()
            single_episode_env.step(random_act)
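
# A minimal sketch of a non-random agent following the notes above. All names
# are hypothetical: MyPolicy, its load() helper, its act(obs, hidden) method,
# and the "train/policy.pt" path are assumptions, not part of the template.
#
#     class MyMineRLAgent(MineRLAgent):
#         def load_agent(self):
#             # Loaded ONCE and shared across parallel threads, so it should
#             # only hold read-only state such as network weights.
#             self.policy = MyPolicy.load("train/policy.pt")
#
#         def run_agent_on_episode(self, single_episode_env: Episode):
#             obs = single_episode_env.reset()
#             hidden = None  # recurrent state stays LOCAL, never on self
#             while True:
#                 action, hidden = self.policy.act(obs, hidden)
#                 # Episode.step() raises EpisodeDone after the final step,
#                 # which ends this loop and is caught by the evaluator.
#                 obs, reward, done, info = single_episode_env.step(action)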