-
Notifications
You must be signed in to change notification settings - Fork 762
/
sb3_highway_dqn.py
56 lines (48 loc) · 1.51 KB
/
sb3_highway_dqn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
import gymnasium as gym
from gymnasium.wrappers import RecordVideo
from stable_baselines3 import DQN
import highway_env # noqa: F401
TRAIN = True  # set to False to skip training and only replay the saved model

if __name__ == "__main__":
    # Create the training environment (rgb_array mode so frames can be recorded)
    env = gym.make("highway-fast-v0", render_mode="rgb_array")

    # Create the model: a small 2x256 MLP DQN.
    model = DQN(
        "MlpPolicy",
        env,
        policy_kwargs=dict(net_arch=[256, 256]),
        learning_rate=5e-4,
        buffer_size=15000,  # replay buffer capacity (transitions)
        learning_starts=200,  # collect this many steps before any gradient update
        batch_size=32,
        gamma=0.8,  # short discount horizon: highway episodes are brief
        train_freq=1,
        gradient_steps=1,
        target_update_interval=50,
        verbose=1,
        tensorboard_log="highway_dqn/",
    )

    # Train the model. learn() resets the env itself, so no manual reset is
    # needed beforehand.
    if TRAIN:
        model.learn(total_timesteps=int(2e4))
        model.save("highway_dqn/model")
        # Drop the in-memory model and reload from disk below, so the replay
        # path is exercised exactly as a user of the saved artifact would see it.
        del model

    # Run the trained model and record a video of every evaluation episode.
    model = DQN.load("highway_dqn/model", env=env)
    env = RecordVideo(
        env, video_folder="highway_dqn/videos", episode_trigger=lambda e: True
    )
    env.unwrapped.config["simulation_frequency"] = 15  # Higher FPS for rendering
    env.unwrapped.set_record_video_wrapper(env)
    for _ in range(10):  # record 10 evaluation episodes
        done = truncated = False
        obs, info = env.reset()
        while not (done or truncated):
            # Greedy (deterministic) action from the learned Q-network
            action, _states = model.predict(obs, deterministic=True)
            obs, reward, done, truncated, info = env.step(action)
            # Render so RecordVideo captures the frame
            env.render()
    env.close()