PPO additions and warnings
* Add linear decay to learning rate for PPO
* Add warning/exception for unsupported brain configurations w/ PPO
awjuliani committed Sep 22, 2017
1 parent b8109bb commit 77b04d1
Showing 5 changed files with 38 additions and 25 deletions.
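For context, here is a minimal sketch of the linear learning-rate decay this commit wires into PPOModel, using the same tf.train.polynomial_decay call that appears in the models.py diff below (TensorFlow 1.x). The initial rate, the stand-alone session, and the probe steps are illustrative, not part of the commit.

import tensorflow as tf

lr = 3e-4        # illustrative initial learning rate
max_step = 5e6   # mirrors the --max-steps default

global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
# power=1.0 makes the schedule linear: the rate falls from lr toward ~0 at max_step.
learning_rate = tf.train.polynomial_decay(lr, global_step, max_step, 1e-10, power=1.0)

step_ph = tf.placeholder(tf.int32)
set_step = tf.assign(global_step, step_ph)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in (0, 2500000, 5000000):
        sess.run(set_step, feed_dict={step_ph: step})
        print(step, sess.run(learning_rate))  # roughly 3.0e-4, 1.5e-4, 1e-10

In the diff itself the schedule is driven by self.global_step, which the trainer advances through increment_step, and the decayed rate feeds the Adam optimizer directly.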
2 changes: 1 addition & 1 deletion python/PPO.ipynb
@@ -105,7 +105,7 @@
"# Create the Tensorflow model graph\n",
"ppo_model = create_agent_model(env, lr=learning_rate,\n",
" h_size=hidden_units, epsilon=epsilon,\n",
" beta=beta)\n",
" beta=beta, max_step=max_steps)\n",
"\n",
"is_continuous = (env.brains[brain_name].action_space_type == \"continuous\")\n",
"use_observations = (env.brains[brain_name].number_observations > 0)\n",
6 changes: 3 additions & 3 deletions python/ppo.py
@@ -15,7 +15,7 @@
Options:
--help Show this message.
- --max-step=<n> Maximum number of steps to run environment [default: 5e6].
+ --max-steps=<n> Maximum number of steps to run environment [default: 5e6].
--run-path=<path> The sub-directory name for model and summary statistics [default: ppo].
--load Whether to load the model or randomly initialize [default: False].
--train Whether to train model, or only run inference [default: True].
@@ -38,7 +38,7 @@
print(options)

# General parameters
- max_steps = float(options['--max-step'])
+ max_steps = float(options['--max-steps'])
model_path = './models/{}'.format(str(options['--run-path']))
summary_path = './summaries/{}'.format(str(options['--run-path']))
load_model = options['--load']
@@ -69,7 +69,7 @@
# Create the Tensorflow model graph
ppo_model = create_agent_model(env, lr=learning_rate,
h_size=hidden_units, epsilon=epsilon,
- beta=beta)
+ beta=beta, max_step=max_steps)

is_continuous = (env.brains[brain_name].action_space_type == "continuous")
use_observations = (env.brains[brain_name].number_observations > 0)
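Note that the command-line flag is renamed from --max-step to --max-steps, so existing invocations need updating. An illustrative call, assuming the usage pattern defined at the top of ppo.py and a placeholder environment name:

python ppo.py 3DBall --max-steps=5e6 --run-path=ppo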
43 changes: 27 additions & 16 deletions python/ppo/models.py
@@ -2,9 +2,10 @@
import tensorflow as tf
import tensorflow.contrib.layers as c_layers
from tensorflow.python.tools import freeze_graph
+ from unityagents import UnityEnvironmentException


- def create_agent_model(env, lr=1e-4, h_size=128, epsilon=0.2, beta=1e-3):
+ def create_agent_model(env, lr=1e-4, h_size=128, epsilon=0.2, beta=1e-3, max_step=5e6):
"""
Takes a Unity environment and model-specific hyperparameters and returns the
appropriate PPO agent model for the environment.
@@ -17,16 +18,23 @@ def create_agent_model(env, lr=1e-4, h_size=128, epsilon=0.2, beta=1e-3):
"""
brain_name = env.brain_names[0]
if env.brains[brain_name].action_space_type == "continuous":
- return ContinuousControlModel(lr, env.brains[brain_name].state_space_size,
- env.brains[brain_name].action_space_size, h_size, epsilon, beta)
+ if env.brains[brain_name].number_observations == 0:
+ return ContinuousControlModel(lr, env.brains[brain_name].state_space_size,
+ env.brains[brain_name].action_space_size, h_size, epsilon, beta, max_step)
+ else:
+ raise UnityEnvironmentException("There is currently no PPO model which supports both a continuous "
+ "action space and camera observations.")
if env.brains[brain_name].action_space_type == "discrete":
if env.brains[brain_name].number_observations == 0:
return DiscreteControlModel(lr, env.brains[brain_name].state_space_size,
- env.brains[brain_name].action_space_size, h_size, epsilon, beta)
+ env.brains[brain_name].action_space_size, h_size, epsilon, beta, max_step)
else:
brain = env.brains[brain_name]
+ if env.brains[brain_name].state_space_size > 0:
+ print("This brain contains agents with both observations and states. There is currently no PPO model"
+ " which supports this. Defaulting to Vision-based PPO model.")
h, w = brain.camera_resolutions[0]['height'], brain.camera_resolutions[0]['height']
- return VisualDiscreteControlModel(lr, h, w, env.brains[brain_name].action_space_size, h_size, epsilon, beta)
+ return VisualDiscreteControlModel(lr, h, w, env.brains[brain_name].action_space_size, h_size, epsilon, beta, max_step)


def save_model(sess, saver, model_path="./", steps=0):
@@ -37,7 +45,7 @@ def save_model(sess, saver, model_path="./", steps=0):
:param steps: Current number of steps in training process.
:param saver: Tensorflow saver for session.
"""
- last_checkpoint = model_path+'/model-'+str(steps)+'.cptk'
+ last_checkpoint = model_path + '/model-' + str(steps) + '.cptk'
saver.save(sess, last_checkpoint)
tf.train.write_graph(sess.graph_def, model_path, 'raw_graph_def.pb', as_text=False)
print("Saved Model")
@@ -61,7 +69,7 @@ def export_graph(model_path, env_name="env", target_nodes="action"):


class PPOModel(object):
- def __init__(self, probs, old_probs, value, entropy, beta, epsilon, lr):
+ def __init__(self, probs, old_probs, value, entropy, beta, epsilon, lr, max_step):
"""
Creates training-specific Tensorflow ops for PPO models.
:param probs: Current policy probabilities
@@ -85,15 +93,18 @@ def __init__(self, probs, old_probs, value, entropy, beta, epsilon, lr):

self.loss = self.policy_loss + self.value_loss - beta * tf.reduce_mean(entropy)

- optimizer = tf.train.AdamOptimizer(learning_rate=lr)
+ self.global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
+ self.learning_rate = tf.train.polynomial_decay(lr, self.global_step,
+ max_step, 1e-10,
+ power=1.0)
+ optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.update_batch = optimizer.minimize(self.loss)

- self.global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int32)
- self.increment_step = tf.assign(self.global_step, self.global_step+1)
+ self.increment_step = tf.assign(self.global_step, self.global_step + 1)


class ContinuousControlModel(PPOModel):
- def __init__(self, lr, s_size, a_size, h_size, epsilon, beta):
+ def __init__(self, lr, s_size, a_size, h_size, epsilon, beta, max_step):
"""
Creates Continuous Control Actor-Critic model.
:param s_size: State-space size
@@ -127,11 +138,11 @@ def __init__(self, lr, s_size, a_size, h_size, epsilon, beta):

self.old_probs = tf.placeholder(shape=[None, a_size], dtype=tf.float32, name='old_probabilities')

- PPOModel.__init__(self, self.probs, self.old_probs, self.value, self.entropy, 0.0, epsilon, lr)
+ PPOModel.__init__(self, self.probs, self.old_probs, self.value, self.entropy, 0.0, epsilon, lr, max_step)


class DiscreteControlModel(PPOModel):
- def __init__(self, lr, s_size, a_size, h_size, epsilon, beta):
+ def __init__(self, lr, s_size, a_size, h_size, epsilon, beta, max_step):
"""
Creates Discrete Control Actor-Critic model.
:param s_size: State-space size
@@ -158,11 +169,11 @@ def __init__(self, lr, s_size, a_size, h_size, epsilon, beta):
self.old_responsible_probs = tf.reduce_sum(self.old_probs * self.selected_actions, axis=1)

PPOModel.__init__(self, self.responsible_probs, self.old_responsible_probs,
- self.value, self.entropy, beta, epsilon, lr)
+ self.value, self.entropy, beta, epsilon, lr, max_step)


class VisualDiscreteControlModel(PPOModel):
- def __init__(self, lr, o_size_h, o_size_w, a_size, h_size, epsilon, beta):
+ def __init__(self, lr, o_size_h, o_size_w, a_size, h_size, epsilon, beta, max_step):
"""
Creates Discrete Control Actor-Critic model for use with visual observations (images).
:param o_size_h: Observation height.
@@ -194,4 +205,4 @@ def __init__(self, lr, o_size_h, o_size_w, a_size, h_size, epsilon, beta):
self.old_responsible_probs = tf.reduce_sum(self.old_probs * self.selected_actions, axis=1)

PPOModel.__init__(self, self.responsible_probs, self.old_responsible_probs,
- self.value, self.entropy, beta, epsilon, lr)
+ self.value, self.entropy, beta, epsilon, lr, max_step)
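The new guard in create_agent_model raises UnityEnvironmentException when a brain combines a continuous action space with camera observations. A hedged sketch of how a training script might surface that error; the environment binary name and hyperparameter values are placeholders, and the import paths assume the script is run from the python/ directory:

from unityagents import UnityEnvironment, UnityEnvironmentException
from ppo.models import create_agent_model

env = UnityEnvironment(file_name="my_continuous_camera_env")  # hypothetical build
try:
    ppo_model = create_agent_model(env, lr=3e-4, h_size=64, epsilon=0.2,
                                   beta=1e-3, max_step=5e6)
except UnityEnvironmentException as err:
    # Triggered when the default brain is continuous and uses camera observations.
    print("Unsupported brain configuration:", err)
    env.close()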
10 changes: 6 additions & 4 deletions python/ppo/trainer.py
@@ -17,7 +17,7 @@ def __init__(self, ppo_model, sess, info, is_continuous, use_observations):
self.model = ppo_model
self.sess = sess
stats = {'cumulative_reward': [], 'episode_length': [], 'value_estimate': [],
- 'entropy': [], 'value_loss': [], 'policy_loss': []}
+ 'entropy': [], 'value_loss': [], 'policy_loss': [], 'learning_rate': []}
self.stats = stats

self.training_buffer = vectorize_history(empty_local_history({}))
@@ -45,11 +45,13 @@ def take_action(self, info, env, brain_name):
self.model.batch_size: len(info.states)}
else:
feed_dict = {self.model.state_in: info.states, self.model.batch_size: len(info.states)}
- actions, a_dist, value, ent = self.sess.run([self.model.output, self.model.probs,
- self.model.value, self.model.entropy],
- feed_dict=feed_dict)
+ actions, a_dist, value, ent, learn_rate = self.sess.run([self.model.output, self.model.probs,
+ self.model.value, self.model.entropy,
+ self.model.learning_rate],
+ feed_dict=feed_dict)
self.stats['value_estimate'].append(value)
self.stats['entropy'].append(ent)
+ self.stats['learning_rate'].append(learn_rate)
new_info = env.step(actions, value={brain_name: value})[brain_name]
self.add_experiences(info, new_info, epsi, actions, a_dist, value)
return new_info
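take_action now also buffers the decayed learning rate in self.stats['learning_rate']. A sketch of one way such a buffered statistic can be folded into a TensorBoard scalar with the TF 1.x summary API; the helper name, tag prefix, and writer are illustrative and not part of this diff:

import numpy as np
import tensorflow as tf

def write_stat(summary_writer, stats, key, step):
    # Average the values buffered since the last summary, emit a scalar, then reset.
    if len(stats[key]) > 0:
        summary = tf.Summary()
        summary.value.add(tag='Info/' + key, simple_value=float(np.mean(stats[key])))
        summary_writer.add_summary(summary, step)
        summary_writer.flush()
        stats[key] = []

# e.g. writer = tf.summary.FileWriter(summary_path)
#      write_stat(writer, trainer.stats, 'learning_rate', steps)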
2 changes: 1 addition & 1 deletion python/setup.py
@@ -7,7 +7,7 @@
required = f.read().splitlines()

setup(name='unityagents',
- version='0.1',
+ version='0.1.1',
description='Unity Machine Learning Agents',
license='Apache License 2.0',
author='Unity Technologies',
Expand Down
