[bug-fix] Fix entropy computation for GaussianDistribution (#3684)
Ervin T authored and vincentpierre committed Mar 27, 2020
1 parent b2ed5d1 commit 47f11da
Showing 5 changed files with 12 additions and 4 deletions.
1 change: 1 addition & 0 deletions com.unity.ml-agents/CHANGELOG.md
@@ -9,6 +9,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
 ### Bug Fixes
 - Raise the wall in CrawlerStatic scene to prevent Agent from falling off. (#3650)
 - Fixed an issue where specifying `vis_encode_type` was required only for SAC. (#3677)
+- Fixed the reported entropy values for continuous actions (#3684)
 - Fixed an issue where switching models using `SetModel()` during training would use an excessive amount of memory. (#3664)
 - Environment subprocesses now close immediately on timeout or wrong API version. (#3679)
 - Fixed an issue in the gym wrapper that would raise an exception if an Agent called EndEpisode multiple times in the same step. (#3700)
2 changes: 1 addition & 1 deletion ml-agents/mlagents/trainers/distributions.py
@@ -160,7 +160,7 @@ def _create_entropy(
         self, encoded: "GaussianDistribution.MuSigmaTensors"
     ) -> tf.Tensor:
         single_dim_entropy = 0.5 * tf.reduce_mean(
-            tf.log(2 * np.pi * np.e) + tf.square(encoded.log_sigma)
+            tf.log(2 * np.pi * np.e) + 2 * encoded.log_sigma
         )
         # Make entropy the right shape
         return tf.ones_like(tf.reshape(encoded.mu[:, 0], [-1])) * single_dim_entropy
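
The corrected expression matches the closed-form differential entropy of a univariate Gaussian, H = 0.5 * ln(2πeσ²) = 0.5 * (ln(2πe) + 2 ln σ); the old code squared log σ instead of doubling it. A minimal NumPy sketch (not part of the commit; the `log_sigma` value is illustrative) contrasting the two expressions:

```python
import numpy as np

# Entropy of N(mu, sigma^2): 0.5 * ln(2*pi*e*sigma^2) = 0.5 * (ln(2*pi*e) + 2*ln(sigma))
log_sigma = 1.0  # illustrative value, same as the unit test below

fixed = 0.5 * (np.log(2 * np.pi * np.e) + 2 * log_sigma)   # new expression
buggy = 0.5 * (np.log(2 * np.pi * np.e) + log_sigma ** 2)  # old expression

print(fixed)  # ~2.42
print(buggy)  # ~1.92; the two only agree when log_sigma is 0 or 2
```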
1 change: 0 additions & 1 deletion ml-agents/mlagents/trainers/sac/optimizer.py
@@ -155,7 +155,6 @@ def __init__(self, policy: TFPolicy, trainer_params: Dict[str, Any]):
             "q1_loss": self.q1_loss,
             "q2_loss": self.q2_loss,
             "entropy_coef": self.ent_coef,
-            "entropy": self.policy.entropy,
             "update_batch": self.update_batch_policy,
             "update_value": self.update_batch_value,
             "update_entropy": self.update_batch_entropy,
10 changes: 9 additions & 1 deletion ml-agents/mlagents/trainers/tests/test_distributions.py
@@ -53,7 +53,7 @@ def dummy_config():
 
 def test_gaussian_distribution():
     with tf.Graph().as_default():
-        logits = tf.Variable(initial_value=[[0, 0]], trainable=True, dtype=tf.float32)
+        logits = tf.Variable(initial_value=[[1, 1]], trainable=True, dtype=tf.float32)
         distribution = GaussianDistribution(
             logits,
             act_size=VECTOR_ACTION_SPACE,
@@ -71,6 +71,14 @@ def test_gaussian_distribution():
                     assert out.shape[1] == VECTOR_ACTION_SPACE[0]
             output = sess.run([distribution.total_log_probs])
             assert output[0].shape[0] == 1
+            # Test entropy is correct
+            log_std_tensor = tf.get_default_graph().get_tensor_by_name(
+                "log_std/BiasAdd:0"
+            )
+            feed_dict = {log_std_tensor: [[1.0, 1.0]]}
+            entropy = sess.run([distribution.entropy], feed_dict=feed_dict)
+            # Entropy with log_std of 1.0 should be 2.42
+            assert pytest.approx(entropy[0], 0.01) == 2.42
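
The expected value of 2.42 in the new assertion follows directly from the corrected formula; a quick arithmetic check (a standalone sketch, assuming a single action dimension with log_std = 1.0):

```python
import numpy as np

# Per-dimension Gaussian entropy with log_std = 1.0:
#   0.5 * (ln(2*pi*e) + 2 * 1.0) = 0.5 * (2.8379 + 2.0) = ~2.419
entropy = 0.5 * (np.log(2 * np.pi * np.e) + 2 * 1.0)
print(round(entropy, 2))  # 2.42
```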


def test_tanh_distribution():
2 changes: 1 addition & 1 deletion ml-agents/mlagents/trainers/tests/test_simple_rl.py
@@ -222,7 +222,7 @@ def test_visual_advanced_ppo(vis_encode_type, num_visual):
 def test_recurrent_ppo(use_discrete):
     env = Memory1DEnvironment([BRAIN_NAME], use_discrete=use_discrete)
     override_vals = {
-        "max_steps": 4000,
+        "max_steps": 5000,
         "batch_size": 64,
         "buffer_size": 128,
         "learning_rate": 1e-3,