From e429fd3d65672f34c00c34cb880223eaa5783644 Mon Sep 17 00:00:00 2001
From: Vincent-Pierre BERGES
Date: Mon, 19 Apr 2021 09:06:42 -0700
Subject: [PATCH] POCA Attention will use h_size for embedding size and not 128 (#5281)

---
 ml-agents/mlagents/trainers/torch/networks.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/ml-agents/mlagents/trainers/torch/networks.py b/ml-agents/mlagents/trainers/torch/networks.py
index dce8b03305..4159b57030 100644
--- a/ml-agents/mlagents/trainers/torch/networks.py
+++ b/ml-agents/mlagents/trainers/torch/networks.py
@@ -253,8 +253,6 @@ def forward(
 
 
 class MultiAgentNetworkBody(torch.nn.Module):
-    ATTENTION_EMBEDDING_SIZE = 128
-
     """
     A network body that uses a self attention layer to handle state
     and action input from a potentially variable number of agents that
@@ -293,17 +291,18 @@ def __init__(
             + self.action_spec.continuous_size
         )
 
+        attention_embeding_size = self.h_size
         self.obs_encoder = EntityEmbedding(
-            obs_only_ent_size, None, self.ATTENTION_EMBEDDING_SIZE
+            obs_only_ent_size, None, attention_embeding_size
         )
         self.obs_action_encoder = EntityEmbedding(
-            q_ent_size, None, self.ATTENTION_EMBEDDING_SIZE
+            q_ent_size, None, attention_embeding_size
         )
 
-        self.self_attn = ResidualSelfAttention(self.ATTENTION_EMBEDDING_SIZE)
+        self.self_attn = ResidualSelfAttention(attention_embeding_size)
 
         self.linear_encoder = LinearEncoder(
-            self.ATTENTION_EMBEDDING_SIZE,
+            attention_embeding_size,
             network_settings.num_layers,
             self.h_size,
             kernel_gain=(0.125 / self.h_size) ** 0.5,
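
The patch drops the hard-coded ATTENTION_EMBEDDING_SIZE = 128 constant and instead sizes the entity embeddings, the residual self-attention block, and the input to the linear encoder from the trainer's hidden_units (h_size). Below is a minimal standalone sketch of that idea in plain PyTorch; it deliberately does not use the mlagents EntityEmbedding, ResidualSelfAttention, or LinearEncoder classes, and the ToyMultiAgentBody name, layer choices, and head count are illustrative assumptions only.

# Minimal sketch of tying the attention embedding width to h_size instead of a
# fixed 128. Plain PyTorch stand-ins, not the actual mlagents modules.
import torch
import torch.nn as nn


class ToyMultiAgentBody(nn.Module):  # hypothetical illustration, not MultiAgentNetworkBody
    def __init__(self, obs_size: int, h_size: int):
        super().__init__()
        # Before the patch this width was the constant 128; after the patch it
        # simply follows h_size (network_settings.hidden_units in ml-agents).
        attention_embedding_size = h_size
        self.obs_encoder = nn.Linear(obs_size, attention_embedding_size)
        self.self_attn = nn.MultiheadAttention(
            attention_embedding_size, num_heads=4, batch_first=True
        )
        self.linear_encoder = nn.Linear(attention_embedding_size, h_size)

    def forward(self, obs: torch.Tensor) -> torch.Tensor:
        # obs: [batch, n_agents, obs_size]
        x = self.obs_encoder(obs)
        attended, _ = self.self_attn(x, x, x)  # self-attention across agents
        # Pool over the agent dimension, then project back to h_size.
        return self.linear_encoder(attended.mean(dim=1))


if __name__ == "__main__":
    body = ToyMultiAgentBody(obs_size=8, h_size=256)  # h_size must be divisible by num_heads here
    print(body(torch.randn(2, 3, 8)).shape)  # torch.Size([2, 256])

With the embedding width tied to h_size, changing hidden_units in the trainer configuration also scales the attention layers, rather than leaving them pinned at 128.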