From 940b5a6e7f299af14b81206780b2849c97b8181d Mon Sep 17 00:00:00 2001 From: iLampard Date: Tue, 3 Sep 2024 11:11:12 +0800 Subject: [PATCH] Fix: a temp fix to cap the value in exp term in torch_sahp.py --- easy_tpp/model/torch_model/torch_sahp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/easy_tpp/model/torch_model/torch_sahp.py b/easy_tpp/model/torch_model/torch_sahp.py index eb8f73f..637e2e0 100644 --- a/easy_tpp/model/torch_model/torch_sahp.py +++ b/easy_tpp/model/torch_model/torch_sahp.py @@ -79,7 +79,7 @@ def state_decay(self, encode_state, mu, eta, gamma, duration_t): # [batch_size, hidden_dim] states = torch.matmul(encode_state, mu) + ( torch.matmul(encode_state, eta) - torch.matmul(encode_state, mu)) * torch.exp( -torch.matmul(encode_state, gamma) * duration_t) + -torch.matmul(encode_state, gamma) * torch.clip(duration_t, max=10)) # temporary fix: cap duration_t so the exp argument cannot overflow return states def forward(self, time_seqs, time_delta_seqs, event_seqs, attention_mask):