losses.py
import torch
from torch.nn import functional as F
import commons

class ForwardSumLoss(torch.nn.modules.loss._Loss):
    def __init__(self, blank_logprob=-1):
        super().__init__()
        self.log_softmax = torch.nn.LogSoftmax(dim=3)
        self.ctc_loss = torch.nn.CTCLoss(zero_infinity=True)
        self.blank_logprob = blank_logprob

    # NOTE: NeuralType and the element types used in input_types/output_types
    # (here and in BinLoss) appear to come from NeMo's neural-type system and
    # are not imported in this file, so accessing these properties as-is
    # raises NameError. They are not used by forward().
    @property
    def input_types(self):
        return {
            "attn_logprob": NeuralType(('B', 'S', 'T_spec', 'T_text'), LogprobsType()),
            "in_lens": NeuralType(tuple('B'), LengthsType()),
            "out_lens": NeuralType(tuple('B'), LengthsType()),
        }

    @property
    def output_types(self):
        return {
            "forward_sum_loss": NeuralType(elements_type=LossType()),
        }
    def forward(self, attn_logprob, in_lens, out_lens):
        key_lens = in_lens      # text lengths
        query_lens = out_lens   # spectrogram lengths
        # prepend a blank "token" column on the text axis for CTC
        attn_logprob_padded = F.pad(input=attn_logprob, pad=(1, 0), value=self.blank_logprob)

        total_loss = 0.0
        for bid in range(attn_logprob.shape[0]):
            target_seq = torch.arange(1, key_lens[bid] + 1).unsqueeze(0)
            curr_logprob = attn_logprob_padded[bid].permute(1, 0, 2)[: query_lens[bid], :, : key_lens[bid] + 1]
            curr_logprob = self.log_softmax(curr_logprob[None])[0]
            loss = self.ctc_loss(
                curr_logprob,
                target_seq,
                input_lengths=query_lens[bid : bid + 1],
                target_lengths=key_lens[bid : bid + 1],
            )
            total_loss += loss

        total_loss /= attn_logprob.shape[0]
        return total_loss
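
# --- Illustrative usage sketch (added for clarity; not part of the original
# losses.py). It feeds ForwardSumLoss random tensors of the shapes declared in
# input_types: attn_logprob is [B, 1, T_spec, T_text], in_lens holds text
# lengths and out_lens holds spectrogram lengths. The helper name and the
# concrete sizes are made up for the example.
def _example_forward_sum_loss():
    B, T_spec, T_text = 2, 20, 7
    attn_logprob = torch.randn(B, 1, T_spec, T_text)   # unnormalized attention scores
    in_lens = torch.tensor([7, 5])                      # text lengths (<= T_text)
    out_lens = torch.tensor([20, 16])                   # spectrogram lengths (<= T_spec)
    return ForwardSumLoss()(attn_logprob, in_lens, out_lens)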

class BinLoss(torch.nn.modules.loss._Loss):
    def __init__(self):
        super().__init__()

    @property
    def input_types(self):
        return {
            "hard_attention": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
            "soft_attention": NeuralType(('B', 'S', 'T_spec', 'T_text'), ProbsType()),
        }

    @property
    def output_types(self):
        return {
            "bin_loss": NeuralType(elements_type=LossType()),
        }

    def forward(self, hard_attention, soft_attention):
        log_sum = torch.log(torch.clamp(soft_attention[hard_attention == 1], min=1e-12)).sum()
        return -log_sum / hard_attention.sum()
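
# --- Illustrative usage sketch (added for clarity; not part of the original
# losses.py). BinLoss compares a hard 0/1 alignment with the soft attention
# probabilities; the dummy "attend to the first token" alignment below is only
# a placeholder to make the call runnable.
def _example_bin_loss():
    soft_attention = torch.softmax(torch.randn(2, 1, 20, 7), dim=3)  # [B, 1, T_spec, T_text]
    hard_attention = torch.zeros_like(soft_attention)
    hard_attention[..., 0] = 1.0
    return BinLoss()(hard_attention=hard_attention, soft_attention=soft_attention)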

def feature_loss(fmap_r, fmap_g):
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            rl = rl.float().detach()
            gl = gl.float()
            # fix last 1024 != 2048 after mcmbd
            if rl.size(2) > gl.size(2):
                rl = rl[:, :, : gl.size(2)]
            loss += torch.mean(torch.abs(rl - gl))
    return loss * 2

def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        dr = dr.float()
        dg = dg.float()
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg ** 2)
        loss += r_loss + g_loss
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())
    return loss, r_losses, g_losses

def generator_loss(disc_outputs):
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        dg = dg.float()
        l = torch.mean((1 - dg) ** 2)
        gen_losses.append(l)
        loss += l
    return loss, gen_losses
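
# --- Illustrative usage sketch (added for clarity; not part of the original
# losses.py). It wires feature_loss, discriminator_loss and generator_loss
# together the way a GAN training step typically would, using random stand-ins
# for two sub-discriminators' score maps and feature maps; all names and sizes
# below are placeholders.
def _example_gan_losses():
    y_d_r = [torch.randn(2, 1, 50), torch.randn(2, 1, 25)]          # real scores
    y_d_g = [torch.randn(2, 1, 50), torch.randn(2, 1, 25)]          # generated scores
    fmap_r = [[torch.randn(2, 32, 50)], [torch.randn(2, 64, 25)]]   # real feature maps
    fmap_g = [[torch.randn(2, 32, 50)], [torch.randn(2, 64, 25)]]   # generated feature maps

    loss_disc, real_losses, fake_losses = discriminator_loss(y_d_r, y_d_g)
    loss_gen, gen_losses = generator_loss(y_d_g)
    loss_fm = feature_loss(fmap_r, fmap_g)
    return loss_disc, loss_gen, loss_fm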

def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """
    z_p, logs_q: [b, h, t_t]
    m_p, logs_p: [b, h, t_t]
    """
    z_p = z_p.float()
    logs_q = logs_q.float()
    m_p = m_p.float()
    logs_p = logs_p.float()
    z_mask = z_mask.float()

    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
    kl = torch.sum(kl * z_mask)
    l = kl / torch.sum(z_mask)
    return l
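
# --- Illustrative smoke test (added for clarity; not part of the original
# losses.py). Running the module directly exercises kl_loss on random tensors
# of the shapes given in its docstring, with z_mask broadcast over the channel
# dimension; the sizes are placeholders.
if __name__ == "__main__":
    b, h, t_t = 2, 192, 40
    z_p = torch.randn(b, h, t_t)
    m_p = torch.randn(b, h, t_t)
    logs_q = torch.randn(b, h, t_t)
    logs_p = torch.randn(b, h, t_t)
    z_mask = torch.ones(b, 1, t_t)
    print("kl_loss:", kl_loss(z_p, logs_q, m_p, logs_p, z_mask).item())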