#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Angular-error losses/metrics and Gaussian negative-log-likelihood loss
variants for TensorFlow/Keras models.

Created on Wed Oct 14 15:41:30 2020
@author: laakom
"""
import math

import tensorflow as tf
from tensorflow.keras import backend as K

# Conversion factor from radians to degrees.
RAD2DEG = 180.0 / math.pi


def angluar_error_nll2(y_trues, y_preds):
    """Mean angular error in degrees between y_trues and the first three
    prediction columns; any extra columns (e.g. an uncertainty head) are
    ignored."""
    y_pred = y_preds[:, 0:3]
    # log-RGB to RGB (disabled):
    # y_pred = log_to_rgb(-y_pred)
    # y_true = log_to_rgb(-y_trues)
    # Arccos of the dot product of the L2-normalized vectors, clipped
    # slightly inside [0, 1] so the gradient of acos stays finite.
    deff = tf.acos(K.clip(
        K.sum(tf.nn.l2_normalize(y_trues, axis=-1)
              * tf.nn.l2_normalize(y_pred, axis=-1),
              axis=-1, keepdims=True),
        0.0001, 1 - 0.0001))
    return K.mean(deff) * RAD2DEG


def angluar_error_nll_metric2(y_trues, y_preds):
    """Metric version of the angular error, clipped to the full [0, 1]."""
    y_pred = y_preds[:, 0:3]
    # Arccos of the dot product of the L2-normalized vectors.
    deff = tf.acos(K.clip(
        K.sum(tf.nn.l2_normalize(y_trues, axis=-1)
              * tf.nn.l2_normalize(y_pred, axis=-1),
              axis=-1, keepdims=True),
        0.0, 1.0))
    return K.mean(deff) * RAD2DEG
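

# ---------------------------------------------------------------------------
# Illustrative sanity check for the two angular-error functions above (a
# sketch added for clarity; the helper name `_demo_angular_error` is not part
# of the original file). Identical directions should score ~0 degrees and
# orthogonal directions ~90 degrees; the fourth prediction column stands in
# for the extra uncertainty output that both functions slice off.
def _demo_angular_error():
    y_true = tf.constant([[1.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0]])
    y_pred = tf.constant([[1.0, 0.0, 0.0, 0.5],   # same direction -> ~0 deg
                          [0.0, 1.0, 0.0, 0.5]])  # orthogonal     -> ~90 deg
    # Mean over the batch: (0 + 90) / 2 = ~45 degrees.
    return angluar_error_nll_metric2(y_true, y_pred)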


def error_estimate_mse(ytrue, ypreds):
    """MSE between the angular error (in radians) of the first three
    prediction columns and the network's own error estimate in column 4."""
    y_pred = ypreds[:, 0:3]
    sigma = ypreds[:, 3:]  # predicted error estimate  # + K.epsilon()
    sigma = tf.reshape(sigma, (-1,))
    # True angular error of the prediction, in radians.
    errors = tf.acos(K.clip(
        K.sum(tf.nn.l2_normalize(ytrue, axis=-1)
              * tf.nn.l2_normalize(y_pred, axis=-1),
              axis=-1, keepdims=True),
        0.0, 1.0))
    errors = tf.reshape(errors, (-1,))
    # mse = K.sum(K.square(errors - sigma), axis=1)
    mse = K.mean(tf.math.squared_difference(errors, sigma), axis=-1)
    return mse
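

# ---------------------------------------------------------------------------
# Usage sketch (an assumption about the intended setup, not confirmed by this
# file; `_demo_error_estimate` is a hypothetical helper): a perfect error
# estimator would output exactly the true angular error in its fourth
# column, driving this loss to zero.
def _demo_error_estimate():
    y_true = tf.constant([[1.0, 0.0, 0.0],
                          [1.0, 0.0, 0.0]])
    y_pred = tf.constant([[1.0, 0.0, 0.0, 0.0],           # true error 0
                          [0.0, 1.0, 0.0, math.pi / 2]])  # true error pi/2
    return error_estimate_mse(y_true, y_pred)  # ~0.0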


# Things I tried that did not work out:
def gaussian_nll_inv(ytrue, ypreds, lam=1):
    """Keras implementation of a multivariate Gaussian negative
    log-likelihood loss, assuming a diagonal covariance matrix.

    Here the sigma outputs act as precisions (inverse variances): the
    squared error is multiplied by sigma, and the log-determinant term
    enters with a negative sign, weighted by `lam`.

    Parameters
    ----------
    ytrue : tf.Tensor of shape [n_samples, n_dims]
        Ground-truth values.
    ypreds : tf.Tensor of shape [n_samples, n_dims * 2]
        Predicted mu and sigma values (e.g. from your neural network).

    Returns
    -------
    neg_log_likelihood : float
        Negative log-likelihood averaged over samples.

    This loss can then be used as a target loss for any Keras model, e.g.:
        model.compile(loss=gaussian_nll_inv, optimizer='Adam')
    """
    mu = ypreds[:, 0:3]
    sigma = ypreds[:, 3:]  # + K.epsilon()
    mse = K.sum(K.square(ytrue - mu) * sigma, axis=1)
    sigma_trace = K.sum(K.log(sigma), axis=1)
    log_likelihood = mse - lam * sigma_trace
    return K.mean(log_likelihood)
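

# Because Keras calls a loss as loss(y_true, y_pred), a non-default `lam`
# has to be bound before compiling. A minimal closure sketch (the factory
# name `make_gaussian_nll_inv` is hypothetical, not from the original file):
def make_gaussian_nll_inv(lam=1):
    def loss(ytrue, ypreds):
        return gaussian_nll_inv(ytrue, ypreds, lam=lam)
    return loss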


def gaussian_nll_inv2(ytrue, ypreds):
    """Keras implementation of a multivariate Gaussian negative
    log-likelihood loss, assuming a diagonal covariance matrix.

    Experimental variant: the sigma outputs are on a log scale (the
    squared error is weighted by exp(sigma)), and the regularizer is the
    squared sigma rather than the usual linear log-determinant term.

    Parameters
    ----------
    ytrue : tf.Tensor of shape [n_samples, n_dims]
        Ground-truth values.
    ypreds : tf.Tensor of shape [n_samples, n_dims * 2]
        Predicted mu and log-sigma values (e.g. from your neural network).

    Returns
    -------
    neg_log_likelihood : float
        Negative log-likelihood averaged over samples.

    This loss can then be used as a target loss for any Keras model, e.g.:
        model.compile(loss=gaussian_nll_inv2, optimizer='Adam')
    """
    mu = ypreds[:, 0:3]
    sigma = ypreds[:, 3:]  # + K.epsilon()
    # mse = K.sum(tf.square(ytrue - mu) * sigma, axis=1)
    mse = K.sum(tf.square(ytrue - mu) * tf.exp(sigma), axis=1)
    # sigma_trace = K.sum(K.log(sigma + 0.001), axis=1)
    sigma_trace = K.sum(tf.square(sigma), axis=1)
    log_likelihood = mse - sigma_trace
    return K.mean(log_likelihood)


def gaussian_nll_invlog(ytrue, ypreds):
    """Keras implementation of a multivariate Gaussian negative
    log-likelihood loss, assuming a diagonal covariance matrix.

    Two-dimensional variant of `gaussian_nll_inv` (mu and sigma have two
    components each); sigma again acts as a precision.

    Parameters
    ----------
    ytrue : tf.Tensor of shape [n_samples, n_dims]
        Ground-truth values.
    ypreds : tf.Tensor of shape [n_samples, n_dims * 2]
        Predicted mu and sigma values (e.g. from your neural network).

    Returns
    -------
    neg_log_likelihood : float
        Negative log-likelihood averaged over samples.

    This loss can then be used as a target loss for any Keras model, e.g.:
        model.compile(loss=gaussian_nll_invlog, optimizer='Adam')
    """
    mu = ypreds[:, 0:2]
    sigma = ypreds[:, 2:]  # + K.epsilon()
    mse = K.sum(K.square(ytrue - mu) * sigma, axis=1)
    sigma_trace = K.sum(K.log(sigma), axis=1)
    log_likelihood = mse - sigma_trace
    return K.mean(log_likelihood)


def gaussian_nll(ytrue, ypreds):
    """Keras implementation of a multivariate Gaussian negative
    log-likelihood loss, assuming a diagonal covariance matrix.

    Here the sigma outputs act as variances: the squared error is divided
    by sigma, and the sum of the sigmas (rather than the usual sum of
    their logs, left commented out below) serves as the regularizer.

    Parameters
    ----------
    ytrue : tf.Tensor of shape [n_samples, n_dims]
        Ground-truth values.
    ypreds : tf.Tensor of shape [n_samples, n_dims * 2]
        Predicted mu and sigma values (e.g. from your neural network).

    Returns
    -------
    neg_log_likelihood : float
        Negative log-likelihood averaged over samples.

    This loss can then be used as a target loss for any Keras model, e.g.:
        model.compile(loss=gaussian_nll, optimizer='Adam')
    """
    mu = ypreds[:, 0:3]
    sigma = ypreds[:, 3:] + K.epsilon()
    mse = K.sum(K.square(ytrue - mu) / sigma, axis=1)
    # sigma_trace = K.sum(K.log(sigma), axis=1)
    sigma_trace = K.sum(sigma, axis=1)
    log_likelihood = mse + sigma_trace
    return K.mean(log_likelihood)
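

# ---------------------------------------------------------------------------
# Minimal compile sketch following the docstring's own example. The input
# size, hidden width, and `_demo_gaussian_nll_model` name are placeholders;
# the only real constraint is a final head emitting n_dims * 2 = 6 values
# (3 mu + 3 sigma), with a positive activation keeping sigma valid.
def _demo_gaussian_nll_model():
    inputs = tf.keras.Input(shape=(128,))  # hypothetical feature size
    hidden = tf.keras.layers.Dense(64, activation='relu')(inputs)
    mu = tf.keras.layers.Dense(3)(hidden)
    # softplus keeps the variance head strictly positive.
    sigma = tf.keras.layers.Dense(3, activation='softplus')(hidden)
    outputs = tf.keras.layers.Concatenate()([mu, sigma])
    model = tf.keras.Model(inputs, outputs)
    model.compile(loss=gaussian_nll, optimizer='Adam')
    return model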