bg_utils.py
'''
Author: Panpan Zheng
Date created: 2/15/2018
Python Version: 2.7
'''
import numpy as np
import tensorflow as tf
from sklearn.neighbors import KernelDensity
import matplotlib.pyplot as plt

def one_hot(x, depth): # one-hot encode integer class labels into a (len(x), depth) matrix.
    x_one_hot = np.zeros((len(x), depth), dtype=np.int32)
    x = x.astype(int)
    for i in range(x_one_hot.shape[0]):
        x_one_hot[i, x[i]] = 1
    return x_one_hot
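
# A minimal usage sketch (the labels below are illustrative, not from the
# original repo):
# >>> one_hot(np.array([0., 2., 1.]), depth=3)
# array([[1, 0, 0],
#        [0, 0, 1],
#        [0, 1, 0]], dtype=int32)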

def xavier_init(size): # initialize the weight-matrix W.
    in_dim = size[0]
    xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
    return tf.random_normal(shape=size, stddev=xavier_stddev)
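
# Example: creating a TF 1.x weight variable with xavier_init (the layer
# sizes here are assumptions, not from this file):
# G_W1 = tf.Variable(xavier_init([100, 128]))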

def sample_Z(m, n): # generate the uniform noise input for G.
    return np.random.uniform(-1., 1., size=[m, n])
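
# e.g. a 32 x 100 noise batch for the generator (sizes are illustrative):
# Z_batch = sample_Z(32, 100)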

def sample_shuffle_spv(X, labels): # shuffle samples and labels in unison (supervised).
    n_samples = len(X)
    s = np.arange(n_samples)
    np.random.shuffle(s)
    return np.array(X[s]), labels[s]

def sample_shuffle_uspv(X): # shuffle samples only (unsupervised).
    n_samples = len(X)
    s = np.arange(n_samples)
    np.random.shuffle(s)
    return np.array(X[s])

def kde_density_estimator(X, kernel='gaussian', bandwidth=0.2): # fit a kernel density estimator to X.
    return KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(X)

def complement_density(kde, X, sf=0.5):
    # probs = map(lambda x: np.exp(kde.score([x])), X)
    probs = np.exp(kde.score_samples(X))
    thrld = np.median(probs)
    # a list comprehension (rather than map) keeps this working under
    # Python 3 as well, where map returns an iterator instead of a list.
    return np.array([low_density(p, thrld, sf) for p in probs])

def low_density(prob, thrld, sf): # complementary weight: ~ sf/prob in high-density regions, constant elsewhere.
    if prob > thrld:
        return sf * np.reciprocal(prob)
        # return sf * (1-prob)
    else:
        return thrld
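
# Sketch of the complementary-density pipeline (X_benign is a hypothetical
# (n_samples, n_features) array of benign training data):
# kde = kde_density_estimator(X_benign, bandwidth=0.2)
# comp = complement_density(kde, X_benign)  # ~ sf/p(x) where p(x) is above the median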

def pull_away_loss(g): # pull-away term: mean squared cosine similarity between distinct rows of g.
    Nor = tf.norm(g, axis=1)
    Nor_mat = tf.tile(tf.expand_dims(Nor, axis=1),
                      [1, tf.shape(g)[1]])
    X = tf.divide(g, Nor_mat)  # row-normalize g to unit length
    X_X = tf.square(tf.matmul(X, tf.transpose(X)))  # squared cosine similarities
    mask = tf.subtract(tf.ones_like(X_X),
                       tf.diag(
                           tf.ones([tf.shape(X_X)[0]]))
                       )  # zero out the diagonal (self-similarities)
    pt_loss = tf.divide(tf.reduce_sum(tf.multiply(X_X, mask)),
                        tf.multiply(
                            tf.cast(tf.shape(X_X)[0], tf.float32),
                            tf.cast(tf.shape(X_X)[0]-1, tf.float32)))
    return pt_loss
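
# A rough sanity check (TF 1.x session style, matching the rest of this
# file; the shapes are illustrative):
# g = tf.placeholder(tf.float32, [None, 2])
# pt = pull_away_loss(g)
# with tf.Session() as sess:
#     # identical rows have cosine similarity 1, so pt_loss should be 1.0
#     print(sess.run(pt, {g: np.ones((4, 2))}))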

def draw_trend(D_real_prob, D_fake_prob, D_val_prob, fm_loss, f1):
    fig = plt.figure()
    fig.patch.set_facecolor('w')
    # plt.subplot(311)
    p1, = plt.plot(D_real_prob, "-g")
    p2, = plt.plot(D_fake_prob, "--r")
    p3, = plt.plot(D_val_prob, ":c")
    plt.xlabel("# of epochs")
    plt.ylabel("probability")
    leg = plt.legend([p1, p2, p3],
                     [r'$p(y|V_B)$', r'$p(y|\tilde{V})$', r'$p(y|V_M)$'],
                     loc=1, bbox_to_anchor=(1, 1), borderaxespad=0.)
    leg.draw_frame(False)
    # plt.legend(frameon=False)
    fig = plt.figure()
    fig.patch.set_facecolor('w')
    # plt.subplot(312)
    p4, = plt.plot(fm_loss, "-b")
    plt.xlabel("# of epochs")
    plt.ylabel("feature matching loss")
    # plt.legend([p4], ["d_real_prob", "d_fake_prob", "d_val_prob"], loc=1, bbox_to_anchor=(1, 1), borderaxespad=0.)
    fig = plt.figure()
    fig.patch.set_facecolor('w')
    # plt.subplot(313)
    p5, = plt.plot(f1, "-y")
    plt.xlabel("# of epochs")
    plt.ylabel("F1")
    # plt.legend([p1, p2, p3, p4, p5], ["d_real_prob", "d_fake_prob", "d_val_prob", "fm_loss","f1"], loc=1, bbox_to_anchor=(1, 3.5), borderaxespad=0.)
    plt.show()
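
# draw_trend expects per-epoch histories collected during training, e.g.
# (the variable names are illustrative):
# draw_trend(d_real_hist, d_fake_hist, d_val_hist, fm_loss_hist, f1_hist)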

def plot_decision_boundary(pred_func, X, y):
    # Set min and max values and give them some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
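
# Hypothetical usage with any classifier exposing .predict on 2-D inputs
# (clf, X, y are assumptions, not defined in this file):
# plot_decision_boundary(lambda x: clf.predict(x), X, y)
# plt.show()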