reconstruct_nonrobust_feature.py (forked from xziyue/robust_mnist_feature_py)
from perturbation import *
import os
import warnings
import numpy as np
import tensorflow as tf

# silence the FutureWarnings emitted by the TF 1.x / numpy combination
warnings.filterwarnings(action='ignore', category=FutureWarning)
# force tensorflow to use the CPU (useful when there is not enough GPU memory)
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
tf.enable_eager_execution()

# raw data
print('loading MNIST data...')
train_X, train_Y = preprocess_mnist_data(*load_mnist_train_XY())

# load the standard (non-robust) model and expose its 'FC' layer activations
model = tf.keras.models.load_model('nn_model/std_model.dat')
intermediateModel = tf.keras.Model(inputs=model.input, outputs=model.get_layer('FC').output)

maxEpochs = 1000
learningRate = 0.01
def reconstruct_feature(featureId):
    # target: the FC-layer activations of the chosen training image
    x = train_X[featureId : featureId + 1, :, :, :]
    tensor_x = tf.convert_to_tensor(x)
    modelPredictReal = intermediateModel.predict(tensor_x)
    # start from near-uniform gray noise clipped to the valid pixel range
    start_x = np.clip(np.random.normal(0.5, 0.01, x.size), 0, 1).astype(x.dtype).reshape(x.shape)
    #start_x = train_X[200 : 200 + 1, :, :, :]
    start_x = tf.convert_to_tensor(start_x)
    lastLoss = 0.0
    lossDiff = 1.0e6
    for epoch in range(maxEpochs):
        # stop once the loss has converged
        if abs(lossDiff) < 0.0001:
            break
        with tf.GradientTape() as tape:
            tape.watch(start_x)
            modelPredictNow = intermediateModel(start_x)
            loss = tf.norm(modelPredictNow - modelPredictReal)
        gradient = tape.gradient(loss, start_x)
        # normalize the gradient so that learningRate controls the step size
        gradientNorm = tf.norm(gradient)
        gradient = tf.math.divide(gradient, gradientNorm)
        # take a gradient-descent step on the input
        start_x = tf.math.subtract(start_x, tf.math.multiply(gradient, learningRate))
        # clip pixels back to [0, 1]
        start_x = tf.clip_by_value(start_x, 0.0, 1.0)
        lossVal = loss.numpy()
        # track the change in loss for the convergence test
        lossDiff = lossVal - lastLoss
        lastLoss = lossVal
        print(lossVal, lossDiff)
    return start_x.numpy()
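
# Example usage: a minimal sketch, not part of the original script. It
# reconstructs an input whose FC activations match those of one training
# image and saves the result. The image index 0 and the output filename
# are illustrative assumptions.
if __name__ == '__main__':
    reconstructed = reconstruct_feature(0)
    np.save('reconstructed_feature_0.npy', reconstructed)
    print('saved reconstruction with shape', reconstructed.shape)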