#!/usr/bin/env python3
# Given an image (possibly generated by the GAN),
# this script tries to recover its latent vector.
#
# The algorithm is based on the ICLR 2017 workshop paper:
# https://openreview.net/forum?id=HJC88BzFl
# "Precise Recovery of Latent Vectors from Generative Adversarial Networks"
# by Zachary C. Lipton and Subarna Tripathi.
# It requires:
# - a starting image: ./interpolation_from_start/foo_00.png
# - optionally, its latent vector: ./interpolation_from_start/zp_start.npy
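#
# The recovery objective follows the paper: minimize the pixel-space
# squared error ||G(z') - x||^2 over z' by gradient descent. (The paper
# additionally proposes "stochastic clipping" of z' after each step;
# a commented-out sketch of that appears below, next to the optimizer.)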
import os
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from PIL import Image
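# NOTE: this is TF1-style graph-mode code (tf.Session, tf.train.*,
# hub.Module); it assumes TensorFlow 1.x and a matching tensorflow_hub.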
folder = "./interpolation_from_start/"
# Choose a random starting point for the optimization
zp_init = np.random.normal(size=(1, 512))
# Or, if the original latent vector is known, start from it instead:
##zp_init = np.load(folder + "zp_start.npy")
zp = tf.Variable(zp_init, dtype=tf.float32)
# If the original latent vector is available on disk, load it so the
# recovery error in latent space can be reported during optimization.
start_zp = np.load(folder + "zp_start.npy") if os.path.exists(folder + "zp_start.npy") else None
# Load the image for which we want to recover the latent vector
# and create an appropriate tensor for it
start_img = Image.open(folder + input('Enter Image filename:\n')).convert("RGB")
start_img = start_img.resize((128, 128), Image.ANTIALIAS)
start_img_np = np.array(start_img) / 255
fz = tf.constant(start_img_np, dtype=tf.float32)
fz = tf.expand_dims(fz, 0)
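# fz now has shape (1, 128, 128, 3) with values in [0, 1], the same
# format the generator's output is compared against in the loss below.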
# Point TFHUB_CACHE_DIR at a directory you have write access to;
# the TF-Hub model is downloaded and cached there.
print('Downloading the model.')
os.environ['TFHUB_CACHE_DIR'] = 'C:/Users/Praneel/Documents/Python/DPML/GANs'
generator = hub.Module("https://tfhub.dev/google/progan-128/1")
print('Model downloaded.')
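# progan-128 maps a (batch, 512) latent batch to (batch, 128, 128, 3)
# RGB images in [0, 1], which is why zp was given shape (1, 512) above.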
# Define the optimization problem
fzp = generator(zp)
loss = tf.losses.mean_squared_error(labels=fz, predictions=fzp)
# Decayed gradient descent
global_step = tf.Variable(0, trainable=False)
starter_learning_rate = 0.99
learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                           global_step,
                                           10000, 0.005)
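# With staircase decay disabled (the default), this schedule is
#   lr(step) = 0.99 * 0.005 ** (step / 10000)
# so the step size shrinks smoothly as the optimization progresses.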
opt = tf.train.GradientDescentOptimizer(learning_rate)
# Optimize on the variable zp
train = opt.minimize(loss, var_list=[zp], global_step=global_step)
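# Sketch of the paper's "stochastic clipping" (not wired in here): after
# each gradient step, any coordinate of zp outside a fixed range is
# resampled uniformly inside it. The paper assumes z ~ U(-1, 1); the
# [-3, 3] range below is an assumption for the N(0, 1) prior used here.
##out_of_range = tf.logical_or(zp < -3.0, zp > 3.0)
##resampled = tf.random_uniform(tf.shape(zp), -3.0, 3.0)
##stochastic_clip = tf.assign(zp, tf.where(out_of_range, resampled, zp))
# To use it, run sess.run(stochastic_clip) after each training step.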
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(200): # Use more iterations (e.g. 10000) for a closer fit
    _, loss_value, zp_val, eta = sess.run((train, loss, zp, learning_rate))
    if start_zp is not None:
        # If we know the original latent vector, we can also compute
        # how far the recovered vector is from it (RMS over the 512 dims)
        z_loss = np.sqrt(np.sum(np.square(zp_val - start_zp)) / len(zp_val[0]))
        print("%03d) eta=%.3f, loss = %f, z_loss = %f" % (i, eta, loss_value, z_loss))
    else:
        print("%03d) eta=%.3f, loss = %f" % (i, eta, loss_value))
# Save the recovered latent vector
zp_val = sess.run(zp)
np.save(folder + "zp_rec", zp_val)
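# np.save appends the .npy extension, so the vector can be reloaded
# later with np.load(folder + "zp_rec.npy").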
# Render and save the image produced by the recovered
# latent vector
imgs = sess.run(generator(zp))
imgs = (np.clip(imgs, 0, 1) * 255).astype(np.uint8)
Image.fromarray(imgs[0]).save(folder + "foo_rec.png")
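# Optional sanity check (a sketch): save the target image and the
# reconstruction side by side for visual comparison.
##side_by_side = np.concatenate([(start_img_np * 255).astype(np.uint8), imgs[0]], axis=1)
##Image.fromarray(side_by_side).save(folder + "foo_compare.png")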