use variable._ref(); fix empirical.py, test_beta_log_prob.py
dustinvtran committed Jan 28, 2017
1 parent 3dd5009 commit 8b245ec
Showing 6 changed files with 12 additions and 12 deletions.
2 changes: 1 addition & 1 deletion edward/inferences/klpq.py
@@ -143,6 +143,6 @@ def build_loss_and_gradients(self, var_list):
loss = tf.reduce_mean(w_norm * log_w)
grads = tf.gradients(
-tf.reduce_mean(q_log_prob * tf.stop_gradient(w_norm)),
-     var_list)
+     [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars
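
The same change is applied throughout the inference classes: gradients are taken with respect to each variable's reference tensor via the private Variable._ref() method, instead of passing the Variable objects (or the former public v.ref()) to tf.gradients. A minimal standalone sketch of the pattern, assuming TensorFlow 1.x graph mode; the variable, loss, and session code below are illustrative, not Edward's:

import tensorflow as tf

# Illustrative setup: one trainable variable and a scalar loss.
w = tf.Variable(1.0, name="w")
loss = tf.square(w - 2.0)
var_list = tf.trainable_variables()

# As in this commit, differentiate with respect to the variables' reference
# tensors (the private _ref() method in TF 1.x) rather than the Variables.
grads = tf.gradients(loss, [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grads))  # [-2.0], since d/dw (w - 2)^2 = 2(w - 2) at w = 1
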
12 changes: 6 additions & 6 deletions edward/inferences/klqp.py
@@ -366,7 +366,7 @@ def build_reparam_loss_and_gradients(inference, var_list):
if var_list is None:
var_list = tf.trainable_variables()

- grads = tf.gradients(loss, [v.ref() for v in var_list])
+ grads = tf.gradients(loss, [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

@@ -439,7 +439,7 @@ def build_reparam_kl_loss_and_gradients(inference, var_list):
if var_list is None:
var_list = tf.trainable_variables()

- grads = tf.gradients(loss, [v.ref() for v in var_list])
+ grads = tf.gradients(loss, [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

@@ -511,7 +511,7 @@ def build_reparam_entropy_loss_and_gradients(inference, var_list):
if var_list is None:
var_list = tf.trainable_variables()

- grads = tf.gradients(loss, [v.ref() for v in var_list])
+ grads = tf.gradients(loss, [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

@@ -582,7 +582,7 @@ def build_score_loss_and_gradients(inference, var_list):

grads = tf.gradients(
-tf.reduce_mean(q_log_prob * tf.stop_gradient(losses)),
-     var_list)
+     [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

@@ -657,7 +657,7 @@ def build_score_kl_loss_and_gradients(inference, var_list):
loss = -(tf.reduce_mean(p_log_lik) - kl)
grads = tf.gradients(
-(tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_lik)) - kl),
-     var_list)
+     [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

@@ -732,6 +732,6 @@ def build_score_entropy_loss_and_gradients(inference, var_list):
grads = tf.gradients(
-(tf.reduce_mean(q_log_prob * tf.stop_gradient(p_log_prob)) +
q_entropy),
-     var_list)
+     [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars
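
The score-function estimators above build a surrogate objective of the form q_log_prob * tf.stop_gradient(...), so that gradients flow only through the variational log-density term. A rough standalone sketch of that trick, with illustrative names (a unit-variance Gaussian q with trainable mean) rather than Edward's actual graph:

import tensorflow as tf

# Hypothetical variational parameter: the mean of a unit-variance Gaussian q.
mu = tf.Variable(0.0, name="mu")

# Draws from q, treated as constants so no gradient flows through the samples.
samples = tf.stop_gradient(mu + tf.random_normal([100]))
q_log_prob = -0.5 * tf.square(samples - mu)  # log q(samples), up to a constant
losses = tf.square(samples - 3.0)            # per-sample objective f(samples)

# stop_gradient blocks the path through f, so differentiating the surrogate
# gives the score-function (REINFORCE) estimator E_q[f * d/d mu log q].
surrogate = tf.reduce_mean(q_log_prob * tf.stop_gradient(losses))
grads = tf.gradients(surrogate, [mu._ref()])
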
2 changes: 1 addition & 1 deletion edward/inferences/map.py
@@ -147,7 +147,7 @@ def build_loss_and_gradients(self, var_list):
if var_list is None:
var_list = tf.trainable_variables()

- grads = tf.gradients(loss, [v.ref() for v in var_list])
+ grads = tf.gradients(loss, [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

4 changes: 2 additions & 2 deletions edward/models/empirical.py
@@ -78,12 +78,12 @@ def _std(self):
def _variance(self):
return math_ops.square(self.std())

- def sample_n(self, n, seed=None):
+ def _sample_n(self, n, seed=None):
if self.n != 1:
logits = logit(tf.ones(self.n, dtype=tf.float32) /
tf.cast(self.n, dtype=tf.float32))
cat = tf.contrib.distributions.Categorical(logits=logits)
- indices = cat.sample_n(n, seed)
+ indices = cat._sample_n(n, seed)
return tf.gather(self._params, indices)
else:
multiples = tf.concat(
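
The empirical.py change follows the tf.contrib.distributions convention in which subclasses implement the private _sample_n(n, seed) hook while callers go through the public sample() method; the internal Categorical draw is likewise switched to the renamed private method. A small sketch of that convention, assuming the TF 1.x contrib API; the toy logits below are illustrative:

import tensorflow as tf

# Toy five-category uniform distribution.
logits = tf.log(tf.ones(5) / 5.0)
cat = tf.contrib.distributions.Categorical(logits=logits)

# Public API: sample(n) dispatches internally to the private _sample_n hook.
indices_public = cat.sample(3)

# The commit calls the renamed private hook directly, matching the diff above.
indices_private = cat._sample_n(3, seed=None)

with tf.Session() as sess:
    print(sess.run([indices_public, indices_private]))
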
2 changes: 1 addition & 1 deletion examples/iwvi.py
@@ -79,7 +79,7 @@ def build_loss_and_gradients(self, var_list):
log_w += [p_log_prob - q_log_prob]

loss = -log_mean_exp(log_w)
- grads = tf.gradients(loss, [v.ref() for v in var_list])
+ grads = tf.gradients(loss, [v._ref() for v in var_list])
grads_and_vars = list(zip(grads, var_list))
return loss, grads_and_vars

2 changes: 1 addition & 1 deletion tests/test-models/test_beta_log_prob.py
@@ -18,7 +18,7 @@ def _test(a, b, n):
a = a.eval()
b = b.eval()
assert np.allclose(rv.log_prob(x_tf).eval(),
-                    stats.beta.logpdf(x, a, b))
+                    stats.beta.logpdf(x, a, b), atol=1e-3)


class test_beta_log_prob_class(tf.test.TestCase):
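
The test change loosens the comparison against scipy to an absolute tolerance of 1e-3, presumably to accommodate small numerical differences between the TensorFlow and scipy evaluations of the log-density. A standalone illustration of how atol affects np.allclose; the perturbation below is made up, standing in for such a numerical discrepancy:

import numpy as np
from scipy import stats

x = np.array([0.2, 0.5, 0.8])
a, b = 2.0, 3.0

exact = stats.beta.logpdf(x, a, b)
approx = exact + 5e-4  # stand-in for a float32 TensorFlow evaluation

print(np.allclose(approx, exact))             # False under the default atol=1e-8
print(np.allclose(approx, exact, atol=1e-3))  # True with the looser tolerance
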
