ops.py
# TF 1.x API (tf.variable_scope / tf.get_variable / tf.Session).
# The original math, numpy, and tensorflow.python.framework.ops imports
# were unused in this file and have been dropped.
import tensorflow as tf
def linear(input_, output_size, scope=None, stddev=0.5, bias_start=0.0, with_w=False):
    """Fully connected layer: input_ @ Matrix + bias, built with TF 1.x variable scopes."""
    shape = input_.get_shape().as_list()
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        # bias_start was accepted but never used; initialize the bias with it
        # instead of a second random-normal draw.
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        if with_w:
            return tf.matmul(input_, matrix) + bias, matrix, bias
        else:
            return tf.matmul(input_, matrix) + bias
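# A minimal usage sketch (the shapes and the scope name "fc1" are assumptions,
# not taken from this repo): for a [batch, 128] input, linear creates variables
# "fc1/Matrix" of shape [128, 64] and "fc1/bias" of shape [64] and returns the
# [batch, 64] affine map.
#
#   x = tf.placeholder(tf.float32, [None, 128])
#   y, w, b = linear(x, 64, scope="fc1", with_w=True)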
def squash(input_, factor=0.01):
    """Piecewise-linear gate: pushes values above 0.5 toward 1 and the rest toward 0."""
    # tf.select was removed from the TF API; tf.where has the same semantics.
    return tf.where(input_ > 0.5, (1 - factor) + factor * input_, input_ * factor)
    # Smooth alternative kept from the original:
    # return 1 / (1 + tf.exp(-30 * (input_ - 0.5)))
def kl_divergence(a, b):
    """KL(a || b) = sum_i a_i * log(a_i / b_i); assumes a and b are positive distributions."""
    quo = tf.divide(a, b)
    log_q = tf.log(quo)
    return tf.reduce_sum(tf.multiply(a, log_q))
def js_divergence(a, b):
    """JS(a, b) = 0.5 * KL(a || m) + 0.5 * KL(b || m), with m = (a + b) / 2."""
    m = tf.multiply(0.5, tf.add(a, b))
    klam = tf.multiply(0.5, kl_divergence(a, m))
    klbm = tf.multiply(0.5, kl_divergence(b, m))
    # kl_divergence already reduces to a scalar, so summing again is a no-op;
    # a plain add suffices.
    return tf.add(klam, klbm)
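# A hedged smoke test, not part of the original file: the sample distributions
# below are illustrative assumptions. Run under TF 1.x; it exercises squash and
# both divergences in a session.
if __name__ == "__main__":
    p = tf.constant([0.1, 0.4, 0.5])
    q = tf.constant([0.2, 0.3, 0.5])
    with tf.Session() as sess:
        print("squash:", sess.run(squash(tf.constant([0.2, 0.8]))))  # -> ~[0.002, 0.998]
        print("KL(p||q):", sess.run(kl_divergence(p, q)))
        print("JS(p, q):", sess.run(js_divergence(p, q)))  # symmetric, <= log 2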