Commit 0d34459 (1 parent: 1802bf9)

Commit message: Updated more to v1.0

46 files changed: 315 additions, 356 deletions


01_Introduction/05_Declaring_Operations/05_operations.py

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@
 #print(sess.run(tf.equal(test_num, 3)))
 def custom_polynomial(x_val):
     # Return 3x^2 - x + 10
-    return(tf.sub(3 * tf.square(x_val), x_val) + 10)
+    return(tf.subtract(3 * tf.square(x_val), x_val) + 10)

 print(sess.run(custom_polynomial(11)))
 # What should we get with list comprehension
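
Note: TensorFlow 1.0 renamed the elementwise math ops (tf.sub → tf.subtract, tf.mul → tf.multiply, tf.neg → tf.negative), which is what most of this commit updates. A minimal standalone check of the renamed call, assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()

    def custom_polynomial(x_val):
        # 3x^2 - x + 10, written with the TF 1.0 op name
        return tf.subtract(3 * tf.square(x_val), x_val) + 10

    print(sess.run(custom_polynomial(11)))  # 3*121 - 11 + 10 = 362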

02_TensorFlow_Way/01_Operations_as_a_Computational_Graph/01_operations_on_a_graph.py

Lines changed: 2 additions & 2 deletions
@@ -20,8 +20,8 @@
 for x_val in x_vals:
     print(sess.run(prod, feed_dict={x_data: x_val}))

-merged = tf.merge_all_summaries()
+merged = tf.summary.merge_all()
 if not os.path.exists('tensorboard_logs/'):
     os.makedirs('tensorboard_logs/')

-my_writer = tf.train.SummaryWriter('tensorboard_logs/', sess.graph)
+my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph)
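
Note: TF 1.0 also moved the summary machinery into the tf.summary module (tf.merge_all_summaries → tf.summary.merge_all, tf.train.SummaryWriter → tf.summary.FileWriter). A minimal TensorBoard-logging sketch with the new names, assuming TensorFlow 1.x:

    import os
    import tensorflow as tf

    sess = tf.Session()
    merged = tf.summary.merge_all()  # returns None if no summaries are defined
    if not os.path.exists('tensorboard_logs/'):
        os.makedirs('tensorboard_logs/')
    my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph)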
Lines changed: 0 additions & 41 deletions
@@ -1,41 +0,0 @@
-# Layering Nested Operations
-import matplotlib.pyplot as plt
-import numpy as np
-import tensorflow as tf
-import os
-from tensorflow.python.framework import ops
-ops.reset_default_graph()
-
-# Create graph
-sess = tf.Session()
-
-# Create tensors
-
-# Create data to feed in
-my_array = np.array([[1., 3., 5., 7., 9.],
-                     [-2., 0., 2., 4., 6.],
-                     [-6., -3., 0., 3., 6.]])
-x_vals = np.array([my_array, my_array + 1])
-x_data = tf.placeholder(tf.float32, shape=(3, 5))
-m1 = tf.constant([[1.],[0.],[-1.],[2.],[4.]])
-m2 = tf.constant([[2.]])
-a1 = tf.constant([[10.]])
-
-# 1st Operation Layer = Multiplication
-prod1 = tf.matmul(x_data, m1)
-
-# 2nd Operation Layer = Multiplication
-prod2 = tf.matmul(prod1, m2)
-
-# 3rd Operation Layer = Addition
-add1 = tf.add(prod2, a1)
-
-for x_val in x_vals:
-    print(sess.run(add1, feed_dict={x_data: x_val}))
-
-merged = tf.merge_all_summaries()
-
-if not os.path.exists('tensorboard_logs/')
-    os.makedirs('tensorboard_logs/')
-
-my_writer = tf.train.SummaryWriter('tensorboard_logs/', sess.graph)

02_TensorFlow_Way/03_Working_with_Multiple_Layers/03_multiple_layers.py

Lines changed: 2 additions & 2 deletions
@@ -46,9 +46,9 @@ def custom_layer(input_matrix):
 # After custom operation, size is now 2x2 (squeezed out size 1 dims)
 print(sess.run(custom_layer1, feed_dict={x_data: x_val}))

-merged = tf.merge_all_summaries()
+merged = tf.summary.merge_all()

 if not os.path.exists('tensorboard_logs/'):
     os.makedirs('tensorboard_logs/')

-my_writer = tf.train.SummaryWriter('tensorboard_logs/', sess.graph)
+my_writer = tf.summary.FileWriter('tensorboard_logs/', sess.graph)

02_TensorFlow_Way/04_Implementing_Loss_Functions/04_loss_functions.py

Lines changed: 3 additions & 3 deletions
@@ -67,7 +67,7 @@
 # L = -actual * (log(sigmoid(pred))) - (1-actual)(log(1-sigmoid(pred)))
 # or
 # L = max(actual, 0) - actual * pred + log(1 + exp(-abs(actual)))
-xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(x_vals, targets)
+xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_vals, labels=targets)
 xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)

 # Weighted (softmax) cross entropy loss
@@ -93,13 +93,13 @@
 # L = -actual * (log(softmax(pred))) - (1-actual)(log(1-softmax(pred)))
 unscaled_logits = tf.constant([[1., -3., 10.]])
 target_dist = tf.constant([[0.1, 0.02, 0.88]])
-softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(unscaled_logits, target_dist)
+softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=unscaled_logits, labels=target_dist)
 print(sess.run(softmax_xentropy))

 # Sparse entropy loss
 # Use when classes and targets have to be mutually exclusive
 # L = sum( -actual * log(pred) )
 unscaled_logits = tf.constant([[1., -3., 10.]])
 sparse_target_dist = tf.constant([2])
-sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(unscaled_logits, sparse_target_dist)
+sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=unscaled_logits, labels=sparse_target_dist)
 print(sess.run(sparse_xentropy))
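
Note: TF 1.0 made logits and labels keyword-only for these cross-entropy ops, so positional calls like the old sigmoid_cross_entropy_with_logits(x_vals, targets) raise an error; this guards against silently swapping the two roles. A self-contained example of the updated call, assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()
    unscaled_logits = tf.constant([[1., -3., 10.]])
    target_dist = tf.constant([[0.1, 0.02, 0.88]])
    # Keywords make the logits/labels roles explicit
    softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=unscaled_logits, labels=target_dist)
    print(sess.run(softmax_xentropy))  # approx. [1.16]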

02_TensorFlow_Way/05_Implementing_Back_Propagation/05_back_propagation.py

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@
 sess.run(init)

 # Add classification loss (cross entropy)
-xentropy = tf.nn.sigmoid_cross_entropy_with_logits(my_output_expanded, y_target_expanded)
+xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output_expanded, labels=y_target_expanded)

 # Create Optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.05)

02_TensorFlow_Way/07_Combining_Everything_Together/07_combining_everything_together.py

Lines changed: 2 additions & 2 deletions
@@ -44,10 +44,10 @@
 # x1 - A*x2 + b
 my_mult = tf.matmul(x2_data, A)
 my_add = tf.add(my_mult, b)
-my_output = tf.sub(x1_data, my_add)
+my_output = tf.subtract(x1_data, my_add)

 # Add classification loss (cross entropy)
-xentropy = tf.nn.sigmoid_cross_entropy_with_logits(my_output, y_target)
+xentropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output, labels=y_target)

 # Create Optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.05)

02_TensorFlow_Way/08_Evaluating_Models/08_evaluating_models.py

Lines changed: 1 addition & 1 deletion
@@ -118,7 +118,7 @@
 my_output = tf.add(x_data, A)

 # Add classification loss (cross entropy)
-xentropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(my_output, y_target))
+xentropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=my_output, labels=y_target))

 # Create Optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.05)

03_Linear_Regression/05_Implementing_Deming_Regression/05_deming_regression.py

Lines changed: 1 addition & 1 deletion
@@ -40,7 +40,7 @@
 model_output = tf.add(tf.matmul(x_data, A), b)

 # Declare Demming loss function
-demming_numerator = tf.abs(tf.sub(y_target, tf.add(tf.matmul(x_data, A), b)))
+demming_numerator = tf.abs(tf.subtract(y_target, tf.add(tf.matmul(x_data, A), b)))
 demming_denominator = tf.sqrt(tf.add(tf.square(A),1))
 loss = tf.reduce_mean(tf.truediv(demming_numerator, demming_denominator))

03_Linear_Regression/08_Implementing_Logistic_Regression/08_logistic_regression.py

Lines changed: 1 addition & 1 deletion
@@ -61,7 +61,7 @@ def normalize_cols(m):
 model_output = tf.add(tf.matmul(x_data, A), b)

 # Declare loss function (Cross Entropy loss)
-loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(model_output, y_target))
+loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target))

 # Declare optimizer
 my_opt = tf.train.GradientDescentOptimizer(0.01)

04_Support_Vector_Machines/02_Working_with_Linear_SVMs/02_linear_svm.py

Lines changed: 2 additions & 2 deletions
@@ -49,7 +49,7 @@
 b = tf.Variable(tf.random_normal(shape=[1,1]))

 # Declare model operations
-model_output = tf.sub(tf.matmul(x_data, A), b)
+model_output = tf.subtract(tf.matmul(x_data, A), b)

 # Declare vector L2 'norm' function squared
 l2_norm = tf.reduce_sum(tf.square(A))
@@ -59,7 +59,7 @@
 # L2 regularization parameter, alpha
 alpha = tf.constant([0.01])
 # Margin term in loss
-classification_term = tf.reduce_mean(tf.maximum(0., tf.sub(1., tf.multiply(model_output, y_target))))
+classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
 # Put terms together
 loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
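
Note: the margin term here is the hinge loss mean(max(0, 1 - y*(A*x - b))) and the full objective adds an L2 penalty on A. A small numeric sketch of the same loss with toy values (the constants below are illustrative, not from the script), assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()
    A = tf.constant([[2.0]])                 # toy weight
    b = tf.constant([[1.0]])                 # toy bias
    x_data = tf.constant([[1.0], [3.0]])     # two samples
    y_target = tf.constant([[1.0], [-1.0]])  # labels in {-1, +1}

    model_output = tf.subtract(tf.matmul(x_data, A), b)
    l2_norm = tf.reduce_sum(tf.square(A))
    alpha = tf.constant([0.01])
    classification_term = tf.reduce_mean(
        tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))
    loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
    print(sess.run(loss))  # [3.04]: hinge mean 3.0 plus 0.01 * 4.0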

04_Support_Vector_Machines/03_Reduction_to_Linear_Regression/03_support_vector_regression.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@
5353
# 1/2 margin width parameter = epsilon
5454
epsilon = tf.constant([0.5])
5555
# Margin term in loss
56-
loss = tf.reduce_mean(tf.maximum(0., tf.sub(tf.abs(tf.sub(model_output, y_target)), epsilon)))
56+
loss = tf.reduce_mean(tf.maximum(0., tf.subtract(tf.abs(tf.subtract(model_output, y_target)), epsilon)))
5757

5858
# Declare optimizer
5959
my_opt = tf.train.GradientDescentOptimizer(0.075)

04_Support_Vector_Machines/04_Working_with_Kernels/04_svm_kernels.py

Lines changed: 4 additions & 4 deletions
@@ -47,16 +47,16 @@
 gamma = tf.constant(-50.0)
 dist = tf.reduce_sum(tf.square(x_data), 1)
 dist = tf.reshape(dist, [-1,1])
-sq_dists = tf.add(tf.sub(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
-my_kernel = tf.exp(tf.mul(gamma, tf.abs(sq_dists)))
+sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
+my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))

 # Compute SVM Model
 model_output = tf.matmul(b, my_kernel)
 first_term = tf.reduce_sum(b)
 b_vec_cross = tf.matmul(tf.transpose(b), b)
 y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
 second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
-loss = tf.neg(tf.sub(first_term, second_term))
+loss = tf.negative(tf.subtract(first_term, second_term))

 # Create Prediction Kernel
 # Linear prediction kernel
@@ -65,7 +65,7 @@
 # Gaussian (RBF) prediction kernel
 rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
 rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
-pred_sq_dist = tf.add(tf.sub(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
+pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
 pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))

 prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target),b), pred_kernel)
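
Note: sq_dists implements the expansion ||xi - xj||^2 = ||xi||^2 - 2*xi.xj + ||xj||^2 via broadcasting, and the RBF kernel is then exp(gamma * |d|) with gamma < 0. A quick NumPy sanity check of that identity:

    import numpy as np

    X = np.array([[0., 0.], [3., 4.]])
    dist = np.sum(np.square(X), axis=1).reshape(-1, 1)  # ||xi||^2 as a column
    sq_dists = dist - 2. * X.dot(X.T) + dist.T          # pairwise squared distances
    print(sq_dists)     # [[0., 25.], [25., 0.]] since |(3,4)-(0,0)|^2 = 25
    gamma = -50.0
    kernel = np.exp(gamma * np.abs(sq_dists))           # RBF kernel matrix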

04_Support_Vector_Machines/05_Implementing_Nonlinear_SVMs/05_nonlinear_svm.py

Lines changed: 2 additions & 2 deletions
@@ -49,12 +49,12 @@
 b_vec_cross = tf.matmul(tf.transpose(b), b)
 y_target_cross = tf.matmul(y_target, tf.transpose(y_target))
 second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)))
-loss = tf.neg(tf.sub(first_term, second_term))
+loss = tf.negative(tf.subtract(first_term, second_term))

 # Gaussian (RBF) prediction kernel
 rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
 rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
-pred_sq_dist = tf.add(tf.sub(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
+pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
 pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))

 prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target),b), pred_kernel)

04_Support_Vector_Machines/06_Implementing_Multiclass_SVMs/06_multiclass_svm.py

Lines changed: 5 additions & 5 deletions
@@ -57,28 +57,28 @@
 gamma = tf.constant(-10.0)
 dist = tf.reduce_sum(tf.square(x_data), 1)
 dist = tf.reshape(dist, [-1,1])
-sq_dists = tf.add(tf.sub(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
+sq_dists = tf.add(tf.subtract(dist, tf.multiply(2., tf.matmul(x_data, tf.transpose(x_data)))), tf.transpose(dist))
 my_kernel = tf.exp(tf.multiply(gamma, tf.abs(sq_dists)))

 # Declare function to do reshape/batch multiplication
 def reshape_matmul(mat):
     v1 = tf.expand_dims(mat, 1)
     v2 = tf.reshape(v1, [3, batch_size, 1])
-    return(tf.batch_matmul(v2, v1))
+    return(tf.matmul(v2, v1))

 # Compute SVM Model
 model_output = tf.matmul(b, my_kernel)
 first_term = tf.reduce_sum(b)
 b_vec_cross = tf.matmul(tf.transpose(b), b)
 y_target_cross = reshape_matmul(y_target)

-second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.mul(b_vec_cross, y_target_cross)),[1,2])
-loss = tf.reduce_sum(tf.neg(tf.sub(first_term, second_term)))
+second_term = tf.reduce_sum(tf.multiply(my_kernel, tf.multiply(b_vec_cross, y_target_cross)),[1,2])
+loss = tf.reduce_sum(tf.negative(tf.subtract(first_term, second_term)))

 # Gaussian (RBF) prediction kernel
 rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
 rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
-pred_sq_dist = tf.add(tf.sub(rA, tf.mul(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
+pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
 pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))

 prediction_output = tf.matmul(tf.multiply(y_target,b), pred_kernel)
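
Note: TF 1.0 removed tf.batch_matmul; tf.matmul now batches over leading dimensions when given rank-3 (or higher) inputs, so reshape_matmul works unchanged. A minimal demonstration, assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()
    v2 = tf.constant([[[1.], [2.]], [[3.], [4.]]])  # shape (2, 2, 1)
    v1 = tf.constant([[[5., 6.]], [[7., 8.]]])      # shape (2, 1, 2)
    # Batched matmul over the leading dimension; result shape (2, 2, 2)
    print(sess.run(tf.matmul(v2, v1)))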

05_Nearest_Neighbor_Methods/02_Working_with_Nearest_Neighbors/02_nearest_neighbor.py

Lines changed: 4 additions & 4 deletions
@@ -70,24 +70,24 @@

 # Declare distance metric
 # L1
-distance = tf.reduce_sum(tf.abs(tf.sub(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=2)
+distance = tf.reduce_sum(tf.abs(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), axis=2)

 # L2
 #distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=1))

 # Predict: Get min distance index (Nearest neighbor)
 #prediction = tf.arg_min(distance, 0)
-top_k_xvals, top_k_indices = tf.nn.top_k(tf.neg(distance), k=k)
+top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k)
 x_sums = tf.expand_dims(tf.reduce_sum(top_k_xvals, 1),1)
 x_sums_repeated = tf.matmul(x_sums,tf.ones([1, k], tf.float32))
 x_val_weights = tf.expand_dims(tf.div(top_k_xvals,x_sums_repeated), 1)

 top_k_yvals = tf.gather(y_target_train, top_k_indices)
-prediction = tf.squeeze(tf.batch_matmul(x_val_weights,top_k_yvals), squeeze_dims=[1])
+prediction = tf.squeeze(tf.matmul(x_val_weights,top_k_yvals), axis=[1])
 #prediction = tf.reduce_mean(top_k_yvals, 1)

 # Calculate MSE
-mse = tf.div(tf.reduce_sum(tf.square(tf.sub(prediction, y_target_test))), batch_size)
+mse = tf.div(tf.reduce_sum(tf.square(tf.subtract(prediction, y_target_test))), batch_size)

 # Calculate how many loops over training data
 num_loops = int(np.ceil(len(x_vals_test)/batch_size))
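
Note: besides the op renames, this commit swaps the deprecated reduction_indices= and squeeze_dims= keywords for axis=. The tf.nn.top_k trick is unchanged: top_k returns the largest entries, so negating the distances selects the k nearest neighbors. A tiny illustration, assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()
    distance = tf.constant([4., 1., 3., 2.])
    # Largest negated distances == smallest distances
    top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=2)
    print(sess.run(top_k_indices))  # [1 3], the two nearest points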

05_Nearest_Neighbor_Methods/04_Computing_with_Mixed_Distance_Functions/04_mixed_distance_functions_knn.py

Lines changed: 6 additions & 6 deletions
@@ -71,22 +71,22 @@

 # Declare weighted distance metric
 # Weighted - L2 = sqrt((x-y)^T * A * (x-y))
-subtraction_term = tf.sub(x_data_train, tf.expand_dims(x_data_test,1))
-first_product = tf.batch_matmul(subtraction_term, tf.tile(tf.expand_dims(weight_matrix,0), [batch_size,1,1]))
-second_product = tf.batch_matmul(first_product, tf.transpose(subtraction_term, perm=[0,2,1]))
+subtraction_term = tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))
+first_product = tf.matmul(subtraction_term, tf.tile(tf.expand_dims(weight_matrix,0), [batch_size,1,1]))
+second_product = tf.matmul(first_product, tf.transpose(subtraction_term, perm=[0,2,1]))
 distance = tf.sqrt(tf.batch_matrix_diag_part(second_product))

 # Predict: Get min distance index (Nearest neighbor)
-top_k_xvals, top_k_indices = tf.nn.top_k(tf.neg(distance), k=k)
+top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k)
 x_sums = tf.expand_dims(tf.reduce_sum(top_k_xvals, 1),1)
 x_sums_repeated = tf.matmul(x_sums,tf.ones([1, k], tf.float32))
 x_val_weights = tf.expand_dims(tf.div(top_k_xvals,x_sums_repeated), 1)

 top_k_yvals = tf.gather(y_target_train, top_k_indices)
-prediction = tf.squeeze(tf.batch_matmul(x_val_weights,top_k_yvals), squeeze_dims=[1])
+prediction = tf.squeeze(tf.matmul(x_val_weights,top_k_yvals), axis=[1])

 # Calculate MSE
-mse = tf.div(tf.reduce_sum(tf.square(tf.sub(prediction, y_target_test))), batch_size)
+mse = tf.div(tf.reduce_sum(tf.square(tf.subtract(prediction, y_target_test))), batch_size)

 # Calculate how many loops over training data
 num_loops = int(np.ceil(len(x_vals_test)/batch_size))

05_Nearest_Neighbor_Methods/05_An_Address_Matching_Example/05_address_matching.py

Lines changed: 3 additions & 3 deletions
@@ -58,16 +58,16 @@ def create_typo(s, prob=0.75):
 ref_zip = tf.placeholder(shape=[None, n], dtype=tf.float32)

 # Declare Zip code distance for a test zip and reference set
-zip_dist = tf.square(tf.sub(ref_zip, test_zip))
+zip_dist = tf.square(tf.subtract(ref_zip, test_zip))

 # Declare Edit distance for address
 address_dist = tf.edit_distance(test_address, ref_address, normalize=True)

 # Create similarity scores
 zip_max = tf.gather(tf.squeeze(zip_dist), tf.argmax(zip_dist, 1))
 zip_min = tf.gather(tf.squeeze(zip_dist), tf.argmin(zip_dist, 1))
-zip_sim = tf.div(tf.sub(zip_max, zip_dist), tf.sub(zip_max, zip_min))
-address_sim = tf.sub(1., address_dist)
+zip_sim = tf.div(tf.subtract(zip_max, zip_dist), tf.subtract(zip_max, zip_min))
+address_sim = tf.subtract(1., address_dist)

 # Combine distance functions
 address_weight = 0.5

05_Nearest_Neighbor_Methods/06_Nearest_Neighbors_for_Image_Recognition/06_image_recognition.py

Lines changed: 4 additions & 4 deletions
@@ -48,17 +48,17 @@

 # Declare distance metric
 # L1
-distance = tf.reduce_sum(tf.abs(tf.sub(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=2)
+distance = tf.reduce_sum(tf.abs(tf.subtract(x_data_train, tf.expand_dims(x_data_test,1))), axis=2)

 # L2
 #distance = tf.sqrt(tf.reduce_sum(tf.square(tf.sub(x_data_train, tf.expand_dims(x_data_test,1))), reduction_indices=1))

 # Predict: Get min distance index (Nearest neighbor)
-top_k_xvals, top_k_indices = tf.nn.top_k(tf.neg(distance), k=k)
+top_k_xvals, top_k_indices = tf.nn.top_k(tf.negative(distance), k=k)
 prediction_indices = tf.gather(y_target_train, top_k_indices)
 # Predict the mode category
-count_of_predictions = tf.reduce_sum(prediction_indices, reduction_indices=1)
-prediction = tf.argmax(count_of_predictions, dimension=1)
+count_of_predictions = tf.reduce_sum(prediction_indices, axis=1)
+prediction = tf.argmax(count_of_predictions, axis=1)

 # Calculate how many loops over training data
 num_loops = int(np.ceil(len(x_vals_test)/batch_size))
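
Note: tf.argmax likewise replaced dimension= with axis= in TF 1.0. The mode prediction works by summing the neighbors' one-hot labels into per-class vote counts and taking the argmax. A small worked example, assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()
    # One-hot labels of 3 neighbors for a single test image (classes 0-2)
    prediction_indices = tf.constant([[[0., 1., 0.],
                                       [0., 1., 0.],
                                       [1., 0., 0.]]])
    count_of_predictions = tf.reduce_sum(prediction_indices, axis=1)  # votes per class
    prediction = tf.argmax(count_of_predictions, axis=1)              # majority class
    print(sess.run(prediction))  # [1]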

06_Neural_Networks/02_Implementing_an_Operational_Gate/02_gates.py

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@

 # Declare the loss function as the difference between
 # the output and a target value, 50.
-loss = tf.square(tf.sub(multiplication, 50.))
+loss = tf.square(tf.subtract(multiplication, 50.))

 # Initialize variables
 init = tf.global_variables_initializer()
@@ -78,7 +78,7 @@

 # Declare the loss function as the difference between
 # the output and a target value, 50.
-loss = tf.square(tf.sub(two_gate, 50.))
+loss = tf.square(tf.subtract(two_gate, 50.))

 # Initialize variables
 init = tf.global_variables_initializer()
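
Note: minimizing loss = (a*x - 50)^2 drives the gate output toward the target 50. A compact runnable sketch of a single multiplication gate with illustrative values a = 4 and x = 5 (the script's own constants may differ), assuming TensorFlow 1.x:

    import tensorflow as tf

    sess = tf.Session()
    a = tf.Variable(tf.constant(4.))
    x_val = 5.
    multiplication = tf.multiply(a, x_val)
    loss = tf.square(tf.subtract(multiplication, 50.))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    sess.run(tf.global_variables_initializer())
    for _ in range(10):
        sess.run(train_step)
    print(sess.run(a))  # approaches 10.0, since 10 * 5 = 50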

06_Neural_Networks/03_Working_with_Activation_Functions/03_activation_functions.py

Lines changed: 2 additions & 2 deletions
@@ -35,8 +35,8 @@

 # Declare the loss function as the difference between
 # the output and a target value, 0.75.
-loss1 = tf.reduce_mean(tf.square(tf.sub(sigmoid_activation, 0.75)))
-loss2 = tf.reduce_mean(tf.square(tf.sub(relu_activation, 0.75)))
+loss1 = tf.reduce_mean(tf.square(tf.subtract(sigmoid_activation, 0.75)))
+loss2 = tf.reduce_mean(tf.square(tf.subtract(relu_activation, 0.75)))

 # Initialize variables
 init = tf.global_variables_initializer()
