# -*- coding: utf-8 -*-
# This program trains a support vector machine by gradient descent to solve
# the MNIST binary classification problem (digits 0 vs. 1).
# Reference: https://en.wikipedia.org/wiki/Support-vector_machine
"""
Created on 01 May, 2019
@author jswanglp

requirements:
    matplotlib==2.0.2
    numpy==1.15.4
    tensorflow==1.12.0
    scikit_learn==0.23.1
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
tf.logging.set_verbosity(tf.logging.ERROR)
# Timing decorator: reports how long the wrapped function takes to run
import time
def timer(func):
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        t = end_time - start_time
        print('This operation takes {} s.'.format(t))
        return result
    return wrapper
if __name__ == '__main__':
    # Hyperparameter settings
    tf.app.flags.DEFINE_integer('batch_size', 128, 'Number of samples per batch.')
    tf.app.flags.DEFINE_integer('num_epochs', 100, 'Number of epochs for training.')
    tf.app.flags.DEFINE_boolean('is_evaluation', True, 'Whether or not the model should be evaluated.')
    tf.app.flags.DEFINE_float('C_param', 0.1, 'Penalty parameter of the error term.')
    tf.app.flags.DEFINE_float('Reg_param', 1.0, 'Penalty parameter of the regularization term.')
    tf.app.flags.DEFINE_float('delta', 1.0, 'The margin parameter.')
    tf.app.flags.DEFINE_float('learning_rate', 1e-3, 'The initial learning rate for optimization.')
    FLAGS = tf.app.flags.FLAGS
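
    # The graph built below minimizes the soft-margin SVM objective implemented
    # in loss_fn; a sketch in the notation of the flags above:
    #     L(W, b) = C_param * mean(max(0, delta - y * (x.W - b))) + Reg_param * ||W||^2 / 2
    # where the labels y take values in {-1, +1}.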
    # Loss and accuracy functions
    def loss_fn(W, b, x_data, y_target):
        # Hinge loss plus L2 regularization on the weights
        logits = tf.subtract(tf.matmul(x_data, W), b)
        norm_term = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W), W)), 2)
        classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
        total_loss = tf.add(tf.multiply(FLAGS.C_param, classification_loss), tf.multiply(FLAGS.Reg_param, norm_term))
        return total_loss

    def inference_fn(W, b, x_data, y_target):
        # Accuracy of the sign of the decision function against the +/-1 targets
        prediction = tf.sign(tf.subtract(tf.matmul(x_data, W), b))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
        return accuracy

    @timer
    def extraction_fn(data):  # collect the indices of samples labeled 0 or 1
        index_list = []
        for idx in range(data.shape[0]):
            if data[idx] == 0 or data[idx] == 1:
                index_list.append(idx)
        return index_list
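
    # Note: the same indices could also be obtained in one vectorized call, e.g.
    #     index_list = np.where((labels == 0) | (labels == 1))[0]
    # (a sketch; 'labels' stands for the array passed to extraction_fn).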
    # Load the required training images and labels
    mnist = input_data.read_data_sets("sample_data/MNIST_data", reshape=True, one_hot=False)
    data = {}
    data['train_image'] = mnist.train.images
    data['train_label'] = mnist.train.labels
    data['test_image'] = mnist.test.images
    data['test_label'] = mnist.test.labels
    # Keep only the samples labeled 0 or 1
    index_list_train = extraction_fn(data['train_label'])
    index_list_test = extraction_fn(data['test_label'])
    data['train_image'] = mnist.train.images[index_list_train]
    data['train_label'] = mnist.train.labels[index_list_train]
    data['test_image'] = mnist.test.images[index_list_test]
    data['test_label'] = np.array(mnist.test.labels[index_list_test], dtype=np.float32)
    # data['test_label'] = mnist.test.labels[index_list_test].astype('float32')
    data['train_image_label'] = np.c_[data['train_image'], data['train_label']]

    num_samples, num_features = data['train_image'].shape
    num_hidden = 256
    graph = tf.Graph()
    with graph.as_default():
        with tf.name_scope('Input'):
            x_data = tf.placeholder(shape=[None, num_features], dtype=tf.float32)
            y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)
        # Reduce the dimensionality of the data with a sigmoid hidden layer;
        # the SVM then operates on the num_hidden-dimensional features
        with tf.name_scope('Net'):
            w_1 = tf.Variable(tf.random_normal(shape=[num_features, num_hidden]), name='w_1')
            b_1 = tf.Variable(tf.random_normal(shape=[num_hidden]), name='b_1')
            layer_1 = tf.sigmoid(tf.matmul(x_data, w_1) + b_1)
            W = tf.Variable(tf.random_normal(shape=[num_hidden, 1]), name='weights')
            b = tf.Variable(tf.random_normal(shape=[1]), name='bias')
        with tf.name_scope('Loss'):
            total_loss = loss_fn(W, b, layer_1, y_target)
        with tf.name_scope('Accuracy'):
            accuracy = inference_fn(W, b, layer_1, y_target)
        with tf.name_scope('Train'):
            train_op = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(total_loss)

    # Convert the test labels: map class 0 to -1 so labels lie in {-1, +1}, as assumed by the hinge loss
    test_label = data['test_label'].reshape(-1, 1)
    test_label[test_label == 0] = -1
    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(FLAGS.num_epochs):
            # Shuffle and take one random batch per epoch; map label 0 to -1
            np.random.shuffle(data['train_image_label'])
            image_batch = data['train_image_label'][:FLAGS.batch_size, :-1]
            label_batch = data['train_image_label'][:FLAGS.batch_size, -1]
            label_batch[label_batch == 0] = -1
            _, loss, acc = sess.run([train_op, total_loss, accuracy],
                                    feed_dict={x_data: image_batch,
                                               y_target: label_batch.reshape(-1, 1)})
            acc *= 100
            if (epoch + 1) % 5 == 0:
                # Evaluate on the full 0/1 test set every 5 epochs
                test_loss, test_acc = sess.run([total_loss, accuracy],
                                               feed_dict={x_data: data['test_image'],
                                                          y_target: test_label})
                test_acc *= 100
                print_list = [epoch + 1, loss, acc, test_acc]
                print('Epoch {0[0]}, loss: {0[1]:.2f}, training accuracy: {0[2]:.2f}%.'.format(print_list))
                print(' ' * 10, 'Testing accuracy is {0[3]:.2f}%.'.format(print_list))
        sess.close()
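
# Example invocation (the tf.app.flags defaults above can typically be
# overridden from the command line under TF 1.x):
#     python linear_SVM.py --num_epochs 200 --learning_rate 5e-4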