TensorFlow Example: Handwritten Digit Recognition

This post shows how to use TensorFlow to recognize handwritten digits in the MNIST dataset. It covers several neural-network optimization techniques (L2 regularization of the model, a moving average of the model weights, and learning-rate decay) as well as practical TensorFlow machinery: model persistence and resuming training from a checkpoint.
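
The code targets the TensorFlow 1.x API (tf.placeholder, tf.Session, tf.contrib, and the tutorials input_data helper). Running it under 1.x is simplest; on TensorFlow 2.x the 1.x graph-mode behavior can be restored as sketched below, although tf.contrib and the MNIST tutorial module were removed in 2.x and would still need replacements:

import tensorflow.compat.v1 as tf  # expose the 1.x API surface
tf.disable_v2_behavior()           # turn off eager execution etc.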

import tensorflow as tf
import os, time
import matplotlib.pyplot as plt

Build the model

# Parameters describing the network structure
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

# Parameters controlling training
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = 'model/'
MODEL_NAME = 'model_nn_mnist.ckpt'

def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape,
        initializer=tf.truncated_normal_initializer(stddev=0.1))

    # When regularization is requested, add this variable's regularization
    # loss to the custom 'losses' collection.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

def inference(input_tensor, regularizer):
    with tf.variable_scope('layer1'):
        weights = get_weight_variable(
            [INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [LAYER1_NODE],
            initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    with tf.variable_scope('layer2'):
        weights = get_weight_variable(
            [LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable(
            "biases", [OUTPUT_NODE],
            initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
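
Because the variables are created with tf.get_variable inside a tf.variable_scope, they are addressable by name ('layer1/weights' and so on) and can be shared rather than re-created. A minimal sketch of this sharing mechanism, using an illustrative scope name demo that is not part of the model above:

with tf.variable_scope('demo'):
    v1 = tf.get_variable('v', [1], initializer=tf.zeros_initializer())
with tf.variable_scope('demo', reuse=True):
    v2 = tf.get_variable('v', [1])  # returns the existing variable 'demo/v'
assert v1 is v2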

Train the model

from tensorflow.examples.tutorials.mnist import input_data

def train(mnist):
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')

    # Build the inference graph, with L2 regularization on the weights.
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Maintain a moving average of all trainable variables.
    variable_average = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(
        tf.trainable_variables())

    # Suitable when each image belongs to exactly one class.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Add the regularization terms collected in the 'losses' collection.
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Exponential decay of the learning rate.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)

    train_step = tf.train.GradientDescentOptimizer(learning_rate)\
        .minimize(loss, global_step=global_step)

    # Control dependencies: the listed operations must run before anything
    # inside the block; tf.no_op itself is just a named marker.
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name='train')

    # The Saver handles model persistence.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # Make sure the checkpoint directory exists.
        if not os.path.exists(MODEL_SAVE_PATH):
            os.makedirs(MODEL_SAVE_PATH)

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})

            # Save the model every 1000 steps.
            if i % 1000 == 0:
                print("After %s training step(s), loss on training "
                      "batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
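
Aside: tf.train.ExponentialMovingAverage keeps a shadow copy of every variable it is applied to, and passing global_step as its num_updates argument makes the effective decay smaller early in training, so the shadows track the weights quickly at first. A pure-Python sketch of its update rule (the function name shadow_update is illustrative):

def shadow_update(shadow, variable, step, base_decay=MOVING_AVERAGE_DECAY):
    # Effective decay used by ExponentialMovingAverage with num_updates=step.
    decay = min(base_decay, (1.0 + step) / (10.0 + step))
    return decay * shadow + (1.0 - decay) * variable

print(shadow_update(0.0, 1.0, step=0))      # 0.9  (fast tracking early on)
print(shadow_update(0.0, 1.0, step=10000))  # ~0.01 (slow drift late in training)
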
tf.reset_default_graph()
mnist = input_data.read_data_sets('/home/seisinv/data/mnist/', one_hot=True)
train(mnist)
Extracting /home/seisinv/data/mnist/train-images-idx3-ubyte.gz
Extracting /home/seisinv/data/mnist/train-labels-idx1-ubyte.gz
Extracting /home/seisinv/data/mnist/t10k-images-idx3-ubyte.gz
Extracting /home/seisinv/data/mnist/t10k-labels-idx1-ubyte.gz
After 1 training step(s), loss on training batch is 7.21735.
After 1001 training step(s), loss on training batch is 0.43611.
After 2001 training step(s), loss on training batch is 0.445638.
After 3001 training step(s), loss on training batch is 0.653738.
After 4001 training step(s), loss on training batch is 0.441901.
After 5001 training step(s), loss on training batch is 0.219952.
After 6001 training step(s), loss on training batch is 0.319431.
After 7001 training step(s), loss on training batch is 0.292837.
After 8001 training step(s), loss on training batch is 0.249832.
After 9001 training step(s), loss on training batch is 0.38989.
After 10001 training step(s), loss on training batch is 0.439101.
After 11001 training step(s), loss on training batch is 0.315771.
After 12001 training step(s), loss on training batch is 0.309541.
After 13001 training step(s), loss on training batch is 0.270133.
After 14001 training step(s), loss on training batch is 0.204996.
After 15001 training step(s), loss on training batch is 0.35835.
After 16001 training step(s), loss on training batch is 0.343041.
After 17001 training step(s), loss on training batch is 0.419665.
After 18001 training step(s), loss on training batch is 0.145532.
After 19001 training step(s), loss on training batch is 0.308465.
After 20001 training step(s), loss on training batch is 0.265333.
After 21001 training step(s), loss on training batch is 0.149025.
After 22001 training step(s), loss on training batch is 0.250994.
After 23001 training step(s), loss on training batch is 0.43688.
After 24001 training step(s), loss on training batch is 0.381845.
After 25001 training step(s), loss on training batch is 0.277657.
After 26001 training step(s), loss on training batch is 0.492095.
After 27001 training step(s), loss on training batch is 0.334551.
After 28001 training step(s), loss on training batch is 0.19148.
After 29001 training step(s), loss on training batch is 0.244014.
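
As a sanity check on the schedule: with exponential_decay, the learning rate after 30,000 steps can be computed by hand. A quick estimate, assuming the standard 55,000 MNIST training examples (so one decay period is 550 steps):

decay_steps = 55000 / BATCH_SIZE  # = mnist.train.num_examples / BATCH_SIZE
lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (TRAINING_STEPS / decay_steps)
print(lr)  # roughly 0.46, down from 0.8 (staircase=False, so the decay is continuous)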

Evaluate the model

# EVAL_INTERVAL_SECS = 10

def evaluate(mnist):
    with tf.Graph().as_default() as g:
        x = tf.placeholder(
            tf.float32, [None, INPUT_NODE], name='x-input')
        y_ = tf.placeholder(
            tf.float32, [None, OUTPUT_NODE], name='y-input')
        validate_feed = {x: mnist.validation.images,
                         y_: mnist.validation.labels}

        # Only the prediction loss matters here, not the regularization loss.
        y = inference(x, None)

        # Compute the accuracy.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model through variable renaming, so that inference runs
        # directly on the moving-average weights.
        variable_averages = tf.train.ExponentialMovingAverage(
            MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # while True:
        with tf.Session() as sess:
            # Automatically find the latest model in the checkpoint file.
            ckpt = tf.train.get_checkpoint_state(
                MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                # Restore the model.
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Recover the training step at which the model was saved.
                global_step = ckpt.model_checkpoint_path\
                    .split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training step(s), validation "
                      "accuracy = %g" % (global_step, accuracy_score))
            else:
                print("No checkpoint file found")
                return
            # time.sleep(EVAL_INTERVAL_SECS)

tf.reset_default_graph()
mnist = input_data.read_data_sets('/home/seisinv/data/mnist', one_hot=True)
evaluate(mnist)
Extracting /home/seisinv/data/mnist/train-images-idx3-ubyte.gz
Extracting /home/seisinv/data/mnist/train-labels-idx1-ubyte.gz
Extracting /home/seisinv/data/mnist/t10k-images-idx3-ubyte.gz
Extracting /home/seisinv/data/mnist/t10k-labels-idx1-ubyte.gz
INFO:tensorflow:Restoring parameters from model/model_nn_mnist.ckpt-29001
After 29001 training step(s), validation accuracy = 0.9292
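
The introduction also promises resuming training from a checkpoint. The train() function above always initializes from scratch; here is a minimal sketch of the resume logic, assuming the same training graph has already been built in the default graph:

saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
    if ckpt and ckpt.model_checkpoint_path:
        # Restores the weights AND global_step, so the learning-rate decay
        # and the moving averages continue from where training stopped.
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())  # no checkpoint: cold start
    # From here, run the usual next_batch / sess.run(train_op) loop.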

References

  • Zheng Zeyu, Liang Bowen, and Gu Siyu, TensorFlow: 实战Google深度学习框架, 2nd edition