Create a new file in the TensorFlow directory, name it LSTM.py, and use TensorFlow to perform digit recognition on the MNIST dataset with an LSTM. Write the following code in PyCharm.
import tensorflow as tf
from tensorflow.contrib import rnn
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow INFO and WARNING log messages
Import the MNIST dataset:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
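As a quick aside (not part of LSTM.py), read_data_sets returns each image flattened to 784 values together with a one-hot label, which is why the training loop below reshapes every batch to (batch_size, timesteps, num_input). A minimal shape check, purely for illustration:
sample_x, sample_y = mnist.train.next_batch(4)
print(sample_x.shape)  # (4, 784): flattened 28x28 images
print(sample_y.shape)  # (4, 10): one-hot digit labels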
Set the global variables:
learning_rate = 0.001  # learning rate for gradient descent
training_steps = 10000  # number of training iterations
batch_size = 128  # number of samples per training batch
display_step = 200  # print progress every 200 steps
num_input = 28  # dimension of each input vector (one image row)
timesteps = 28  # length of the recurrent sequence (number of image rows)
num_hidden = 128  # number of features in the hidden layer
num_classes = 10  # digits 0~9
Define the tf Graph inputs:
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
Define the weights and biases:
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
    # Unstack (batch_size, timesteps, num_input) into a list of `timesteps` tensors of shape (batch_size, num_input)
    x = tf.unstack(x, timesteps, 1)
    # forget_bias is initialized to 1 so that the cell does not forget any information at the start
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
    # Multiply the last output by the fully connected output weights and add the biases
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
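To make the unstack step concrete, here is a small NumPy analogue (the zero-filled array is invented purely for illustration): each 28x28 image is treated as 28 time steps of 28 features, static_rnn produces one output per step, and RNN() keeps only the output of the last step.
import numpy as np
fake_batch = np.zeros((128, 28, 28))              # (batch_size, timesteps, num_input)
steps = [fake_batch[:, t, :] for t in range(28)]  # what tf.unstack(x, timesteps, 1) produces
print(len(steps), steps[0].shape)                 # 28 (128, 28)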
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)
Define the loss and the optimizer:
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
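The accuracy expression follows the usual argmax/equal/mean recipe: take the argmax of the predicted distribution and of the one-hot label, compare them, and average the result. A toy NumPy check with made-up values, just to show the idea:
import numpy as np
pred = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # softmax outputs
label = np.array([[0, 1], [1, 0], [1, 0]])             # one-hot labels
correct = np.argmax(pred, 1) == np.argmax(label, 1)    # [True, True, False]
print(correct.astype(np.float32).mean())               # 0.6666667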
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(1, training_steps + 1):
        # Randomly draw the data used for this training iteration
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape the data so that it matches the expected input shape
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run one training step
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Compute the loss and accuracy on the current batch
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
                                                                 Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
    print("Optimization finished!")
    # Compute the accuracy on 128 test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Test accuracy:",
          sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
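If you want the accuracy over the whole test set rather than only the first 128 images, the same feed_dict pattern works. This optional variation is not part of the original script and assumes it is placed inside the same tf.Session block:
    full_test_data = mnist.test.images.reshape((-1, timesteps, num_input))
    full_test_label = mnist.test.labels
    print("Full test accuracy:",
          sess.run(accuracy, feed_dict={X: full_test_data, Y: full_test_label}))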