Building an LSTM with TensorFlow
import tensorflow as tf
from tensorflow.contrib import rnn
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200
num_inputs = 28   # number of input features per timestep
timesteps = 28    # number of timesteps
num_hiddens = 64  # number of units in each LSTM cell
num_classes = 10  # number of classes
num_layers = 2    # number of stacked recurrent layers
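# Each 28x28 MNIST image is treated as a sequence: 28 timesteps, one image row (28 pixels) per step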
X = tf.placeholder("float", [None, timesteps, num_inputs])
Y = tf.placeholder("float", [None, num_classes])
keep_prob = tf.placeholder(tf.float32, [])
weights = {
    'out': tf.Variable(tf.random_normal([num_hiddens, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}
def lstm(lstm_x, weights, biases, keep_prob):
    # Build a multi-layer LSTM
    multi_cell = []
    for i in range(num_layers):
        # num_units is the dimensionality of each LSTM cell's output
        lstm_cell = rnn.BasicLSTMCell(num_units=num_hiddens, forget_bias=1.0, state_is_tuple=True)
        # For dropout, usually only output_keep_prob is set
        # Dropout should only be applied during training
        lstm_cell = rnn.DropoutWrapper(cell=lstm_cell, input_keep_prob=1.0, output_keep_prob=keep_prob)
        multi_cell.append(lstm_cell)
    # MultiRNNCell stacks the cells into a multi-layer LSTM
    mlstm_cell = rnn.MultiRNNCell(multi_cell, state_is_tuple=True)
    # Initialize the state with all zeros
    init_state = mlstm_cell.zero_state(batch_size, dtype=tf.float32)
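    # Note: batch_size is fixed in zero_state, so every batch fed to the graph
    # (including the test batch below) must contain exactly batch_size examples.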
    # With time_major=False, outputs.shape = [batch_size, time_step_size, hidden_size]
    outputs, state = tf.nn.dynamic_rnn(mlstm_cell, inputs=lstm_x, initial_state=init_state, time_major=False)
    # Note: tf.nn.static_rnn instead takes a list of per-timestep [batch_size, input_size] tensors, e.g. x = tf.unstack(x, axis=1)
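    # A minimal sketch of that static_rnn variant (not used here; the names x_steps and
    # outputs_list are illustrative only):
    #   x_steps = tf.unstack(lstm_x, timesteps, axis=1)   # list of timesteps tensors, each [batch_size, num_inputs]
    #   outputs_list, state = rnn.static_rnn(mlstm_cell, x_steps, initial_state=init_state)
    #   h_state = outputs_list[-1]                        # last timestep, (batch_size, num_hiddens)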
    # Take the output of the last timestep: (batch_size, num_hiddens)
    h_state = outputs[:, -1, :]
    # Equivalently: h_state = state[-1][1]  (the h part of the top layer's LSTMStateTuple)
    # Or transpose outputs to (steps, batch, hidden_size) first:
    # outputs = tf.transpose(outputs, [1, 0, 2])
    # h_state = outputs[-1]
    # Fully connected layer, output shape (batch_size, num_classes)
    return tf.matmul(h_state, weights['out']) + biases['out']
logits = lstm(X, weights, biases, keep_prob)
# Define the loss and the optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Compute accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape x to the expected input shape (batch_size, timesteps, num_inputs)
        batch_x = batch_x.reshape((batch_size, timesteps, num_inputs))
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.9})
        if step % display_step == 0 or step == 1:
            # Evaluate on the current batch with dropout disabled (keep_prob = 1.0)
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y, keep_prob: 1.})
            print("Step " + str(step) + ", Minibatch Loss= " + \
                  "{:.4f}".format(loss) + ", Training Accuracy= " + \
                  "{:.3f}".format(acc))
    print("Optimization Finished!")
    # Calculate accuracy for 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_inputs))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:", sess.run(accuracy, feed_dict={X: test_data, Y: test_label, keep_prob: 1.}))