Model construction
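The code below assumes train_data, train_labels, vocab_size, and max_length come from the earlier data-preparation step (IMDB reviews padded to a fixed length). A minimal sketch of that step, with the two sizes as assumed values rather than the original's:

import numpy as np
from tensorflow import keras

vocab_size = 10000   # assumed: keep the 10,000 most frequent words
max_length = 500     # assumed: pad/truncate each review to 500 tokens

(train_data, train_labels), (test_data, test_labels) = \
    keras.datasets.imdb.load_data(num_words = vocab_size)
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, value = 0, padding = 'post', maxlen = max_length)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, value = 0, padding = 'post', maxlen = max_length)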
embedding_dim = 16
batch_size = 512

single_rnn_model = keras.models.Sequential([
    # 1. Define the embedding matrix: [vocab_size, embedding_dim].
    # 2. Each sample [1, 2, 3, 4, ...] becomes max_length * embedding_dim.
    # 3. A batch becomes batch_size * max_length * embedding_dim.
    keras.layers.Embedding(vocab_size, embedding_dim,
                           input_length = max_length),
    # return_sequences decides whether the RNN returns every step's output
    # or only the last step's; False returns only the last step.
    keras.layers.SimpleRNN(units = 64, return_sequences = False),
    keras.layers.Dense(64, activation = 'relu'),
    keras.layers.Dense(1, activation = 'sigmoid'),
])
single_rnn_model.summary()

single_rnn_model.compile(optimizer = 'adam',
                         loss = 'binary_crossentropy',
                         metrics = ['accuracy'])

Training
history_single_rnn = single_rnn_model.fit(
    train_data, train_labels,
    epochs = 30,
    batch_size = batch_size,
    validation_split = 0.2)
Learning curves
import pandas as pd
import matplotlib.pyplot as plt

def plot_learning_curves(history, label, epochs, min_value, max_value):
    data = {}
    data[label] = history.history[label]
    data['val_' + label] = history.history['val_' + label]
    pd.DataFrame(data).plot(figsize = (8, 5))
    plt.grid(True)
    plt.axis([0, epochs, min_value, max_value])
    plt.show()

# In TF 2.x the history keys for metrics = ['accuracy'] are
# 'accuracy'/'val_accuracy' (older Keras used 'acc'/'val_acc').
plot_learning_curves(history_single_rnn, 'accuracy', 30, 0, 1)
plot_learning_curves(history_single_rnn, 'loss', 30, 0, 1)
Testing
The test accuracy is only 0.5036 (the evaluation call is sketched below). The model above is a unidirectional RNN; what happens if we use a bidirectional RNN instead?
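A minimal evaluation sketch, assuming test_data and test_labels come from the same data-preparation step as the training set:

# Evaluate the unidirectional model on the held-out test set.
test_loss, test_acc = single_rnn_model.evaluate(
    test_data, test_labels, batch_size = batch_size)
print(test_acc)  # reported as 0.5036 in this run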
Bidirectional RNN model
embedding_dim = 16
batch_size = 512

model = keras.models.Sequential([
    # 1. Define the embedding matrix: [vocab_size, embedding_dim].
    # 2. Each sample [1, 2, 3, 4, ...] becomes max_length * embedding_dim.
    # 3. A batch becomes batch_size * max_length * embedding_dim.
    keras.layers.Embedding(vocab_size, embedding_dim,
                           input_length = max_length),
    # Wrap the RNN in a Bidirectional layer.
    keras.layers.Bidirectional(
        keras.layers.SimpleRNN(
            # return_sequences = True because the full sequence feeds the
            # next recurrent layer, not just a single final output.
            units = 64, return_sequences = True)),
    # Stack a second bidirectional RNN layer.
    keras.layers.Bidirectional(
        keras.layers.SimpleRNN(
            units = 64, return_sequences = False)),
    keras.layers.Dense(64, activation = 'relu'),
    keras.layers.Dense(1, activation = 'sigmoid'),
])
model.summary()

model.compile(optimizer = 'adam',
              loss = 'binary_crossentropy',
              metrics = ['accuracy'])
Training the bidirectional RNN
history = model.fit(
    train_data, train_labels,
    epochs = 30,
    batch_size = batch_size,
    validation_split = 0.2)
The accuracy is somewhat better.
Bidirectional RNN learning curves
plot_learning_curves(history, 'accuracy', 30, 0, 1)
plot_learning_curves(history, 'loss', 30, 0, 3.8)
The two-layer bidirectional RNN is too complex and has an overfitting problem, so let's try a single layer instead.
Single-layer bidirectional RNN model
embedding_dim = 16
batch_size = 512

bi_rnn_model = keras.models.Sequential([
    # 1. Define the embedding matrix: [vocab_size, embedding_dim].
    # 2. Each sample [1, 2, 3, 4, ...] becomes max_length * embedding_dim.
    # 3. A batch becomes batch_size * max_length * embedding_dim.
    keras.layers.Embedding(vocab_size, embedding_dim,
                           input_length = max_length),
    keras.layers.Bidirectional(
        keras.layers.SimpleRNN(
            # Shrink the layer to 32 units.
            units = 32, return_sequences = False)),
    keras.layers.Dense(32, activation = 'relu'),
    keras.layers.Dense(1, activation = 'sigmoid'),
])
bi_rnn_model.summary()

bi_rnn_model.compile(optimizer = 'adam',
                     loss = 'binary_crossentropy',
                     metrics = ['accuracy'])
Training
history = bi_rnn_model.fit(
    train_data, train_labels,
    epochs = 30,
    batch_size = batch_size,
    validation_split = 0.2)
The accuracy is much better.
Learning curves
plot_learning_curves(history, 'accuracy', 30, 0, 1)
plot_learning_curves(history, 'loss', 30, 0, 1.5)
Testing
The test accuracy is lower than that of the plain feed-forward network. Does that mean an RNN is weaker than an ordinary NN? The learning curves show the validation loss starting to climb after about the fifth epoch, so the RNN is overfitting very visibly. That actually shows the model is powerful: so powerful that it overfits. The remedy we just used was to reduce the model size; regularization terms and dropout can also curb overfitting, as sketched below. Later we will introduce an even stronger network, the LSTM.
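A minimal sketch of the dropout option; this variant is not from the original text, and the 0.5 rates are assumed rather than tuned:

# Single-layer bidirectional model with dropout added against overfitting.
bi_rnn_dropout_model = keras.models.Sequential([
    keras.layers.Embedding(vocab_size, embedding_dim,
                           input_length = max_length),
    keras.layers.Bidirectional(
        keras.layers.SimpleRNN(units = 32, return_sequences = False,
                               dropout = 0.5)),   # assumed rate
    keras.layers.Dense(32, activation = 'relu'),
    keras.layers.Dropout(0.5),                    # assumed rate
    keras.layers.Dense(1, activation = 'sigmoid'),
])
bi_rnn_dropout_model.compile(optimizer = 'adam',
                             loss = 'binary_crossentropy',
                             metrics = ['accuracy'])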
Text generation
Loading the Shakespeare dataset
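The download cell is not shown in this section; a minimal sketch, assuming the TensorFlow-hosted copy of the corpus:

import os
import numpy as np
import tensorflow as tf
from tensorflow import keras

input_filepath = keras.utils.get_file(
    'shakespeare.txt',
    'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
text = open(input_filepath, 'r').read()
print(len(text))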
Data processing
The vocabulary contains 65 characters, listed above.
Each character is assigned an id, giving mappings between ids and characters.
Convert the list into an np.array.
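The mapping cells themselves are omitted here; a minimal sketch of the variables (vocab, char2idx, idx2char, text_as_int) that the code below relies on:

vocab = sorted(set(text))    # the 65 unique characters
char2idx = {ch: idx for idx, ch in enumerate(vocab)}
idx2char = np.array(vocab)
# Encode the whole corpus as an array of character ids.
text_as_int = np.array([char2idx[ch] for ch in text])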
def split_input_target(id_text):
    """
    Each target character is the next character of the input:
    abcde -> abcd, bcde
    """
    return id_text[0:-1], id_text[1:]

char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)
seq_length = 100
# Batch seq_length + 1 characters so that input and target each end up
# with seq_length characters (e.g. 5 chars -> input abcd, target bcde).
seq_dataset = char_dataset.batch(seq_length + 1,
                                 # Drop the final batch if it is short.
                                 drop_remainder = True)

for ch_id in char_dataset.take(2):
    print(ch_id, idx2char[ch_id.numpy()])

for seq_id in seq_dataset.take(2):
    print(seq_id)
    print(repr(''.join(idx2char[seq_id.numpy()])))
Calling the function above yields the inputs and targets. The first two tensors printed below form one input/target pair: the target is the input shifted by one character, so the first value of the second tensor equals the second value of the first.
Building the dataset
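That cell is also not shown; a minimal sketch, with buffer_size and batch_size as assumed values (build_model below expects a batch_size name to exist):

batch_size = 64      # assumed
buffer_size = 10000  # assumed shuffle buffer

seq_dataset = seq_dataset.map(split_input_target)
seq_dataset = seq_dataset.shuffle(buffer_size).batch(
    batch_size, drop_remainder = True)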
Defining the model
vocab_size = len(vocab)
embedding_dim = 256
rnn_units = 1024

def build_model(vocab_size, embedding_dim, rnn_units, batch_size):
    model = keras.models.Sequential([
        # Embedding layer.
        keras.layers.Embedding(vocab_size, embedding_dim,
                               batch_input_shape = [batch_size, None]),
        keras.layers.SimpleRNN(units = rnn_units,
                               stateful = True,
                               recurrent_initializer = 'glorot_uniform',
                               return_sequences = True),
        keras.layers.Dense(vocab_size),
    ])
    return model

model = build_model(
    vocab_size = vocab_size,
    embedding_dim = embedding_dim,
    rnn_units = rnn_units,
    batch_size = batch_size)
model.summary()
The predictions have shape batch_size * sequence_length * vocab_size: at every step the model predicts a distribution over the character classes.
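example_batch_predictions, used in the sampling cell below, is the output of pushing one batch through the still-untrained model; a minimal sketch:

# Run a single batch through the model to inspect the output shape.
for input_example_batch, target_example_batch in seq_dataset.take(1):
    example_batch_predictions = model(input_example_batch)
    print(example_batch_predictions.shape)  # (batch_size, 100, 65)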
Random sampling
# Random sampling (instead of greedy argmax decoding).
sample_indices = tf.random.categorical(
    logits = example_batch_predictions[0], num_samples = 1)
print(sample_indices)
# (100, 65) -> (100, 1); drop the trailing axis.
sample_indices = tf.squeeze(sample_indices, axis = -1)
print(sample_indices)
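model.fit below needs a compiled model, and the compile cell does not appear in this section. A minimal sketch, assuming logits outputs and sparse integer targets:

# The final Dense layer emits logits, hence from_logits = True.
def loss(labels, logits):
    return keras.losses.sparse_categorical_crossentropy(
        labels, logits, from_logits = True)

model.compile(optimizer = 'adam', loss = loss)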
output_dir = "./text_generation_checkpoints"
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

checkpoint_prefix = os.path.join(output_dir, 'ckpt_{epoch}')
checkpoint_callback = keras.callbacks.ModelCheckpoint(
    filepath = checkpoint_prefix,
    save_weights_only = True)

epochs = 100
history = model.fit(seq_dataset, epochs = epochs,
                    callbacks = [checkpoint_callback])
# Rebuild the model with batch_size = 1 for generation and restore the
# latest checkpoint weights.
model2 = build_model(vocab_size,
                     embedding_dim,
                     rnn_units,
                     batch_size = 1)
model2.load_weights(tf.train.latest_checkpoint(output_dir))
model2.build(tf.TensorShape([1, None]))
# Generation loop, starting from a seed sequence A:
# A -> model -> b
# A.append(b) -> B
# B(Ab) -> model -> c
# B.append(c) -> C
# C(Abc) -> model -> ...
model2.summary()
def generate_text(model, start_string, num_generate = 1000):
    input_eval = [char2idx[ch] for ch in start_string]
    input_eval = tf.expand_dims(input_eval, 0)
    text_generated = []
    model.reset_states()
    for _ in range(num_generate):
        # 1. Model inference -> predictions.
        # 2. Sample -> ch -> append to text_generated.
        # 3. Update input_eval.
        # predictions: [batch_size, input_eval_len, vocab_size]
        predictions = model(input_eval)
        # predictions: [input_eval_len, vocab_size]
        predictions = tf.squeeze(predictions, 0)
        # Sample only the last step's distribution: a b c -> b c d.
        predicted_id = tf.random.categorical(
            predictions, num_samples = 1)[-1, 0].numpy()
        text_generated.append(idx2char[predicted_id])
        # Feed just the new character back in; the stateful RNN keeps
        # the context: s, x -> rnn -> s', y.
        input_eval = tf.expand_dims([predicted_id], 0)
    return start_string + ''.join(text_generated)

new_text = generate_text(model2, "All: ")
print(new_text)
Generated text