1.線性回歸
import keras
import numpy as np
import matplotlib.pyplot as plt
# Sequential: a model built as a linear stack of layers
from keras.models import Sequential
# Dense: fully-connected layer
from keras.layers import Dense

# Generate 100 random points with target y = 0.1*x + 0.2 plus Gaussian noise
x_data = np.random.rand(100)
noise = np.random.normal(0, 0.01, x_data.shape)
y_data = x_data * 0.1 + 0.2 + noise

# Show the raw points
plt.scatter(x_data, y_data)
plt.show()

# Build a sequential model with a single 1->1 fully-connected layer:
# plain linear regression y = W*x + b
model = Sequential()
model.add(Dense(units=1, input_dim=1))

# sgd: stochastic gradient descent
# mse: mean squared error
model.compile(optimizer='sgd', loss='mse')

# Train for 3001 steps; each step trains on the whole data set as one batch.
# NOTE(review): the original paste had the loop body unindented, which is a
# SyntaxError — restored here.
for step in range(3001):
    cost = model.train_on_batch(x_data, y_data)
    # Print the cost every 500 batches
    if step % 500 == 0:
        print("cost: ", cost)

# Print the learned weight and bias (should approach 0.1 and 0.2)
W, b = model.layers[0].get_weights()
print("W: ", W, "b: ", b)

# Feed x_data through the network to get the predictions y_pred
y_pred = model.predict(x_data)

# Show the points and the fitted line
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred, 'r--', lw=3)
plt.show()
2.非線性回歸
import keras
import numpy as np
import matplotlib.pyplot as plt
# Sequential: a model built as a linear stack of layers
from keras.models import Sequential
# Dense: fully-connected layer
from keras.layers import Dense, Activation
from keras.optimizers import SGD

# Generate 200 evenly spaced points and a noisy quadratic target
x_data = np.linspace(-0.5, 0.5, 200)
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise
plt.scatter(x_data, y_data)
plt.show()

# Build a 1-10-1 network: one hidden layer lets the model fit a curve
model = Sequential()
model.add(Dense(units=10, input_dim=1, activation='relu'))
# model.add(Activation('tanh'))
model.add(Dense(units=1, activation='relu'))
# model.add(Activation('tanh'))

# Define the optimizer: stochastic gradient descent with learning rate 0.3
sgd = SGD(lr=0.3)
# mse: mean squared error
model.compile(optimizer=sgd, loss='mse')

# Train for 3001 steps; each step trains on the whole data set as one batch.
# NOTE(review): the original paste had the loop body unindented, which is a
# SyntaxError — restored here.
for step in range(3001):
    cost = model.train_on_batch(x_data, y_data)
    # Print the cost every 500 batches
    if step % 500 == 0:
        print("cost: ", cost)

# Feed x_data through the network to get the predictions y_pred
y_pred = model.predict(x_data)

# Show the points and the fitted curve
plt.scatter(x_data, y_data)
plt.plot(x_data, y_pred, 'r--', lw=3)
plt.show()
3.MNIST資料集分類
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# A single softmax layer: 784 inputs -> 10 class outputs
model = Sequential([
    Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')
])

# Stochastic gradient descent with mean-squared-error loss;
# track accuracy while training
sgd = SGD(lr=0.2)
model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
4.交叉熵
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# A single softmax layer: 784 inputs -> 10 class outputs
model = Sequential([
    Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')
])

# Same network as the previous section, but trained with categorical
# cross-entropy — the natural loss for a softmax classifier
sgd = SGD(lr=0.2)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
5.Dropout
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# 784-200-100-10 network; Dropout(0.4) after each hidden layer
# randomly silences 40% of units during training to curb overfitting
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer='one', activation='tanh'),
    Dropout(0.4),
    Dense(units=100, bias_initializer='one', activation='tanh'),
    Dropout(0.4),
    Dense(units=10, bias_initializer='one', activation='softmax'),
])

# Stochastic gradient descent with cross-entropy loss; track accuracy
sgd = SGD(lr=0.2)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on both test and training data to compare generalization
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('test accuracy', accuracy)
loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('train accuracy', accuracy)
6.正則化應用
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import SGD
from keras.regularizers import l2

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# 784-200-100-10 network with an L2 weight penalty (0.0003) on every layer
# instead of Dropout to control overfitting
model = Sequential([
    Dense(units=200, input_dim=784, bias_initializer='one', activation='tanh', kernel_regularizer=l2(0.0003)),
    Dense(units=100, bias_initializer='one', activation='tanh', kernel_regularizer=l2(0.0003)),
    Dense(units=10, bias_initializer='one', activation='softmax', kernel_regularizer=l2(0.0003)),
])

# Stochastic gradient descent with cross-entropy loss; track accuracy
sgd = SGD(lr=0.2)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on both test and training data to compare generalization
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('test accuracy', accuracy)
loss, accuracy = model.evaluate(x_train, y_train)
print('\ntrain loss', loss)
print('train accuracy', accuracy)
7.優化器介紹及其應用
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD, Adam

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# A single softmax layer: 784 inputs -> 10 class outputs
model = Sequential([
    Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')
])

# Two optimizers defined for comparison; Adam is the one actually used
sgd = SGD(lr=0.2)
adam = Adam(lr=0.001)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
8.CNN應用于手寫數字識别
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add a channel axis: (60000, 28, 28) -> (60000, 28, 28, 1), scale to [0, 1]
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Build the CNN:
#   conv(32 filters, 5x5, same padding, relu) -> 2x2 max-pool
#   conv(64 filters, 5x5, same padding, relu) -> 2x2 max-pool
#   flatten -> dense(1024, relu) -> dropout(0.5) -> dense(10, softmax)
model = Sequential()
# First convolution; input_shape is the per-sample image shape
model.add(Convolution2D(
    input_shape=(28, 28, 1),
    filters=32,
    kernel_size=5,
    strides=1,
    padding='same',
    activation='relu',
))
# First pooling layer halves each spatial dimension
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
))
# Second convolution and pooling
model.add(Convolution2D(64, 5, strides=1, padding='same', activation='relu'))
model.add(MaxPooling2D(2, 2, 'same'))
# Flatten the feature maps into a vector
model.add(Flatten())
# Fully-connected head with dropout for regularization
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# Adam optimizer with cross-entropy loss; track accuracy
adam = Adam(lr=1e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
9.RNN應用
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN
from keras.optimizers import Adam

# Treat each image as a sequence: 28 time steps (rows) of 28 pixels each
input_size = 28   # features per step (one image row)
time_steps = 28   # steps per sample (number of rows)
cell_size = 50    # hidden units in the RNN cell

# Load the MNIST data set and scale pixels to [0, 1];
# the (60000, 28, 28) shape is kept — no flattening for an RNN
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Recurrent network followed by a softmax output layer
model = Sequential()
model.add(SimpleRNN(
    units=cell_size,                        # output size
    input_shape=(time_steps, input_size),   # input: (steps, features)
))
model.add(Dense(10, activation='softmax'))

# Adam optimizer with cross-entropy loss; track accuracy
adam = Adam(lr=1e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=64, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
10.儲存模型
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# A single softmax layer: 784 inputs -> 10 class outputs
model = Sequential([
    Dense(units=10, input_dim=784, bias_initializer='one', activation='softmax')
])

# Stochastic gradient descent with mean-squared-error loss; track accuracy
sgd = SGD(lr=0.2)
model.compile(optimizer=sgd, loss='mse', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=32, epochs=10)

# Evaluate on the held-out test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)

# Persist architecture + weights + optimizer state to one HDF5 file
# (requires h5py: pip install h5py)
model.save('model.h5')
11.載入模型
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.models import load_model

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print('x_shape: ', x_train.shape)  # (60000, 28, 28)
print('y_shape: ', y_train.shape)  # (60000,)

# Flatten each 28x28 image to a 784-vector and scale pixels to [0, 1]
x_train = x_train.reshape(x_train.shape[0], -1) / 255.0
x_test = x_test.reshape(x_test.shape[0], -1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Restore the model saved by the previous section (model.h5 must exist)
model = load_model('model.h5')

# Evaluate the restored model on the test set
loss, accuracy = model.evaluate(x_test, y_test)
print('\ntest loss', loss)
print('accuracy', accuracy)
12.繪制網絡結構
import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
import matplotlib.pyplot as plt
# plot_model needs pydot and graphviz installed

# Load the MNIST data set
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Add a channel axis: (60000, 28, 28) -> (60000, 28, 28, 1), scale to [0, 1]
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
# One-hot encode the labels
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)

# Same CNN as the earlier CNN section — built here so its structure can be
# drawn with plot_model (training below is left commented out)
model = Sequential()
# First convolution; input_shape is the per-sample image shape
model.add(Convolution2D(
    input_shape=(28, 28, 1),
    filters=32,
    kernel_size=5,
    strides=1,
    padding='same',
    activation='relu',
))
# First pooling layer halves each spatial dimension
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
))
# Second convolution and pooling
model.add(Convolution2D(64, 5, strides=1, padding='same', activation='relu'))
model.add(MaxPooling2D(2, 2, 'same'))
# Flatten the feature maps into a vector
model.add(Flatten())
# Fully-connected head with dropout
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# Training/evaluation intentionally disabled in this section:
# adam = Adam(lr=1e-4)
# model.compile(optimizer = adam,loss='categorical_crossentropy',metrics=['accuracy'])
# model.fit(x_train,y_train,batch_size=64,epochs=10)
# loss,accuracy = model.evaluate(x_test,y_test)
# print('\ntest loss',loss)
# print('accuracy',accuracy)