
[Keras] Getting Started with Keras

1. Linear Regression

import keras
import numpy as np
import matplotlib.pyplot as plt
# Sequential: a model built as a linear stack of layers
from keras.models import Sequential
# Dense: fully connected layer
from keras.layers import Dense

# Generate 100 random points with numpy
x_data = np.random.rand(100)
noise = np.random.normal(0,0.01,x_data.shape)
y_data = x_data*0.1 + 0.2 + noise

# Plot the random points
plt.scatter(x_data,y_data)
plt.show()

# Build a sequential model
model = Sequential()
# Add a fully connected layer to the model
model.add(Dense(units = 1,input_dim = 1))
# sgd: stochastic gradient descent
# mse: mean squared error
model.compile(optimizer='sgd',loss = 'mse')

# Train for 3001 batches
for step in range(3001):
    # Train on one batch at a time
    cost = model.train_on_batch(x_data,y_data)
    # Print the cost every 500 batches
    if step % 500 ==0:
        print("cost: ", cost)

# Print the learned weight and bias
W,b = model.layers[0].get_weights()
print("W: ", W,"b: ",b)

# Feed x_data into the network to get the predictions y_pred
y_pred = model.predict(x_data)

# Plot the random points
plt.scatter(x_data,y_data)
# Plot the predictions
plt.plot(x_data,y_pred,'r--',lw=3)
plt.show()
           

2. Nonlinear Regression

import keras
import numpy as np
import matplotlib.pyplot as plt
# Sequential: a model built as a linear stack of layers
from keras.models import Sequential
# Dense: fully connected layer
from keras.layers import Dense,Activation
from keras.optimizers import SGD

# Generate 200 random points with numpy
x_data = np.linspace(-0.5,0.5,200)
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise
plt.scatter(x_data,y_data)
plt.show()

# Build a sequential model
model = Sequential()
# Add fully connected layers to the model
# Network structure: 1-10-1
model.add(Dense(units=10,input_dim = 1,activation='relu'))
# model.add(Activation('tanh'))
model.add(Dense(units=1,activation='relu'))
# model.add(Activation('tanh'))

# Define the optimization algorithm
sgd = SGD(lr=0.3)

# sgd: stochastic gradient descent
# mse: mean squared error
model.compile(optimizer=sgd,loss = 'mse')


# Train for 3001 batches
for step in range(3001):
    # Train on one batch at a time
    cost = model.train_on_batch(x_data,y_data)
    # Print the cost every 500 batches
    if step % 500 ==0:
        print("cost: ", cost)


# Feed x_data into the network to get the predictions y_pred
y_pred = model.predict(x_data)

# Plot the random points
plt.scatter(x_data,y_data)
# Plot the predictions
plt.plot(x_data,y_pred,'r--',lw=3)
plt.show()
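
A note on the output layer: because the targets y = x² + noise are small and can dip slightly below zero, a relu output can occasionally get stuck predicting 0. A hedged variant (not part of the original code) uses the tanh hidden layer hinted at in the comments above and a linear output, which is the usual choice for regression:

# Hedged variant, assuming the same x_data/y_data as above:
# tanh hidden layer and a linear (no activation) output layer
model2 = Sequential()
model2.add(Dense(units=10, input_dim=1, activation='tanh'))
model2.add(Dense(units=1))  # no activation -> linear output
model2.compile(optimizer=SGD(lr=0.3), loss='mse')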
           

3. MNIST Classification

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model: 784 inputs, 10 outputs
model = Sequential([
    Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')
])

# Define the optimizer
sgd = SGD(lr=0.2)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = sgd,
    loss='mse',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)
           

4. Cross-Entropy

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model: 784 inputs, 10 outputs
model = Sequential([
    Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')
])

# Define the optimizer
sgd = SGD(lr=0.2)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)
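
The only change from the previous section is the loss function. For reference, a minimal NumPy sketch of what categorical_crossentropy computes on one-hot labels (the clipping constant is an illustrative detail, not Keras's exact implementation):

def categorical_crossentropy_np(y_true, y_pred, eps=1e-7):
    # y_true: one-hot labels, shape (n, 10); y_pred: softmax outputs, shape (n, 10)
    y_pred = np.clip(y_pred, eps, 1.0)
    # For each sample, take -log of the predicted probability of the true class,
    # then average over the batch
    return -np.mean(np.sum(y_true * np.log(y_pred), axis=1))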
           

5. Dropout

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Dropout
from keras.optimizers import SGD

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model: 784-200-100-10 with dropout between the hidden layers
model = Sequential([
    Dense(units=200,input_dim=784,bias_initializer='one',activation='tanh'),
    Dropout(0.4),
    Dense(units=100,bias_initializer='one',activation='tanh'),
    Dropout(0.4),
    Dense(units=10,bias_initializer='one',activation='softmax')
])

# Define the optimizer
sgd = SGD(lr=0.2)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('test accuracy',accuracy)

loss,accuracy = model.evaluate(x_train,y_train)
print('\ntrain loss',loss)
print('train accuracy',accuracy)
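
Evaluating on both the test set and the training set shows how dropout narrows the gap between them. To make Dropout(0.4) concrete, here is a hedged NumPy sketch of inverted dropout as it is commonly implemented (not Keras's exact code): during training 40% of activations are zeroed at random and the rest are scaled up, while at evaluation time the layer is a no-op.

def dropout_np(a, rate=0.4, training=True):
    # a: activations from the previous layer
    if not training:
        return a  # dropout is disabled at evaluation/prediction time
    mask = (np.random.rand(*a.shape) >= rate)
    # Scale the kept activations by 1/(1-rate) so the expected value is unchanged
    return a * mask / (1.0 - rate)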
           

6. Applying Regularization

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Dropout
from keras.optimizers import SGD
from keras.regularizers import l2

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model: L2 regularization on every kernel
model = Sequential([
    Dense(units=200,input_dim=784,bias_initializer='one',activation='tanh',kernel_regularizer=l2(0.0003)),
    Dense(units=100,bias_initializer='one',activation='tanh',kernel_regularizer=l2(0.0003)),
    Dense(units=10,bias_initializer='one',activation='softmax',kernel_regularizer=l2(0.0003))
])

# Define the optimizer
sgd = SGD(lr=0.2)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = sgd,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('test accuracy',accuracy)

loss,accuracy = model.evaluate(x_train,y_train)
print('\ntrain loss',loss)
print('train accuracy',accuracy)
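
The l2(0.0003) regularizer adds 0.0003 * sum(w²) for each kernel matrix to the training loss. A small sketch of the total penalty for the trained model above (every layer in this model is a Dense layer, so get_weights()[0] is its kernel):

# Sum the L2 penalty over the kernels of the three Dense layers
# (biases are not regularized here)
l2_penalty = sum(0.0003 * np.sum(np.square(layer.get_weights()[0]))
                 for layer in model.layers)
print('L2 penalty currently added to the loss:', l2_penalty)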
           

7. Optimizers and Their Application

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD,Adam

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model: 784 inputs, 10 outputs
model = Sequential([
    Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')
])

# Define the optimizers
sgd = SGD(lr=0.2)
adam = Adam(lr=0.001)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = adam,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)
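
For intuition about the two optimizers defined above, here are hedged NumPy sketches of a single parameter update for plain SGD and for Adam (simplified; the real Keras optimizers also handle decay and other options):

def sgd_step(w, grad, lr=0.2):
    # Plain stochastic gradient descent: step against the gradient
    return w - lr * grad

def adam_step(w, grad, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    # Adam keeps running averages of the gradient (m) and squared gradient (v)
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad**2
    m_hat = m / (1 - beta1**t)   # bias correction, t is the step count starting at 1
    v_hat = v / (1 - beta2**t)
    return w - lr * m_hat / (np.sqrt(v_hat) + eps), m, v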
           

8. CNN for Handwritten Digit Recognition

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Dropout,Convolution2D,MaxPooling2D,Flatten
from keras.optimizers import Adam

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28) -> (60000,28,28,1)
x_train = x_train.reshape(-1,28,28,1)/255.0
x_test = x_test.reshape(-1,28,28,1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model
model = Sequential()

# First convolutional layer
# input_shape: shape of the input
# filters: number of convolution kernels/filters
# kernel_size: size of the convolution window
# strides: stride
# padding: padding mode, same/valid
# activation: activation function
model.add(Convolution2D(
    input_shape = (28,28,1),
    filters =  32,
    kernel_size = 5,
    strides = 1,
    padding = 'same',
    activation = 'relu'
))
# First pooling layer
model.add(MaxPooling2D(
    pool_size = 2,
    strides = 2,
    padding = 'same',
))
# Second convolutional layer
model.add(Convolution2D(64,5,strides=1,padding='same',activation='relu'))
# Second pooling layer
model.add(MaxPooling2D(2,2,'same'))
# Flatten the output of the second pooling layer to 1-D
model.add(Flatten())
# First fully connected layer
model.add(Dense(1024,activation = 'relu'))
# Dropout
model.add(Dropout(0.5))
# Second fully connected layer (output)
model.add(Dense(10,activation='softmax'))
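
# Optional sanity check: print each layer's output shape and parameter count
# (28x28x32 -> 14x14x32 -> 14x14x64 -> 7x7x64 -> 3136 -> 1024 -> 10)
model.summary()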




# Define the optimizer
adam = Adam(lr=1e-4)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = adam,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=64,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)
           

9. RNN Application

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN
from keras.optimizers import Adam

# Input size: each row of the image has 28 pixels
input_size = 28
# Sequence length: 28 rows in total
time_steps = 28
# Number of hidden cells
cell_size = 50

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
x_train = x_train/255.0
x_test = x_test/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model
model = Sequential()

# Recurrent layer
model.add(SimpleRNN(
    units = cell_size, # output dimension
    input_shape = (time_steps,input_size) # input shape (rows, pixels per row)
))

# Output layer
model.add(Dense(10,activation='softmax'))




# Define the optimizer
adam = Adam(lr=1e-4)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = adam,
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=64,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)
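
If the plain RNN underfits, SimpleRNN can be swapped for an LSTM (or GRU) with the same interface. A hedged sketch, not part of the original code:

from keras.layers.recurrent import LSTM

model_lstm = Sequential()
model_lstm.add(LSTM(units=cell_size, input_shape=(time_steps, input_size)))
model_lstm.add(Dense(10, activation='softmax'))
model_lstm.compile(optimizer=Adam(lr=1e-4),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])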
           

10. Saving a Model

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model: 784 inputs, 10 outputs
model = Sequential([
    Dense(units=10,input_dim=784,bias_initializer='one',activation='softmax')
])

# Define the optimizer
sgd = SGD(lr=0.2)

# Set the optimizer and loss function, and track accuracy during training
model.compile(
    optimizer = sgd,
    loss='mse',
    metrics=['accuracy']
)

# Train the model
model.fit(x_train,y_train,batch_size=32,epochs=10)

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)

# Save the model
model.save('model.h5')  # HDF5 file; requires h5py (pip install h5py)
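
Besides saving the whole model (architecture, weights, and optimizer state) with model.save, the pieces can also be saved separately; a brief hedged sketch (the file name is illustrative):

# Save only the weights
model.save_weights('model_weights.h5')
# Save only the architecture as a JSON string
json_string = model.to_json()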
           

11. Loading a Model

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.models import load_model

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28)
print('x_shape: ',x_train.shape)
# (60000,)
print('y_shape: ',y_train.shape)
# (60000,28,28) -> (60000,784)
x_train = x_train.reshape(x_train.shape[0],-1)/255.0
x_test = x_test.reshape(x_test.shape[0],-1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Load the saved model
model = load_model('model.h5')

# Evaluate the model
loss,accuracy = model.evaluate(x_test,y_test)
print('\ntest loss',loss)
print('accuracy',accuracy)
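
A quick sanity check that the restored model works: predict a few test digits and compare with the labels (the choice of the first five samples is illustrative):

# Predicted classes vs. true classes for the first 5 test images
pred = np.argmax(model.predict(x_test[:5]), axis=-1)
true = np.argmax(y_test[:5], axis=-1)
print('predicted:', pred)
print('true:     ', true)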
           

12. Plotting the Network Structure

import numpy as np
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Dropout,Convolution2D,MaxPooling2D,Flatten
from keras.optimizers import Adam
from keras.utils.vis_utils import plot_model
import matplotlib.pyplot as plt
#install pydot and graphviz

# Load the data
(x_train,y_train),(x_test,y_test) = mnist.load_data()
# (60000,28,28) -> (60000,28,28,1)
x_train = x_train.reshape(-1,28,28,1)/255.0
x_test = x_test.reshape(-1,28,28,1)/255.0
# Convert labels to one-hot format
y_train = np_utils.to_categorical(y_train,num_classes = 10)
y_test = np_utils.to_categorical(y_test,num_classes = 10)

# Build the model
model = Sequential()

# First convolutional layer
# input_shape: shape of the input
# filters: number of convolution kernels/filters
# kernel_size: size of the convolution window
# strides: stride
# padding: padding mode, same/valid
# activation: activation function
model.add(Convolution2D(
    input_shape = (28,28,1),
    filters =  32,
    kernel_size = 5,
    strides = 1,
    padding = 'same',
    activation = 'relu'
))
# First pooling layer
model.add(MaxPooling2D(
    pool_size = 2,
    strides = 2,
    padding = 'same',
))
# Second convolutional layer
model.add(Convolution2D(64,5,strides=1,padding='same',activation='relu'))
# Second pooling layer
model.add(MaxPooling2D(2,2,'same'))
# Flatten the output of the second pooling layer to 1-D
model.add(Flatten())
# First fully connected layer
model.add(Dense(1024,activation = 'relu'))
# Dropout
model.add(Dropout(0.5))
# Second fully connected layer (output)
model.add(Dense(10,activation='softmax'))




# Define the optimizer
#adam = Adam(lr=1e-4)

# Set the optimizer and loss function, and track accuracy during training
#model.compile(optimizer = adam,loss='categorical_crossentropy',metrics=['accuracy'])

# Train the model
#model.fit(x_train,y_train,batch_size=64,epochs=10)

# Evaluate the model
#loss,accuracy = model.evaluate(x_test,y_test)
#print('\ntest loss',loss)
#print('accuracy',accuracy)
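
To actually draw the network, the imported plot_model can be called on the model. A minimal sketch, assuming pydot and graphviz are installed (the output file name is an illustrative choice):

# Render the network structure to an image file and display it
plot_model(model, to_file='model.png', show_shapes=True)
img = plt.imread('model.png')
plt.figure(figsize=(8, 8))
plt.imshow(img)
plt.axis('off')
plt.show()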
           
