
Python Implementation and Application of the AlexNet Model

AlexNet model architecture. The model is built from five convolutional layers followed by three fully connected layers. Local response normalization (LRN) is applied after the first and second convolutional layers, max pooling after the first, second, and fifth convolutional layers, and dropout after each of the three fully connected layers to curb overfitting. The table below lists each layer's parameter dimensions, strides, parameter count, and the dimensions of a training sample after that layer. Note that in the original two-GPU setup, the second, fourth, and fifth convolutional layers are split across the two GPUs; in that case the channel value of each weight tensor (the third dimension: 96 for conv2, 384 for conv4 and conv5, the values highlighted in the original table) must be divided by 2 on each GPU.

Parameter summary

Layer          W shape          b shape   strides   # parameters       output shape
conv0 (input)  ——               ——        ——        ——                 227*227*3
conv1          11*11*3*96       96        4*4       (11*11*3+1)*96     55*55*96
LRN1           ——               ——        ——        ——                 55*55*96
max_pool1      3*3              ——        2*2       ——                 27*27*96
conv2          5*5*96*256       256       1*1       (5*5*96+1)*256     27*27*256
LRN2           ——               ——        ——        ——                 27*27*256
max_pool2      3*3              ——        2*2       ——                 13*13*256
conv3          3*3*256*384      384       1*1       (3*3*256+1)*384    13*13*384
conv4          3*3*384*384      384       1*1       (3*3*384+1)*384    13*13*384
conv5          3*3*384*256      256       1*1       (3*3*384+1)*256    13*13*256
max_pool3      3*3              ——        2*2       ——                 6*6*256
fcLayer1       (6*6*256,4096)   4096      ——        (6*6*256+1)*4096   (-1,4096)
fcLayer2       (4096,4096)      4096      ——        (4096+1)*4096      (-1,4096)
fcLayer3       (4096,10)        10        ——        (4096+1)*10        (-1,10)
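As a quick sanity check on the parameter counts in the table, here is a minimal sketch in plain Python (the conv_params/fc_params helper names are my own, not part of the model code):

# A conv layer with a w*h kernel over c input channels producing n feature
# maps has (w*h*c + 1)*n parameters (the +1 is the bias per feature map).
def conv_params(w, h, c, n):
    return (w * h * c + 1) * n

def fc_params(in_d, out_d):
    return (in_d + 1) * out_d

total = (conv_params(11, 11, 3, 96)      # conv1
         + conv_params(5, 5, 96, 256)    # conv2
         + conv_params(3, 3, 256, 384)   # conv3
         + conv_params(3, 3, 384, 384)   # conv4
         + conv_params(3, 3, 384, 256)   # conv5
         + fc_params(6 * 6 * 256, 4096)  # fcLayer1
         + fc_params(4096, 4096)         # fcLayer2
         + fc_params(4096, 10))          # fcLayer3
print(total)  # 58322314, about 58.3M; pooling and LRN layers add no parameters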

Python implementation of AlexNet

1. Define the convolution layer function. My version keeps the parameters in a single group; if you want to split them into two groups as in the two-GPU layout, a sketch is given right after this list.
2. Define the LRN function (local response normalization).
3. Define the max pooling function.
4. Define the dropout function (wrapping it is optional; it makes little difference).
5. Define the fully connected layer function.
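Here is a minimal sketch of the two-group (two-GPU style) convolution; groupConv is my own name, not code from the original article, and it assumes W is created with shape [w, h, channel/2, featurenum], i.e. with the channel dimension already halved as noted for the table above:

import tensorflow as tf

def groupConv(x, W, b, stride_x, stride_y, groups = 2, padding = 'SAME'):
    # split the input channels and the output feature maps into groups,
    # convolve each pair independently (as on the two GPUs), then re-concatenate
    x_groups = tf.split(x, groups, axis = 3)
    w_groups = tf.split(W, groups, axis = 3)
    out_groups = [tf.nn.conv2d(xg, wg, strides = [1,stride_x,stride_y,1], padding = padding)
                  for xg, wg in zip(x_groups, w_groups)]
    return tf.nn.relu(tf.add(tf.concat(out_groups, axis = 3), b))

In the paper's layout only conv2, conv4, and conv5 are grouped this way; the remaining layers see the full channel dimension.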

Training AlexNet on 1000 Taobao product images for classification

import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
import random

def convinit(w,h,channel,featurenum):
    W = tf.Variable(tf.truncated_normal([w,h,channel,featurenum],stddev = 0.01))# first create the W and b variables
    b = tf.Variable(tf.constant(0.01,shape = [featurenum]))
    return W,b
def fcinit(inputD,outputD):
    W = tf.Variable(tf.truncated_normal([inputD,outputD],stddev =0.01),dtype = tf.float32)
    b = tf.Variable(tf.constant(0.01,shape = [outputD]),dtype = tf.float32)
    return W,b
def convLayer(x,W,b,stride_x,stride_y,Flagure,padding = 'SAME'):
    conv = tf.nn.conv2d(x,W,strides = [1,stride_x,stride_y,1],padding = padding)# perform the convolution
    out = tf.add(conv,b)
    if Flagure:
        return tf.nn.relu(out)
    else:
        return out # no ReLU on the final (output) layer
def LRN(x,alpha,beta,R,bias):
    y = tf.nn.local_response_normalization(x,depth_radius = R,alpha = alpha,beta = beta,bias = bias)
    return y 
def max_poolLayer(x,w,h,stride_x,stride_y,padding = 'SAME'):
    y = tf.nn.max_pool(x,ksize = [1,w,h,1],strides = [1,stride_x,stride_y,1],padding = padding)
    return y
def dropout(x,keeppro):
    y = tf.nn.dropout(x,keeppro)
    return y
def fcLayer(x,W,b,Flagure):
    out = tf.add(tf.matmul(x,W),b)
    if Flagure:
        return tf.nn.relu(out)
    else:
        return out
def model(x,keeppro):
    # A slimmed-down AlexNet variant: fewer feature maps and narrower FC layers
    # than the reference table above, sized here for 320*320 input images.
    #conv1
    W1,b1 = convinit(10,10,3,64)
    conv1 = convLayer(x,W1,b1,4,4,True,'VALID')
    LRN1 = LRN(conv1,2e-05,0.75,2,1)
    maxpool1 = max_poolLayer(LRN1,3,3,2,2,'VALID')
    #conv2
    W2,b2 = convinit(5,5,64,96)
    conv2 = convLayer(maxpool1,W2,b2,2,2,True,'VALID')
    LRN2 = LRN(conv2,2e-05,0.75,2,1)
    maxpool2 = max_poolLayer(LRN2,3,3,2,2,'VALID')
    #conv3
    W3,b3 = convinit(3,3,96,128)
    conv3 = convLayer(maxpool2,W3,b3,1,1,True,'SAME')
    #conv4
    W4,b4 = convinit(3,3,128,256)
    conv4 = convLayer(conv3,W4,b4,1,1,True,'SAME')
    #conv5
    W5,b5 = convinit(3,3,256,256)
    conv5 = convLayer(conv4,W5,b5,1,1,True,'SAME')
    maxpool5 = max_poolLayer(conv5,2,2,2,2,'SAME')
    #fclayer1
    fcIn = tf.reshape(maxpool5,[-1,4*4*256])
    W_1,b_1 = fcinit(4*4*256,512)
    fcout1 = fcLayer(fcIn,W_1,b_1,True)
    dropout1 = dropout(fcout1,keeppro)
    #fclayer2
    W_2,b_2 = fcinit(512,256)
    fcout2 = fcLayer(dropout1,W_2,b_2,True)
    dropout2 = dropout(fcout2,keeppro)
    #fclayer3
    W_3,b_3 = fcinit(256,10)
    fcout3 = fcLayer(dropout2,W_3,b_3,False)
    out = tf.nn.softmax(fcout3)# output probabilities; dropout after the softmax would distort them, so it is applied only to the hidden FC layers above
    return out
def accuracy(x,y):
    # evaluates the current model on the test set; relies on the
    # global graph nodes out/keeppro and the arrays test_x/test_y
    global out
    predict = sess.run(out,feed_dict = {x:test_x,keeppro:1.0})# keep probability 1.0: no dropout at test time
    correct_predict = tf.equal(tf.argmax(predict,1),tf.argmax(y,1))
    accuracy = tf.reduce_mean(tf.cast(correct_predict,tf.float32))
    result = sess.run(accuracy,feed_dict = {x:test_x,y:test_y,keeppro:1.0})
    return predict,result

#make data
#read file
file = 'D:\\CNN paper\\Alex_net\\image1000test200\\train.txt'
os.chdir('D:\\CNN paper\\Alex_net\\image1000test200\\train')
with open(file,'rb') as f:
    dirdata = []
    for line in f.readlines():
        lines = bytes.decode(line).strip().split('\t')
        dirdata.append(lines)
dirdata = np.array(dirdata)

#read imgdata
imgdir,label_1 = zip(*dirdata)
alldata_x = []
for dirname in imgdir:
    img = cv2.imread(dirname.strip(),cv2.IMREAD_COLOR)
    imgdata = cv2.resize(img,(320,320),interpolation = cv2.INTER_LINEAR)# interpolation must be a keyword argument; the third positional argument of cv2.resize is dst
    alldata_x.append(imgdata)
#random shuffle
alldata = zip(alldata_x,label_1)
temp = list(alldata)
random.shuffle(temp)
data_xs,data_label = zip(*temp)
data_x = np.array(data_xs)
label = [int(i) for i in data_label]
#label one hot
tf_label_onehot = tf.one_hot(label,10)
with tf.Session() as sess:
    data_y = sess.run(tf_label_onehot)
#train/test split (500 training images, 300 test images)
train_x = data_x[:500]
train_y = data_y[:500]
test_x = data_x[500:800]
test_y = data_y[500:800]

x = tf.placeholder(tf.float32,[None,320,320,3])
y = tf.placeholder(tf.float32,[None,10])
keeppro = tf.placeholder(tf.float32)
out = model(x,keeppro)
out = tf.clip_by_value(out,1e-10,1.0)# clip to avoid log(0)
loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(out),axis = 1))# cross-entropy loss
Optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(100):
        sess.run(Optimizer,feed_dict = {x:train_x,y:train_y,keeppro:0.5})
        if i%10 == 0:
            cost = sess.run(loss,feed_dict = {x:train_x,y:train_y,keeppro:0.5})
            print('after %d iteration,cost is %f'%(i,cost))
            # evaluate on the test set with keep probability 1.0 (dropout disabled)
            predict = sess.run(out,feed_dict = {x:test_x,keeppro:1.0})
            correct_predict = tf.equal(tf.argmax(predict,1),tf.argmax(y,1))
            accuracy = tf.reduce_mean(tf.cast(correct_predict,tf.float32))
            result = sess.run(accuracy,feed_dict = {x:test_x,y:test_y,keeppro:1.0})
            print('after %d iteration,accuracy is %f'%(i,result))
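As a side note, the clip-then-log cross entropy above can be numerically fragile. A common alternative (my suggestion, not code from the original article) is to have model return the pre-softmax logits (fcout3) and let TensorFlow fuse the softmax with the cross entropy:

# Sketch: assumes model() is modified to return the raw logits fcout3
# instead of the softmax probabilities.
logits = model(x,keeppro)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels = y,logits = logits))
Optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

With this form the clipping step is unnecessary, and predictions can still be read off with tf.argmax(logits,1).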
           
