
Image Classification: Building an AlexNet in PyTorch and Training It on Your Own Dataset (Cat vs. Dog)

Contents: 1 Data Preparation · 2 Building the AlexNet Model in PyTorch · 3 Training Code · 4 Test Code

1 Data Preparation

Many image-classification tutorials like to use handwritten digits as their example, which I think is a rather irresponsible way to teach. In my view, a real part of doing deep learning is putting effort into the dataset itself. Because most frameworks ship the handwritten-digit dataset built in and already preprocessed, it can be fed straight into a network, so by the end of the exercise we have never even seen what the data looks like, let alone learned how to train on a dataset of our own.

Here I use a cat-vs-dog dataset, shown in the figure below:

[Figure: sample images from the raw cat/dog dataset]

The following script splits the dataset into a training set and a validation set.

import os
from shutil import copy
import random


def mkfile(file):
    if not os.path.exists(file):
        os.makedirs(file)


# Get every folder name under the data directory (i.e. the class names)
file_path = 'D:/PycharmProjects/pytorch_test/test/data_name'
flower_class = [cla for cla in os.listdir(file_path)]

# Create the train folder, with one subdirectory per class
mkfile('data/train')
for cla in flower_class:
    mkfile('data/train/' + cla)

# Create the val folder, with one subdirectory per class
mkfile('data/val')
for cla in flower_class:
    mkfile('data/val/' + cla)

# Split ratio: train : val = 9 : 1 (split_rate is the fraction moved to val)
split_rate = 0.1

# Walk through every image of every class and split it into train/val by the ratio above
for cla in flower_class:
    cla_path = file_path + '/' + cla + '/'  # subdirectory of one class
    images = os.listdir(cla_path)  # names of all images in this class directory
    num = len(images)
    eval_index = random.sample(images, k=int(num * split_rate))  # randomly pick the validation images
    for index, image in enumerate(images):
        # images listed in eval_index go to the validation set
        if image in eval_index:
            image_path = cla_path + image
            new_path = 'data/val/' + cla
            copy(image_path, new_path)  # copy the selected image to the new path

        # all remaining images go to the training set
        else:
            image_path = cla_path + image
            new_path = 'data/train/' + cla
            copy(image_path, new_path)
        print("\r[{}] processing [{}/{}]".format(cla, index + 1, num), end="")  # progress bar
    print()

print("processing done!")
           

The split dataset is shown below; both the training set and the validation set contain cat and dog images in the chosen proportion.

[Figure: the data/ folder after splitting, with cat and dog subfolders under train/ and val/]
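
To double-check the split, a quick count of the files in each class folder should come out at roughly 9 : 1. A minimal sketch, assuming the data/ directory created by the script above:

import os

# Count the images that ended up in each split/class folder
for split in ('train', 'val'):
    for cla in sorted(os.listdir(os.path.join('data', split))):
        n = len(os.listdir(os.path.join('data', split, cla)))
        print(f'{split}/{cla}: {n} images')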

2 Building the AlexNet Model in PyTorch

import torch
from torch import nn
import torch.nn.functional as F


class MyAlexNet(nn.Module):
    def __init__(self):
        super(MyAlexNet, self).__init__()
        # Convolutional feature extractor (AlexNet-style, with halved channel counts)
        self.c1 = nn.Conv2d(in_channels=3, out_channels=48, kernel_size=11, stride=4, padding=2)
        self.ReLU = nn.ReLU()
        self.c2 = nn.Conv2d(in_channels=48, out_channels=128, kernel_size=5, stride=1, padding=2)
        self.s2 = nn.MaxPool2d(2)
        self.c3 = nn.Conv2d(in_channels=128, out_channels=192, kernel_size=3, stride=1, padding=1)
        self.s3 = nn.MaxPool2d(2)
        self.c4 = nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, stride=1, padding=1)
        self.c5 = nn.Conv2d(in_channels=192, out_channels=128, kernel_size=3, stride=1, padding=1)
        self.s5 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.flatten = nn.Flatten()
        # With a 224x224 input, the last feature map is 128 x 6 x 6 = 4608 values
        self.f6 = nn.Linear(4608, 2048)
        self.f7 = nn.Linear(2048, 2048)
        self.f8 = nn.Linear(2048, 1000)
        # Extra head mapping the 1000-dimensional output to the 2 classes (cat, dog)
        self.f9 = nn.Linear(1000, 2)

    def forward(self, x):
        x = self.ReLU(self.c1(x))
        x = self.ReLU(self.c2(x))
        x = self.s2(x)
        x = self.ReLU(self.c3(x))
        x = self.s3(x)
        x = self.ReLU(self.c4(x))
        x = self.ReLU(self.c5(x))
        x = self.s5(x)
        x = self.flatten(x)
        # Fully connected layers with ReLU and dropout; training=self.training makes
        # dropout active only in training mode and a no-op during evaluation
        x = self.ReLU(self.f6(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.ReLU(self.f7(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.ReLU(self.f8(x))
        x = F.dropout(x, p=0.5, training=self.training)
        # No dropout after the final layer: f9 returns the raw class logits
        x = self.f9(x)
        return x


if __name__ == "__main__":
    x = torch.rand([1, 3, 224, 224])
    model = MyAlexNet()
    y = model(x)
    print(y.shape)  # expected: torch.Size([1, 2])
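
If you want to see how the feature maps shrink layer by layer, and where the flatten size of 4608 (128 channels × 6 × 6) comes from, one quick way is to register forward hooks that print each submodule's output shape. A minimal sketch, assuming the MyAlexNet class above is saved as net.py:

import torch
from net import MyAlexNet

model = MyAlexNet()

def make_hook(name):
    # Print the output shape of a submodule during the forward pass
    def hook(module, inputs, output):
        print(f'{name:8s} -> {tuple(output.shape)}')
    return hook

# Register a hook on every direct submodule (the shared ReLU will print several times)
for name, module in model.named_children():
    module.register_forward_hook(make_hook(name))

with torch.no_grad():
    model(torch.rand(1, 3, 224, 224))
# The 's5' line should show (1, 128, 6, 6) and 'flatten' should show (1, 4608)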





           

3 Training Code

import torch
from torch import nn
from net import MyAlexNet
import os

from torch.optim import lr_scheduler
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader

import matplotlib.pyplot as plt

ROOT_TRAIN = r'D:/PycharmProjects/pytorch_test/test/data/train'
ROOT_TEST = r'D:/PycharmProjects/pytorch_test/test/data/val'

# Subtract 0.5 from each RGB channel and divide by 0.5, mapping pixel values into [-1, 1]
normalize = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])

train_transform = transforms.Compose([
    transforms.Resize((224, 224)),     # resize to 224x224
    transforms.RandomVerticalFlip(),   # random vertical flip for data augmentation
    transforms.ToTensor(),             # convert pixels from [0, 255] to a tensor in [0, 1]
    normalize])

val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    normalize])

train_dataset = ImageFolder(ROOT_TRAIN, transform=train_transform)
val_dataset = ImageFolder(ROOT_TEST, transform=val_transform)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=True)

# Train on the GPU if one is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Instantiate the model defined in net.py and move it to the chosen device
model = MyAlexNet().to(device)

# Loss function (cross-entropy)
loss_fn = nn.CrossEntropyLoss()

# Optimizer (SGD with momentum)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Halve the learning rate every 10 epochs
scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

# Training loop for one epoch
def train(dataloader, model, loss_fn, optimizer):
    model.train()  # make sure dropout is active again after val() switched to eval mode
    loss, current, n = 0.0, 0.0, 0
    for batch, (x, y) in enumerate(dataloader):

        # Forward pass
        image, y = x.to(device), y.to(device)
        output = model(image)
        cur_loss = loss_fn(output, y)
        _, pred = torch.max(output, dim=1)
        cur_acc = torch.sum(y == pred) / output.shape[0]

        # Backward pass
        optimizer.zero_grad()
        cur_loss.backward()
        optimizer.step()
        loss += cur_loss.item()
        current += cur_acc.item()
        n = n + 1

    train_loss = loss / n
    train_acc = current / n
    print('train_loss:' + str(train_loss))
    print('train_acc:' + str(train_acc))
    return train_loss, train_acc

# Validation loop for one epoch
def val(dataloader, model, loss_fn):
    # Switch the model to evaluation mode (disables dropout)
    model.eval()
    loss, current, n = 0.0, 0.0, 0
    with torch.no_grad():
        for batch, (x, y) in enumerate(dataloader):
            image, y = x.to(device), y.to(device)
            output = model(image)
            cur_loss = loss_fn(output, y)
            _, pred = torch.max(output, dim=1)
            cur_acc = torch.sum(y == pred) / output.shape[0]
            loss += cur_loss.item()
            current += cur_acc.item()
            n = n + 1

    val_loss = loss / n
    val_acc = current / n
    print('val_loss:' + str(val_loss))
    print('val_acc:' + str(val_acc))
    return val_loss, val_acc

# Plotting helpers
def matplot_loss(train_loss, val_loss):
    plt.plot(train_loss, label='train_loss')
    plt.plot(val_loss, label='val_loss')
    plt.legend(loc='best')
    plt.ylabel('loss', fontsize=12)
    plt.xlabel('epoch', fontsize=12)
    plt.title("Training vs. validation loss")
    plt.show()

def matplot_acc(train_acc, val_acc):
    plt.plot(train_acc, label='train_acc')
    plt.plot(val_acc, label='val_acc')
    plt.legend(loc='best')
    plt.ylabel('acc', fontsize=12)
    plt.xlabel('epoch', fontsize=12)
    plt.title("Training vs. validation accuracy")
    plt.show()


# Start training
loss_train = []
acc_train = []
loss_val = []
acc_val = []

epoch = 100
best_acc = 0
for t in range(epoch):
    print(f"epoch {t + 1}\n--------------")
    train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
    val_loss, val_acc = val(val_dataloader, model, loss_fn)
    scheduler.step()  # step the LR scheduler after this epoch's optimizer updates

    loss_train.append(train_loss)
    acc_train.append(train_acc)
    loss_val.append(val_loss)
    acc_val.append(val_acc)

    # Save the weights of the best model so far (by validation accuracy)
    if val_acc > best_acc:
        folder = 'save_model'
        if not os.path.exists(folder):
            os.mkdir(folder)
        best_acc = val_acc
        print(f'save best model at epoch {t + 1}')
        torch.save(model.state_dict(), 'save_model/best_model.pth')
    # Save the weights of the final epoch
    if t == epoch - 1:
        torch.save(model.state_dict(), 'save_model/last_model.pth')
print('Done!')

matplot_loss(loss_train, loss_val)
matplot_acc(acc_train, acc_val)
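
One detail worth confirming before writing the inference code: ImageFolder assigns integer labels to the class folders in alphabetical order, and the classes list used in section 4 has to follow that same order. A minimal sketch of the check, assuming the data/train folder created earlier:

from torchvision import transforms
from torchvision.datasets import ImageFolder

# ImageFolder sorts class folders alphabetically, so with data/train/cat and
# data/train/dog the mapping is 'cat' -> 0 and 'dog' -> 1
dataset = ImageFolder('data/train', transform=transforms.ToTensor())
print(dataset.classes)       # expected: ['cat', 'dog']
print(dataset.class_to_idx)  # expected: {'cat': 0, 'dog': 1}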






           

4 Test Code

import torch
from net import MyAlexNet
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.transforms import ToPILImage

ROOT_TEST = r'D:/PycharmProjects/pytorch_test/test/data/val'

# Subtract 0.5 from each RGB channel and divide by 0.5, mapping pixel values into [-1, 1]
normalize = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])

val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    normalize])

val_dataset = ImageFolder(ROOT_TEST, transform=val_transform)

# Run inference on the GPU if one is available
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Instantiate the model defined in net.py and move it to the chosen device
model = MyAlexNet().to(device)

# Load the weights saved by train.py (map_location lets a GPU-trained model load on CPU)
model.load_state_dict(torch.load('D:/PycharmProjects/pytorch_test/test/save_model/best_model.pth',
                                 map_location=device))

# Class names in the order ImageFolder assigns labels (alphabetical folder order)
classes = [
    'cat',
    'dog',
]

# Convert a tensor back to a PIL image for visualisation
show = ToPILImage()

# Switch to evaluation mode (disables dropout)
model.eval()

# Run inference on the first 50 images of the validation set
for i in range(50):
    x, y = val_dataset[i][0], val_dataset[i][1]
    show(x).show()  # the displayed colours look off because x is already normalised
    x = torch.unsqueeze(x, dim=0).to(device)  # add the batch dimension
    with torch.no_grad():
        pred = model(x)
        predicted, actual = classes[torch.argmax(pred[0])], classes[y]
        print(f'Predicted: "{predicted}", Actual: "{actual}"')
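
To classify a single photo that is not part of the validation set, the image has to go through the same preprocessing before it is fed to the model. A minimal sketch, assuming the MyAlexNet class from net.py, the best_model.pth saved by the training script, and a hypothetical image file my_cat.jpg:

import torch
from PIL import Image
from torchvision import transforms

from net import MyAlexNet

device = 'cuda' if torch.cuda.is_available() else 'cpu'
classes = ['cat', 'dog']

# Same preprocessing as the validation set
val_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])

model = MyAlexNet().to(device)
model.load_state_dict(torch.load('save_model/best_model.pth', map_location=device))
model.eval()

# 'my_cat.jpg' is a placeholder; replace it with the path to your own image
img = Image.open('my_cat.jpg').convert('RGB')
x = val_transform(img).unsqueeze(0).to(device)  # add the batch dimension

with torch.no_grad():
    probs = torch.softmax(model(x), dim=1)[0]
print({cls: round(p.item(), 3) for cls, p in zip(classes, probs)})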
           
