
[Mask R-CNN] (12): Understanding the code in coco.py

Contents: 1. Imports and global variables; 2. Configuration; 3. Dataset; 4. COCO evaluation; 5. Training

1. Imports and Global Variables

import os
import sys
import time
import numpy as np
import imgaug  # https://github.com/aleju/imgaug (pip3 install imgaug)

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils

import zipfile
import urllib.request
import shutil

# Root directory of the project
ROOT_DIR = os.path.abspath(".")

# Import Mask RCNN
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import model as modellib, utils

# Path to the pre-trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

# Directory to save logs and model checkpoints; can be overridden with the --logs command line argument
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
           
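The pre-trained COCO weights referenced by COCO_MODEL_PATH are not created by this script. As a small optional sketch (not part of coco.py itself), they can be fetched with the download helper that ships with the Matterport mrcnn package:

# Optional sketch: download mask_rcnn_coco.h5 on first use
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)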

2. Configuration

class CocoConfig(Config):
    """Configuration for training on MS COCO.
    Derives from the base Config class and overrides values specific
    to the COCO dataset.
    """
    # Give the configuration a recognizable name
    NAME = "coco"

    # Number of images to process on each GPU at a time
    IMAGES_PER_GPU = 1

    # Number of GPUs to train on (default is 1)
    # GPU_COUNT = 8

    # Number of classes to detect (including background)
    NUM_CLASSES = 1 + 80  # COCO has 80 classes
           
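When the configuration is instantiated, the Config base class derives the effective batch size as GPU_COUNT * IMAGES_PER_GPU. A quick sketch to inspect the resulting values:

config = CocoConfig()
config.display()          # print all configuration values
print(config.BATCH_SIZE)  # 1 GPU * 1 image per GPU = 1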

3. Dataset

3.1 Loading the COCO dataset

class CocoDataset(utils.Dataset):
    def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
                  class_map=None, return_coco=False, auto_download=False):
        """Load a subset of the COCO dataset.
        dataset_dir: The root directory of the COCO dataset.
        subset: What to load (train, val, minival, valminusminival)
        year: What dataset year to load (2014, 2017) as a string, not an integer
        class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Will map classes from different
            datasets to the same class ID.
        return_coco: If True, returns the COCO object.
        auto_download: Automatically download and unzip MS-COCO images and annotations
        """

        if auto_download is True:
            self.auto_download(dataset_dir, subset, year)

        coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
        if subset == "minival" or subset == "valminusminival":
            subset = "val"
        image_dir = "{}/{}{}".format(dataset_dir, subset, year)

        # Load all classes or a subset?
        if not class_ids:
            # All classes
            class_ids = sorted(coco.getCatIds())

        # All images or a subset?
        if class_ids:
            image_ids = []
            for id in class_ids:
                image_ids.extend(list(coco.getImgIds(catIds=[id])))
            # Remove duplicates
            image_ids = list(set(image_ids))
        else:
            # All images
            image_ids = list(coco.imgs.keys())

        # Add classes
        for i in class_ids:
            self.add_class("coco", i, coco.loadCats(i)[0]["name"])

        # Add images
        for i in image_ids:
            self.add_image(
                "coco", image_id=i,
                path=os.path.join(image_dir, coco.imgs[i]['file_name']),
                width=coco.imgs[i]["width"],
                height=coco.imgs[i]["height"],
                annotations=coco.loadAnns(coco.getAnnIds(
                    imgIds=[i], catIds=class_ids, iscrowd=None)))
        if return_coco:
            return coco
           
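A minimal usage sketch of load_coco() (the dataset path is hypothetical). Note that prepare() must be called afterwards so the class and image info lists are finalized:

dataset = CocoDataset()
dataset.load_coco("/path/to/coco", "train", year="2014")
dataset.prepare()
print("Images: {}  Classes: {}".format(len(dataset.image_ids), len(dataset.class_names)))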

3.2 Automatically downloading the dataset

    def auto_download(self, dataDir, dataType, dataYear):
        """Download the COCO dataset/annotations if requested.
        dataDir: The root directory of the COCO dataset.
        dataType: What to load (train, val, minival, valminusminival)
        dataYear: What dataset year to load (2014, 2017) as a string, not an integer
        Note:
            For 2014, use "train", "val", "minival", or "valminusminival"
            For 2017, only "train" and "val" annotations are available
        """

        # Setup paths and file names
        if dataType == "minival" or dataType == "valminusminival":
            imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
            imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
            imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
        else:
            imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
            imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
            imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
        # print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)

        # Create the main folder if it doesn't exist yet
        if not os.path.exists(dataDir):
            os.makedirs(dataDir)

        # Download images if not available locally
        if not os.path.exists(imgDir):
            os.makedirs(imgDir)
            print("Downloading images to " + imgZipFile + " ...")
            with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
                shutil.copyfileobj(resp, out)
            print("... done downloading.")
            print("Unzipping " + imgZipFile)
            with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
                zip_ref.extractall(dataDir)
            print("... done unzipping")
        print("Will use images in " + imgDir)

        # Setup annotations data paths
        annDir = "{}/annotations".format(dataDir)
        if dataType == "minival":
            annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
            annFile = "{}/instances_minival2014.json".format(annDir)
            annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
            unZipDir = annDir
        elif dataType == "valminusminival":
            annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
            annFile = "{}/instances_valminusminival2014.json".format(annDir)
            annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
            unZipDir = annDir
        else:
            annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
            annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
            annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
            unZipDir = dataDir
        # print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)

        # Download annotations if not available locally
        if not os.path.exists(annDir):
            os.makedirs(annDir)
        if not os.path.exists(annFile):
            if not os.path.exists(annZipFile):
                print("Downloading zipped annotations to " + annZipFile + " ...")
                with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
                    shutil.copyfileobj(resp, out)
                print("... done downloading.")
            print("Unzipping " + annZipFile)
            with zipfile.ZipFile(annZipFile, "r") as zip_ref:
                zip_ref.extractall(unZipDir)
            print("... done unzipping")
        print("Will use annotations in " + annFile)
           
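auto_download() is normally not called directly; it is triggered through load_coco(). The sketch below (with a hypothetical directory) downloads and unzips the val2017 images and annotations on first use:

dataset = CocoDataset()
dataset.load_coco("/data/coco", "val", year="2017", auto_download=True)
dataset.prepare()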

3.3 Loading masks

    def load_mask(self, image_id):
        """Load instance masks for the given image.

        Different datasets use different ways to store masks. This function
        converts the different mask formats to one format in the form of a
        bitmap of shape [height, width, instances].

        Returns:
        masks: A bool array of shape [height, width, instance count] with
               one mask per instance.
        class_ids: a 1D array of the class IDs of the instance masks.
        """
        # If not a COCO image, delegate to the parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "coco":
            return super(CocoDataset, self).load_mask(image_id)

        instance_masks = []
        class_ids = []
        annotations = self.image_info[image_id]["annotations"]
        # Build a mask of shape [height, width, instance_count] and a list
        # of class IDs that correspond to each channel of the mask.
        for annotation in annotations:
            class_id = self.map_source_class_id(
                "coco.{}".format(annotation['category_id']))
            if class_id:
                m = self.annToMask(annotation, image_info["height"],
                                   image_info["width"])
                # Some objects are so small that they're less than 1 pixel in area
                # and end up rounded out. Skip those objects.
                if m.max() < 1:
                    continue
                # Is it a crowd? If so, use a negative class ID.
                if annotation['iscrowd']:
                    # Use negative class IDs for crowds
                    class_id *= -1
                    # For crowd masks, annToMask() sometimes returns a mask
                    # smaller than the given dimensions. If so, resize it.
                    if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
                        m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
                instance_masks.append(m)
                class_ids.append(class_id)

        # Pack instance masks into an array
        if class_ids:
            mask = np.stack(instance_masks, axis=2).astype(bool)  # np.bool is removed in recent NumPy
            class_ids = np.array(class_ids, dtype=np.int32)
            return mask, class_ids
        else:
            # Call the super class to return an empty mask
            return super(CocoDataset, self).load_mask(image_id)
           
    def image_reference(self, image_id):
        """Return a link to the image on the COCO Website."""
        info = self.image_info[image_id]
        if info["source"] == "coco":
            return "http://cocodataset.org/#explore?id={}".format(info["id"])
        else:
            return super(CocoDataset, self).image_reference(image_id)

    # The following two functions are from pycocotools with a few small changes.

    def annToRLE(self, ann, height, width):
        """
        Convert an annotation, which can be polygons or uncompressed RLE, to RLE.
        :return: RLE (run-length encoded mask)
        """
        segm = ann['segmentation']
        if isinstance(segm, list):
            # polygon -- a single object might consist of multiple parts
            # we merge all parts into one mask rle code
            rles = maskUtils.frPyObjects(segm, height, width)
            rle = maskUtils.merge(rles)
        elif isinstance(segm['counts'], list):
            # uncompressed RLE
            rle = maskUtils.frPyObjects(segm, height, width)
        else:
            # rle
            rle = ann['segmentation']
        return rle

    def annToMask(self, ann, height, width):
        """
        Convert an annotation, which can be polygons, uncompressed RLE, or RLE, to a binary mask.
        :return: binary mask (numpy 2D array)
        """
        rle = self.annToRLE(ann, height, width)
        m = maskUtils.decode(rle)
        return m
           
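A small sketch to inspect what load_mask() returns, assuming a dataset has been loaded and prepared as in section 3.1:

masks, class_ids = dataset.load_mask(0)
print(masks.shape)   # (height, width, instance_count), dtype bool
print(class_ids)     # one class ID per instance; negative IDs mark crowd regions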

4. COCO Evaluation

def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
    """Arrange results to match the COCO specs at http://cocodataset.org/#format
    """
    # If there are no results, return an empty list
    if rois is None:
        return []

    results = []
    for image_id in image_ids:
        # Loop through detections
        for i in range(rois.shape[0]):
            class_id = class_ids[i]
            score = scores[i]
            bbox = np.around(rois[i], 1)
            mask = masks[:, :, i]

            result = {
                "image_id": image_id,
                "category_id": dataset.get_source_class_id(class_id, "coco"),
                "bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
                "score": score,
                "segmentation": maskUtils.encode(np.asfortranarray(mask))
            }
            results.append(result)
    return results
           
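The bbox conversion above is worth spelling out: Mask R-CNN returns ROIs as [y1, x1, y2, x2], while COCO expects [x, y, width, height]. A worked example with made-up numbers:

roi = [10.0, 20.0, 50.0, 80.0]                                  # y1, x1, y2, x2
coco_bbox = [roi[1], roi[0], roi[3] - roi[1], roi[2] - roi[0]]  # x, y, width, height
print(coco_bbox)                                                # [20.0, 10.0, 60.0, 40.0]
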
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs the official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" for bounding box or "segm" for segmentation evaluation
    limit: if not 0, the number of images to use for evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset of images
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools error out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)

    # Load results. This modifies 'results' with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
           
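A hedged usage sketch, assuming model, dataset_val and coco have been built as in the training section below; running it once per eval_type gives both box AP and mask AP:

evaluate_coco(model, dataset_val, coco, eval_type="bbox", limit=50)  # bounding box AP
evaluate_coco(model, dataset_val, coco, eval_type="segm", limit=50)  # segmentation (mask) AP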

5. Training

if __name__ == '__main__':
    import argparse

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN on MS COCO.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'evaluate' on MS COCO")
    parser.add_argument('--dataset', required=True,
                        metavar="/path/to/coco/",
                        help='Directory of the MS-COCO dataset')
    parser.add_argument('--year', required=False,
                        default=DEFAULT_DATASET_YEAR,
                        metavar="<year>",
                        help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
    parser.add_argument('--model', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--limit', required=False,
                        default=500,
                        metavar="<image count>",
                        help='Images to use for evaluation (default=500)')
    parser.add_argument('--download', required=False,
                        default=False,
                        metavar="<True|False>",
                        help='Automatically download and unzip MS-COCO files (default=False)',
                        type=bool)
    args = parser.parse_args()
    print("Command: ", args.command)
    print("Model: ", args.model)
    print("Dataset: ", args.dataset)
    print("Year: ", args.year)
    print("Logs: ", args.logs)
    print("Auto Download: ", args.download)

    # Configurations
    if args.command == "train":
        config = CocoConfig()
    else:
        class InferenceConfig(CocoConfig):
            # Set batch size to 1 since we run inference on one image at a time.
            # Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
            DETECTION_MIN_CONFIDENCE = 0
        config = InferenceConfig()
    config.display()

    # Create the model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)

    # Select which weights file to load
    if args.model.lower() == "coco":
        model_path = COCO_MODEL_PATH
    elif args.model.lower() == "last":
        # Find the last trained weights
        model_path = model.find_last()
    elif args.model.lower() == "imagenet":
        # Start from ImageNet trained weights
        model_path = model.get_imagenet_weights()
    else:
        model_path = args.model

    # Load weights
    print("Loading weights ", model_path)
    model.load_weights(model_path, by_name=True)

    # Train or evaluate
    if args.command == "train":
        # Training dataset. Use the training set plus 35K images from the validation set, as in the Mask RCNN paper.
        dataset_train = CocoDataset()
        dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
        if args.year in '2014':
            dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
        dataset_train.prepare()

        # Validation dataset
        dataset_val = CocoDataset()
        val_type = "val" if args.year in '2017' else "minival"
        dataset_val.load_coco(args.dataset, val_type, year=args.year, auto_download=args.download)
        dataset_val.prepare()

        # Image augmentation
        # Right/left flip 50% of the time
        augmentation = imgaug.augmenters.Fliplr(0.5)

        # Training - Stage 1
        print("Training network heads")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=40,
                    layers='heads',
                    augmentation=augmentation)

        # Training - Stage 2
        # Finetune layers from ResNet stage 4 and up
        print("Fine tune Resnet stage 4 and up")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=120,
                    layers='4+',
                    augmentation=augmentation)

        # Training - Stage 3
        # Fine tune all layers
        print("Fine tune all layers")
        model.train(dataset_train, dataset_val,
                    learning_rate=config.LEARNING_RATE / 10,
                    epochs=160,
                    layers='all',
                    augmentation=augmentation)

    elif args.command == "evaluate":
        # Validation dataset
        dataset_val = CocoDataset()
        val_type = "val" if args.year in '2017' else "minival"
        coco = dataset_val.load_coco(args.dataset, val_type, year=args.year, return_coco=True, auto_download=args.download)
        dataset_val.prepare()
        print("Running COCO evaluation on {} images.".format(args.limit))
        evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit))
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'evaluate'".format(args.command))
           
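The script is driven from the command line; the invocations below correspond to the argparse flags defined above (dataset paths are placeholders):

# Train a new model starting from pre-trained COCO weights
python coco.py train --dataset=/path/to/coco --model=coco

# Continue training the last model you trained
python coco.py train --dataset=/path/to/coco --model=last

# Run COCO evaluation on the last trained model
python coco.py evaluate --dataset=/path/to/coco --model=last --limit=500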
