When running demo.py in py-faster-rcnn, each detected class is displayed in its own window for the same image, which is awkward to look at. With a small change, every object class detected in an image is annotated in a single window, which is much more convenient.
The change itself is simple: move the three lines of code that sit before and after the for loop in the vis_detections function so that they sit before and after the for loop in the demo function instead.
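To make the one-window idea concrete before the full listing: the trick is just the usual matplotlib pattern of creating a single figure/axes and letting every drawing call share it. The toy snippet below is not from demo.py; the draw_dets helper, the grey dummy image and the fake boxes are made up purely to illustrate that pattern.

# Toy illustration of the shared-axes pattern used in the modified demo.py.
# draw_dets and all boxes here are invented; only the matplotlib usage matters.
import matplotlib.pyplot as plt
import numpy as np

im = 128 * np.ones((300, 500, 3), dtype=np.uint8)    # dummy grey image
fig, ax = plt.subplots(figsize=(12, 12))              # the figure is created once
ax.imshow(im, aspect='equal')

def draw_dets(ax, class_name, dets):
    # draw one class's boxes onto the shared axes (same role as vis_detections)
    for x1, y1, x2, y2, score in dets:
        ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
                                   fill=False, edgecolor='red', linewidth=1))
        ax.text(x1, y1 - 2, '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5), fontsize=14, color='white')

draw_dets(ax, 'dog',    [(50, 60, 200, 250, 0.97)])   # fake detections, one call per class
draw_dets(ax, 'person', [(220, 40, 320, 280, 0.91)])
plt.axis('off')
plt.tight_layout()
plt.show()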
The complete code is below (I also changed the bounding-box line width from 3.5 to 1; the old value was too thick and looked bad):
py-faster-rcnn/tools/demo.py (the comments I added mark the changes)
#!/usr/bin/env python
#coding=utf8
# The line above declares the file encoding; it is needed when the source contains
# non-ASCII comments, otherwise Python refuses to run the file
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
           'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair',
           'cow', 'diningtable', 'dog', 'horse',
           'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor')

NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
               'ZF_faster_rcnn_final.caffemodel')}
# vis_detections now takes an extra ax argument
def vis_detections(im, class_name, dets, ax, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    # These three lines are removed here and moved into demo():
    # im = im[:, :, (2, 1, 0)]
    # fig, ax = plt.subplots(figsize=(12, 12))
    # ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=1)  # line width changed from 3.5 to 1
            )
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                 fontsize=14)
    # These three lines are removed here and moved into demo():
    # plt.axis('off')
    # plt.tight_layout()
    # plt.draw()
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""
    # Load the demo image
    im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    # The three lines that used to precede the for loop in vis_detections now live here,
    # so the figure is created only once per image
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        # pass ax into vis_detections so every class is drawn on the same axes
        vis_detections(im, cls, dets, ax, thresh=CONF_THRESH)
    # The three lines that used to follow the for loop in vis_detections now live here
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def parse_args():
    """Parse input arguments."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
                        default=0, type=int)
    parser.add_argument('--cpu', dest='cpu_mode',
                        help='Use CPU mode (overrides --gpu)',
                        action='store_true')
    parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
                        choices=NETS.keys(), default='vgg16')

    args = parser.parse_args()
    return args
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals

    args = parse_args()

    prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))

    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)

    print '\n\nLoaded network {:s}'.format(caffemodel)

    # Warmup on a dummy image
    im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
    for i in xrange(2):
        _, _ = im_detect(net, im)
    im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
                '001763.jpg', '004545.jpg']
    for im_name in im_names:
        print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
        print 'Demo for data/demo/{}'.format(im_name)
        demo(net, im_name)

    plt.show()
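With this change, plt.show() at the very end pops up one window per demo image, each containing all detected classes. If you also want to keep a copy of each annotated image on disk, an easy option (not part of the original script) is to save the figure at the end of demo(); matplotlib's savefig does this, and the filename below is only an example:

    # optional: add at the end of demo(), right after plt.draw()
    fig.savefig('det_' + image_name + '.png')  # example name, e.g. det_000456.jpg.png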
