Embedded Image Processing from Scratch (PI+QT+OpenCV): Hands-on Practice
1. Overview: http://www.cnblogs.com/jsxyhelu/p/7907241.html
2. Environment setup: http://www.cnblogs.com/jsxyhelu/p/7908226.html
3. Two examples: http://www.cnblogs.com/jsxyhelu/p/8000804.html
4. Program framework: http://www.cnblogs.com/jsxyhelu/p/7953805.html
5. Building and using the latest OpenCV: http://www.cnblogs.com/jsxyhelu/p/8000819.html
6. Comprehensive experiment: http://www.cnblogs.com/jsxyhelu/p/8000829.html
7. Odds and ends: http://www.cnblogs.com/jsxyhelu/p/8007117.html
Finally, we need a comprehensive experiment to verify all the work done so far. To that end, the experiment is defined as: perform real-time feature matching on the camera image using several feature algorithms (ORB/SIFT/SURF/BRISK). This verifies both the OpenCV library build (the contrib modules are used, so the library has to be compiled from source) and the basic program framework (which involves camera access). We build and test in a virtual machine (the PC version of the Pi system) first, and then port the result to the Pi.
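Before wiring everything into Qt, a small standalone check can confirm that the self-built library and its contrib modules are actually usable. This is only a sketch and not part of the project; it assumes an OpenCV 3.x build with opencv_contrib installed under /usr/local, and can be compiled with something like g++ contribcheck.cpp -o contribcheck -I/usr/local/include -L/usr/local/lib -lopencv_world:

// contribcheck.cpp - sanity check for a self-built OpenCV with contrib
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>   // ORB, BRISK
#include <opencv2/xfeatures2d.hpp>  // SIFT, SURF (contrib module)

int main()
{
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;

    // These calls only succeed if the corresponding modules were built in.
    cv::Ptr<cv::Feature2D> orb   = cv::ORB::create();
    cv::Ptr<cv::Feature2D> brisk = cv::BRISK::create();
    cv::Ptr<cv::Feature2D> sift  = cv::xfeatures2d::SIFT::create();
    cv::Ptr<cv::Feature2D> surf  = cv::xfeatures2d::SURF::create();

    std::cout << "ORB/BRISK/SIFT/SURF created successfully; contrib build OK" << std::endl;
    return 0;
}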
The project configuration file:
#-------------------------------------------------
#
# Project created by QtCreator 2017-11-29T07:39:32
#
#-------------------------------------------------
QT += core gui
greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
TARGET = GOQTTemplate2
TEMPLATE = app
INCLUDEPATH += /usr/local/include/opencv \
/usr/local/include/opencv2
LIBS += /usr/local/lib/libopencv_world.so
SOURCES += main.cpp\
mainwindow.cpp \
clickedlabel.cpp
HEADERS += mainwindow.h \
clickedlabel.h
FORMS += mainwindow.ui
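Note that LIBS points at the single libopencv_world.so, which exists only if OpenCV was configured with BUILD_opencv_world=ON. If your self-built installation instead provides the individual module libraries, the link line would look roughly like this (module names assume an OpenCV 3.x build with contrib installed under /usr/local):

LIBS += -L/usr/local/lib \
        -lopencv_core \
        -lopencv_imgproc \
        -lopencv_highgui \
        -lopencv_videoio \
        -lopencv_features2d \
        -lopencv_xfeatures2d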
The main program file. A quick outline of the flow: the program opens the default camera at startup and then continuously grabs and displays camera frames. When the displayed image is clicked, the current frame is saved as the template, feature matching starts, and the match result is shown. A button switches between the different feature-point algorithms:
//by jsxyhelu 2017/12/6
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <QMouseEvent>
using namespace cv;
using namespace cv::xfeatures2d;   // SIFT/SURF live in the contrib module (if not already brought in by mainwindow.h)
// global variables
Mat src;
Mat gray;
Mat tmp;
Mat dst;
Mat matMatch;                      // template image
double m_lastTime;                 // timestamp
Mat grayLeft;
Mat grayRight;
Mat descriptorsLeft;
std::vector<KeyPoint> keypointsLeft;
Mat descriptorsRight;
std::vector<KeyPoint> keypointsRight;
std::vector< DMatch > matches;
std::vector< DMatch > good_matches;
Mat img_matches;
int imethod;                       // 0-ORB 1-SIFT 2-SURF 3-BRISK
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    timer = new QTimer(this);
    imag = new QImage();                                          // initialization
    connect(timer, SIGNAL(timeout()), this, SLOT(readFarme()));   // on timeout, read the current camera frame
    bMethod = false;                                              // whether the algorithm is applied
    on_pushButton_clicked();                                      // open the camera and start grabbing
    // clicking the picture takes the current frame as the template
    clickLabel = new ClickedLabel(this);
    clickLabel->setGeometry(0,0,800,400);
    connect(clickLabel,SIGNAL(clicked(ClickedLabel*)),this,SLOT(on_pushButton_3_clicked()));
    imethod = 0;                                                  // ORB
    setWindowState(Qt::WindowMaximized);                          // maximize the window
}
MainWindow::~MainWindow()
{
    delete ui;
}
//////////////////////////////////// event handlers ///////////////////////////////////////////////////////
// open the camera
void MainWindow::on_pushButton_clicked()
{
    // open the camera and grab video from it
    videocapture = new VideoCapture(0);
    // start the timer; a timeout() signal fires for every frame
    timer->start(33);
}
// main processing: read the next frame on every timeout()
void MainWindow::readFarme()
{
    // grab the next frame from the camera
    videocapture->read(matFrame);
    src = matFrame.clone();
    m_lastTime = (double)getTickCount();
    tmp = matFrame.clone();
    // build the output canvas: camera frame on the left, template on the right
    cv::resize(tmp,tmp,Size(200,200));
    dst = Mat(Size(tmp.cols*2,tmp.rows),tmp.type(),Scalar(255));
    tmp.copyTo(dst(cv::Rect(0,0,200,200)));
    // create the feature detector/descriptor and a matcher with the norm that fits it
    Ptr<Feature2D> extractor;
    BFMatcher matcher;
    switch (imethod)
    {
    case 1: //"SIFT"
        extractor = SIFT::create();
        matcher = BFMatcher(NORM_L2);
        break;
    case 2: //"SURF"
        extractor = SURF::create();
        matcher = BFMatcher(NORM_L2);
        break;
    case 3: //"BRISK"
        extractor = BRISK::create();
        matcher = BFMatcher(NORM_HAMMING);
        break;
    case 0: //"ORB"
    default:
        extractor = ORB::create();
        matcher = BFMatcher(NORM_HAMMING);
        break;
    }
    if(matMatch.rows > 0)
    {
        // match against the template and filter the matches by distance
        double max_dist = 0; double min_dist = 100;
        cv::resize(matMatch,matMatch,Size(200,200));
        // convert both images to grayscale
        cvtColor(tmp,grayLeft,COLOR_BGR2GRAY);
        cvtColor(matMatch,grayRight,COLOR_BGR2GRAY);
        // detect keypoints and compute descriptors
        extractor->detectAndCompute(grayLeft,Mat(),keypointsLeft,descriptorsLeft);
        extractor->detectAndCompute(grayRight,Mat(),keypointsRight,descriptorsRight);
        matcher.match( descriptorsLeft, descriptorsRight, matches );
        // find the smallest and largest match distances
        for( int i = 0; i < descriptorsLeft.rows; i++ )
        {
            double dist = matches[i].distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }
        // keep only the "good" matches, i.e. those close to the minimum distance
        for( int i = 0; i < descriptorsLeft.rows; i++ )
        {
            if( matches[i].distance <= max(2*min_dist, 0.02) )
            {
                good_matches.push_back( matches[i]);
            }
        }
        drawMatches( tmp, keypointsLeft, matMatch, keypointsRight, good_matches, dst );
        // clear for the next frame
        good_matches.clear();
    }
    cv::resize(dst,dst,Size(800,400));
    // overlay the name of the current method
    switch (imethod)
    {
    case 0:
        putText(dst,"METHOD:ORB",Point(10,350),CV_FONT_HERSHEY_DUPLEX,1.0f,Scalar(0,0,255));
        break;
    case 1:
        putText(dst,"METHOD:SIFT",Point(10,350),CV_FONT_HERSHEY_DUPLEX,1.0f,Scalar(0,0,255));
        break;
    case 2:
        putText(dst,"METHOD:SURF",Point(10,350),CV_FONT_HERSHEY_DUPLEX,1.0f,Scalar(0,0,255));
        break;
    case 3:
        putText(dst,"METHOD:BRISK",Point(10,350),CV_FONT_HERSHEY_DUPLEX,1.0f,Scalar(0,0,255));
        break;
    }
    // convert the Mat to a QPixmap
    QPixmap qpixmap = Mat2QImage(dst);
    // show the picture on the label
    clickLabel->setPixmap(qpixmap);
}
// switch to the next feature-point method
void MainWindow::on_pushButton_2_clicked()
{
    imethod += 1;
    if (imethod == 4)
    {
        imethod = 0;   // wrap around back to ORB
    }
}
// take the current frame as the matching template
void MainWindow::on_pushButton_3_clicked()
{
    matMatch = src.clone();
}
// exit
void MainWindow::on_pushButton_4_clicked()
{
    timer->stop();             // stop grabbing frames
    videocapture->release();
    // quit the application
    QApplication::exit(0);
}
////////////////////////// helper functions //////////////////////////////////////////////////
// convert a cv::Mat into a QPixmap for display in Qt
QPixmap Mat2QImage(Mat src)
{
    QImage img;
    // convert according to what Qt expects for display
    if(src.channels() == 3)
    {
        cvtColor( src, tmp, CV_BGR2RGB );
        img = QImage( (const unsigned char*)(tmp.data), tmp.cols, tmp.rows, QImage::Format_RGB888 );
    }
    else
    {
        img = QImage( (const unsigned char*)(src.data), src.cols, src.rows, QImage::Format_Indexed8 );
    }
    QPixmap qimg = QPixmap::fromImage(img) ;
    return qimg;
}
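The good-match filter in readFarme() keeps the matches whose distance is close to the observed minimum, following the classic OpenCV matching tutorial. A common alternative is Lowe's ratio test on the two nearest neighbours. The following is only a sketch of how the filtering part could be swapped out (it reuses the globals defined above and the standard cv::BFMatcher::knnMatch call), not what the project actually ships:

    // Alternative filtering with Lowe's ratio test: keep a match only if its
    // best candidate is clearly better than the second-best one.
    std::vector< std::vector<DMatch> > knnMatches;
    matcher.knnMatch( descriptorsLeft, descriptorsRight, knnMatches, 2 );
    const float ratioThresh = 0.75f;   // typical value from Lowe's SIFT paper
    good_matches.clear();
    for( size_t i = 0; i < knnMatches.size(); i++ )
    {
        if( knnMatches[i].size() == 2 &&
            knnMatches[i][0].distance < ratioThresh * knnMatches[i][1].distance )
        {
            good_matches.push_back( knnMatches[i][0] );
        }
    }

The ratio test tends to reject ambiguous matches more reliably than a global distance threshold, at the cost of one extra neighbour lookup per descriptor.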

Current focus: image stitching and fusion, image recognition
Contact: [email protected]