// Source: 天天看點 (web article)
//
// Annotated source of MarkerDetector.cpp from Example_MarkerBasedAR, with detailed Chinese commentary

/*****************************************************************************
*   MarkerDetector.cpp
*   Example_MarkerBasedAR
******************************************************************************
*   by Khvedchenia Ievgen, 5th Dec 2012
*   http://computer-vision-talks.com
******************************************************************************
*   Ch2 of the book "Mastering OpenCV with Practical Computer Vision Projects"
*   Copyright Packt Publishing 2012.
*   http://www.packtpub.com/cool-projects-with-opencv/book
*****************************************************************************/
 
 
// Standard includes:
#include <iostream>
#include <sstream>
 
 
// File includes:
#include "MarkerDetector.hpp"
#include "Marker.hpp"
#include "TinyLA.hpp"
#include "DebugHelpers.hpp"
 
// Constructor: copies the camera calibration into cv::Mat form and precomputes
// the marker's canonical 3D corners (unit square on the z=0 plane) and the 2D
// corners of the rectified marker image used during recognition.
MarkerDetector::MarkerDetector(CameraCalibration calibration)
    : m_minContourLengthAllowed(100)
    , markerSize(100,100)
{
    // NOTE(review): wrapping the calibration data in a cv::Mat header assumes
    // getIntrinsic()/getDistorsion() expose contiguous float storage, and the
    // const_cast relies on copyTo() never writing through the source pointer —
    // verify against CameraCalibration's definition.
    cv::Mat(3,3, CV_32F, const_cast<float*>(&calibration.getIntrinsic().data[0])).copyTo(camMatrix);//camera intrinsic matrix (3x3)
    cv::Mat(4,1, CV_32F, const_cast<float*>(&calibration.getDistorsion().data[0])).copyTo(distCoeff);//lens distortion coefficients (4x1)
 
    bool centerOrigin = true;
    if (centerOrigin)//place the marker coordinate origin at the marker's center
    {
        m_markerCorners3d.push_back(cv::Point3f(-0.5f,-0.5f,0));
        m_markerCorners3d.push_back(cv::Point3f(+0.5f,-0.5f,0));
        m_markerCorners3d.push_back(cv::Point3f(+0.5f,+0.5f,0));
        m_markerCorners3d.push_back(cv::Point3f(-0.5f,+0.5f,0));
    }
    else
    {
        // Alternative: origin at the first corner, unit-length sides.
        m_markerCorners3d.push_back(cv::Point3f(0,0,0));
        m_markerCorners3d.push_back(cv::Point3f(1,0,0));
        m_markerCorners3d.push_back(cv::Point3f(1,1,0));
        m_markerCorners3d.push_back(cv::Point3f(0,1,0));    
    }
 
    // 2D corners of the canonical (warped, fronto-parallel) marker image.
    m_markerCorners2d.push_back(cv::Point2f(0,0));
    m_markerCorners2d.push_back(cv::Point2f(markerSize.width-1,0));
    m_markerCorners2d.push_back(cv::Point2f(markerSize.width-1,markerSize.height-1));
    m_markerCorners2d.push_back(cv::Point2f(0,markerSize.height-1));
}
 
void MarkerDetector::processFrame(const BGRAVideoFrame& frame)
{
    std::vector<Marker> markers;
    findMarkers(frame, markers);//☆★
 
    m_transformations.clear();
    for (size_t i=0; i<markers.size(); i++)
    {
        m_transformations.push_back(markers[i].transformation);
    }
}
 
//可以通過該對象取得旋轉矩陣和平移向量
// Accessor for the pose (rotation + translation) of each marker detected in
// the most recently processed frame; one entry per marker.
const std::vector<Transformation>& MarkerDetector::getTransformations() const
{
    return m_transformations;
}
 
 
// Full detection pipeline for one frame. The stages below must run in this
// exact order: each one consumes the previous stage's output.
// NOTE(review): the bool return is always false regardless of outcome; callers
// must inspect detectedMarkers instead. Kept as-is for interface compatibility.
bool MarkerDetector::findMarkers(const BGRAVideoFrame& frame, std::vector<Marker>& detectedMarkers)
{
    // Wrap the raw frame buffer in a cv::Mat header (no copy).
    cv::Mat bgraMat(frame.height, frame.width, CV_8UC4, frame.data, frame.stride);
 
    // BGRA => grayscale
    prepareImage(bgraMat, m_grayscaleImage);
 
    // Binarize the grayscale image
    performThreshold(m_grayscaleImage, m_thresholdImg);
 
    // Contour detection; contours shorter than 1/5 of the image width are dropped
    findContours(m_thresholdImg, m_contours, m_grayscaleImage.cols / 5);
 
    // Keep only contours that approximate to convex quadrilaterals
    findCandidates(m_contours, detectedMarkers);
 
    // Decode each candidate and keep only valid marker IDs
    recognizeMarkers(m_grayscaleImage, detectedMarkers);
 
    // Estimate each marker's pose relative to the camera
    estimatePosition(detectedMarkers);
 
    // Sort markers by id
    std::sort(detectedMarkers.begin(), detectedMarkers.end());
    return false;
}
 
// Reduce the four-channel camera frame to single-channel grayscale; every
// later detection stage works on intensity only.
void MarkerDetector::prepareImage(const cv::Mat& bgraMat, cv::Mat& grayscale) const
{
    cv::cvtColor(bgraMat, grayscale, CV_BGRA2GRAY);
}
 
// Binarize the grayscale image. THRESH_BINARY_INV makes dark marker regions
// white in the output, which is what the contour stage expects.
void MarkerDetector::performThreshold(const cv::Mat& grayscale, cv::Mat& thresholdImg) const
{
    // Fixed global threshold at 127.
    cv::threshold(grayscale, thresholdImg, 127, 255, cv::THRESH_BINARY_INV);
 
    
    // Alternative kept for reference: adaptive thresholding is more robust to
    // uneven lighting, at extra per-pixel cost.
//    cv::adaptiveThreshold(grayscale,    // Input image
//    thresholdImg,                       // Result binary image
//    255,                                // Max value assigned to passing pixels
//    cv::ADAPTIVE_THRESH_GAUSSIAN_C,     // Gaussian-weighted neighborhood mean
//    cv::THRESH_BINARY_INV,              // Inverted binary output
//    7,                                  // Neighborhood (block) size
//    7                                   // Constant subtracted from the mean
//    );
    
 
#ifdef SHOW_DEBUG_IMAGES
    cv::showAndSave("Threshold image", thresholdImg);
#endif
}
 
// Extract all contours from the binary image, then keep only those with more
// than minContourPointsAllowed points — contours shorter than that cannot
// outline a marker.
// Note: cv::findContours may modify its input image.
void MarkerDetector::findContours(cv::Mat& thresholdImg, ContoursVector& contours, int minContourPointsAllowed) const
{
    // Temporary holder for every contour found in the image.
    ContoursVector allContours;

    // CV_RETR_LIST: retrieve all contours without building a hierarchy.
    // CV_CHAIN_APPROX_NONE: store every contour point (adjacent points differ
    // by at most one pixel), so contour size reflects contour length.
    cv::findContours(thresholdImg, allContours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

    // Filter: only contours longer than the minimum survive.
    contours.clear();
    for (size_t idx = 0; idx < allContours.size(); ++idx)
    {
        if (static_cast<int>(allContours[idx].size()) > minContourPointsAllowed)
            contours.push_back(allContours[idx]);
    }

#ifdef SHOW_DEBUG_IMAGES
    {
        cv::Mat contoursImage(thresholdImg.size(), CV_8UC1);
        contoursImage = cv::Scalar(0);
        cv::drawContours(contoursImage, contours, -1, cv::Scalar(255), 2, CV_AA);
        cv::showAndSave("Contours", contoursImage);
    }
#endif
}
 
void MarkerDetector::findCandidates
(
    const ContoursVector& contours, 
    std::vector<Marker>& detectedMarkers
) 
{
    std::vector<cv::Point>  approxCurve;
    std::vector<Marker>     possibleMarkers;
 
    // For each contour, analyze if it is a parallelepiped likely to be the marker
    for (size_t i=0; i<contours.size(); i++)
    {
        // 判斷是否是多邊形的誤差限 
        double eps = contours[i].size() * 0.05;
        
        // 對輪廓曲線進行平滑操作,得到一個在誤差限定下的近似多邊形
        cv::approxPolyDP(contours[i], approxCurve, eps, true);
 
        // 僅僅考慮四邊形
        if (approxCurve.size() != 4)
            continue;
 
        // 而且多邊形必須是凸面的
        if (!cv::isContourConvex(approxCurve))
            continue;
 
        // 確定相鄰兩點之間的距離足夠大:大到是一條邊,而不是短線段
        float minDist = std::numeric_limits<float>::max();
 
        for (int i = 0; i < 4; i++)
        {
            cv::Point side = approxCurve[i] - approxCurve[(i+1)%4]; // Point(dx, dy)
            float squaredSideLength = side.dot(side);               // dx*dx+dy*dy
            minDist = std::min(minDist, squaredSideLength);
        }
 
        if (minDist < m_minContourLengthAllowed) // 100
            continue;
 
        // 通過上述檢查之後,就儲存候選的标記:
        Marker m;
 
        for (int i = 0; i<4; i++)
            m.points.push_back( cv::Point2f(approxCurve[i].x, approxCurve[i].y) );
 
        // 調整四個點的方向,確定它們是呈逆時針方向的
        // 将第一點分别和第二點和第三點連接配接成直線
        // 如果第三個點在右側,那麼這些點就是預設的逆時針方向
        cv::Point v1 = m.points[1] - m.points[0];
        cv::Point v2 = m.points[2] - m.points[0];
 
        // (-1)*(v1.y/v1.x)-(-1)*(v2.y/v2.x):根據直線的斜率大小,來判斷第三個點的位置
        double o = (v1.x * v2.y) - (v1.y * v2.x); 
 
        if (o < 0.0)
            //如果第三個點在左側,那麼就交換第二個點和第四個點的位置,來調整它們成逆時針方向
            std::swap(m.points[1], m.points[3]);
 
        possibleMarkers.push_back(m);
    }
 
 
    // 檢測兩個marker是否互相過于接近
    std::vector< std::pair<int,int> > tooNearCandidates;
    for (size_t i=0;i<possibleMarkers.size();i++)
    { 
        const Marker& m1 = possibleMarkers[i];
 
        // 計算本标記到其他标記最近角點的平均距離
        // calculate the average distance of each corner to the nearest corner of the other marker candidate
        for (size_t j=i+1;j<possibleMarkers.size();j++)
        {
            const Marker& m2 = possibleMarkers[j];
 
            float distSquared = 0;
 
            for (int c = 0; c < 4; c++)
            {
                cv::Point v = m1.points[c] - m2.points[c];
                distSquared += v.dot(v);
            }
            // 取相應四個角點距離平方和的平均值
            distSquared /= 4;
            
            // 如果距離太近,則把它們一起加入移除隊列,以做進一步的檢查(檢查其周長大小)
            if (distSquared < 100)
            {
                tooNearCandidates.push_back(std::pair<int,int>(i,j));
            }
        }               
    }
 
    // 标記需要移除的周長較小的标記 
    std::vector<bool> removalMask (possibleMarkers.size(), false);
 
    for (size_t i=0; i<tooNearCandidates.size(); i++)
    {
        float p1 = perimeter(possibleMarkers[tooNearCandidates[i].first ].points);
        float p2 = perimeter(possibleMarkers[tooNearCandidates[i].second].points);
 
        size_t removalIndex;
        if (p1 > p2)
            removalIndex = tooNearCandidates[i].second;
        else
            removalIndex = tooNearCandidates[i].first;
 
        removalMask[removalIndex] = true;
    }
 
    // 傳回經過提煉的候選标記隊列
    detectedMarkers.clear();
    for (size_t i=0;i<possibleMarkers.size();i++)
    {
        if (!removalMask[i])
            detectedMarkers.push_back(possibleMarkers[i]);
    }
}
 
// Decode each candidate quadrilateral: warp it to a canonical fronto-parallel
// view, read its ID, rotate its corners into a rotation-invariant order, and
// refine the surviving corners to sub-pixel accuracy.
// Fixes: `#if SHOW_DEBUG_IMAGES` -> `#ifdef` for consistency with the rest of
// the file (`#if` breaks if the macro is defined empty); `id !=- 1` respaced
// to `id != -1`; legacy cvSize() replaced with cv::Size to match the C++ API
// used everywhere else.
void MarkerDetector::recognizeMarkers(const cv::Mat& grayscale, std::vector<Marker>& detectedMarkers)
{
    std::vector<Marker> goodMarkers;
 
    // Identify the markers
    for (size_t i=0;i<detectedMarkers.size();i++)
    {
        Marker& marker = detectedMarkers[i];
 
        // Perspective matrix mapping the detected corners onto the canonical
        // marker rectangle.
        cv::Mat markerTransform = cv::getPerspectiveTransform(marker.points, m_markerCorners2d);
 
        // Warp the detected region into a fronto-parallel (canonical) view.
        cv::warpPerspective(grayscale, canonicalMarkerImage,  markerTransform, markerSize);
 
#ifdef SHOW_DEBUG_IMAGES
        {
            cv::Mat markerImage = grayscale.clone();
            marker.drawContour(markerImage);
            cv::Mat markerSubImage = markerImage(cv::boundingRect(marker.points));
 
            cv::showAndSave("Source marker" + ToString(i),           markerSubImage);
            cv::showAndSave("Marker " + ToString(i) + " after warp", canonicalMarkerImage);
        }
#endif
 
        int nRotations;
        // Decode the canonical image; returns the marker id, or -1 if the
        // pattern is not a valid marker. nRotations receives the detected
        // orientation (number of 90-degree rotations).
        int id = Marker::getMarkerId(canonicalMarkerImage, nRotations);
        if (id != -1)
        {
            marker.id = id;
            // Rotate the corner order to undo the detected orientation, so
            // corner 0 is always the same physical corner regardless of how
            // the camera sees the marker.
            std::rotate(marker.points.begin(), marker.points.begin() + 4 - nRotations, marker.points.end());
 
            goodMarkers.push_back(marker);
        }
    }  
 
    // Refine all corners at once with sub-pixel accuracy.
    if (goodMarkers.size() > 0)
    {
        // Flatten all corners into one array (4 per marker).
        std::vector<cv::Point2f> preciseCorners(4 * goodMarkers.size());
 
        for (size_t i=0; i<goodMarkers.size(); i++)
        {  
            const Marker& marker = goodMarkers[i];      
 
            for (int c = 0; c <4; c++)
            {
                preciseCorners[i*4 + c] = marker.points[c];
            }
        }
        
        // Stop after 30 iterations or when the corner moves by less than 0.01,
        // whichever comes first.
        cv::TermCriteria termCriteria = cv::TermCriteria(cv::TermCriteria::MAX_ITER | cv::TermCriteria::EPS, 30, 0.01);
        // 5x5 search window; (-1,-1) means no dead zone in its middle.
        cv::cornerSubPix(grayscale, preciseCorners, cv::Size(5,5), cv::Size(-1,-1), termCriteria);
 
        // Copy the refined corners back into the markers.
        for (size_t i=0; i<goodMarkers.size(); i++)
        {
            Marker& marker = goodMarkers[i];      
 
            for (int c=0;c<4;c++) 
            {
                marker.points[c] = preciseCorners[i*4 + c];
            }      
        }
    }
 
#ifdef SHOW_DEBUG_IMAGES
    {
        cv::Mat markerCornersMat(grayscale.size(), grayscale.type());
        markerCornersMat = cv::Scalar(0);
 
        for (size_t i=0; i<goodMarkers.size(); i++)
        {
            goodMarkers[i].drawContour(markerCornersMat, cv::Scalar(255));    
        }
 
        cv::showAndSave("Markers refined edges", grayscale * 0.5 + markerCornersMat);
    }
#endif
 
    detectedMarkers = goodMarkers;
}
 
// 标記的姿态估計
// Pose estimation: compute each marker's rigid transformation relative to the
// camera from its 3D model corners and detected 2D image corners.
void MarkerDetector::estimatePosition(std::vector<Marker>& detectedMarkers)
{
    for (size_t i=0; i<detectedMarkers.size(); i++)
    {                   
        Marker& m = detectedMarkers[i];
 
        cv::Mat Rvec;
        cv::Mat_<float> Tvec;
        cv::Mat raux,taux;// rotation vector and translation vector mapping model-space points into camera space (the Euclidean transform)
        // Solve for the camera-relative pose from the 3D marker corners, their
        // detected 2D image positions, and the camera intrinsics/distortion.
        cv::solvePnP(m_markerCorners3d, m.points, camMatrix, distCoeff,raux,taux);
        // solvePnP outputs doubles by default; convert to float for Transformation.
        raux.convertTo(Rvec,CV_32F);
        taux.convertTo(Tvec ,CV_32F);
 
        cv::Mat_<float> rotMat(3,3);
        cv::Rodrigues(Rvec, rotMat);// expand the rotation vector into a 3x3 rotation matrix
 
        // Copy to transformation matrix
        for (int col=0; col<3; col++)
        {
            for (int row=0; row<3; row++)
            {        
                m.transformation.r().mat[row][col] = rotMat(row,col); // Copy rotation component
            }
            m.transformation.t().data[col] = Tvec(col); // Copy translation component
        }
 
        // solvePnP gives the transform of the camera relative to the marker;
        // we want the marker relative to the camera, so invert the transform.
        m.transformation = m.transformation.getInverted();
    }
}      

// (article footer: 繼續閱讀 — "continue reading")