
hog_svm code

Reference articles:

1. http://blog.csdn.net/zhazhiqiang/article/details/18664417
2. http://blog.csdn.net/hujingshuang/article/details/47337707/
3. http://blog.csdn.net/orsinozhu/article/details/40554211
4. http://blog.csdn.net/qq_14845119/article/details/52187774
5. http://blog.csdn.net/leifeng_soul/article/details/52608575
6. http://blog.csdn.net/zhazhiqiang/article/details/20723425
7. http://blog.csdn.net/alvine008/article/details/9097105
8. http://blog.csdn.net/love_linney/article/details/25192909

#ifndef MY_HOG_SVM_H
#define MY_HOG_SVM_H


#include <QObject>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv/cv.h"
#include <QDebug>
#include <QTime>
#include <QDateTime>
#include <QTimer>
#include <QtCore/qmath.h>
#include "opencv/ml.h"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>


using namespace cv;
using namespace std;
// Subclass CvSVM only to expose the protected decision_func member,
// which holds the alpha coefficients and the rho offset of the decision function.
class Mysvm : public CvSVM
{
public:
    // get the alpha array of the SVM decision function
    double * get_alpha_vector()
    {
        return this->decision_func->alpha;
    }


    // get the rho parameter (offset) of the SVM decision function
    float get_rho()
    {
        return this->decision_func->rho;
    }
};


class My_Hog_Svm : public QObject
{
    Q_OBJECT
public:
    explicit My_Hog_Svm(QObject *parent = 0);


private:
    // HOG parameters: window, block, cell and stride sizes (in pixels).
    // Typical square settings; adjust them to match your own training data.
    const int m_iImgHeight = 64;
    const int m_iImgWidht = 64;
    const int m_iBlockSizeWidth = 16;
    const int m_iCellSizeWidth = 8;
    const int m_iStrideSizeWidth = 8;


private:
    void MyTrain();
    void Detection();
    void GetFeatureVector();
    void DrawBox();
};


#endif // MY_HOG_SVM_H
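
The length of the HOG descriptor computed later is fully determined by the window, block, block-stride, cell and bin settings above. A minimal sanity-check sketch (not from the original post), assuming the square 64/16/8/8 sizes and 9 bins filled in above:

#include <iostream>

int main()
{
    // Assumed values matching the constants in the header above (typical HOG settings).
    const int win = 64, block = 16, stride = 8, cell = 8, nbins = 9;

    const int blocksPerRow  = (win - block) / stride + 1;        // 7 blocks per row/column
    const int cellsPerBlock = (block / cell) * (block / cell);   // 4 cells per block

    // Total descriptor length: blocks per window * cells per block * bins per cell.
    const int hogDims = blocksPerRow * blocksPerRow * cellsPerBlock * nbins;
    std::cout << "expected HOG descriptor length: " << hogDims << std::endl;  // prints 1764
    return 0;
}

The "HOG dims" value printed during training should match this number; if it does not, the window, block, stride or cell sizes passed to HOGDescriptor differ from the ones assumed here.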
           
#include "my_hog_svm.h"
//#include "mysvm.h"



My_Hog_Svm::My_Hog_Svm(QObject *parent) : QObject(parent)
{
//    // 1: train the SVM on the positive/negative samples
//    this->MyTrain();

//    // 2: classify the test samples
//    this->Detection();

//    // 3: build the detector vector from the trained SVM
//    this->GetFeatureVector();

    // 4: run detection and draw the bounding boxes
    this->DrawBox();
}

void My_Hog_Svm::MyTrain()
{

    vector<string> img_path;    // sample image paths
    vector<int> img_catg;       // sample labels (1 = positive, 0 = negative)
    int nLine = 0;              // total number of samples
    string buf;
    ifstream svm_data_true( "./TRAIN_HEAD/Pos.txt" );   // list of positive sample paths
    ifstream svm_data_false( "./TRAIN_HEAD/Neg.txt" );  // list of negative sample paths
    unsigned long n;

    // read the sample paths
    while( getline( svm_data_true, buf ) )    // positive samples
    {
        nLine++;
        img_catg.push_back(1);
        img_path.push_back( buf );
    }
    while( getline( svm_data_false, buf ) )   // negative samples
    {
        nLine++;
        img_catg.push_back(0);
        img_path.push_back( buf );
    }
    svm_data_true.close();  // close the list files
    svm_data_false.close();


    Mat data_mat;   // feature matrix, one row per sample
    Mat res_mat;    // label matrix, one row per sample
    int nImgNum = nLine;            // number of samples read
    // label matrix: stores the class label of every sample
    res_mat = Mat::zeros( nImgNum, 1, CV_32FC1 );

    Mat src;
    Mat trainImg = Mat::zeros(m_iImgHeight, m_iImgWidht, CV_8UC3);  // image to be analysed

    // compute the HOG feature vector of every sample
    for( string::size_type i = 0; i != img_path.size(); i++ )
    {
        src = imread(img_path[i].c_str(), 1);
        resize(src, trainImg, cv::Size(m_iImgWidht, m_iImgHeight), 0, 0, INTER_CUBIC);  // resize to the training window size
        // parameters: window size, block size, block stride, cell size, number of histogram bins
        // (see reference articles 1 and 2 for details)
        HOGDescriptor *hog = new HOGDescriptor(cvSize(m_iImgWidht, m_iImgHeight),
                                               cvSize(m_iBlockSizeWidth, m_iBlockSizeWidth),
                                               cvSize(m_iStrideSizeWidth, m_iStrideSizeWidth),
                                               cvSize(m_iCellSizeWidth, m_iCellSizeWidth), 9);

        vector<float> descriptors;  // holds the computed feature vector
        hog->compute(trainImg, descriptors, Size(m_iStrideSizeWidth, m_iStrideSizeWidth), Size(0, 0));  // window stride, padding
        if (i == 0)
        {
            // allocate the feature matrix once the descriptor length is known
            data_mat = Mat::zeros( nImgNum, descriptors.size(), CV_32FC1 );
        }
        n = 0;
        for(vector<float>::iterator iter = descriptors.begin(); iter != descriptors.end(); iter++)
        {
            // copy the descriptor into the feature matrix
            data_mat.at<float>(i, n) = *iter;
            n++;
        }
        res_mat.at<float>(i, 0) = img_catg[i];
        delete hog;
        cout << " end processing " << img_path[i].c_str() << "    label:" << img_catg[i] << " HOG dims: " << descriptors.size() << endl;
    }

    // SVM training
    Mysvm* svm = new Mysvm();
    // termination criteria: stop after 1000 iterations or when the error drops below FLT_EPSILON
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 1000, FLT_EPSILON);
    // SVM parameters: type C_SVC, linear kernel, penalty parameter C = 0.01
    CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 0, 0, 0, 0.01, 0, 0, NULL, criteria);

    // train the SVM on the feature matrix and labels with the parameters above
    svm->train( data_mat, res_mat, Mat(), Mat(), param );

    svm->save( "./TRAIN_HEAD/SVM_DATA.xml" );
    delete svm;
    qDebug() << "Finish!";

}


void My_Hog_Svm::Detection()
{
    Mysvm* svm = new Mysvm();
    svm->load("./TRAIN_HEAD/SVM_DATA.xml");

    Mat trainImg = Mat::zeros(m_iImgHeight, m_iImgWidht, CV_8UC3);  // image to be analysed
    string buf;
    // test samples
    vector<string> img_tst_path;
    ifstream img_tstNeg( "./TRAIN_HEAD/testNeg.txt" );
    ifstream img_tstPos( "./TRAIN_HEAD/testPos.txt" );
    while( getline( img_tstNeg, buf ) )
    {
        img_tst_path.push_back( buf );
    }
    img_tstNeg.close();
    while( getline( img_tstPos, buf ) )
    {
        img_tst_path.push_back( buf );
    }
    img_tstPos.close();

    Mat test;
    ofstream predict_txt( "./TRAIN_HEAD/SVM_PREDICT.txt" );
    for( string::size_type j = 0; j != img_tst_path.size(); j++ )
    {
        test = imread( img_tst_path[j].c_str(), 1 );  // load the test image
        resize(test, trainImg, cv::Size(m_iImgWidht, m_iImgHeight), 0, 0, INTER_CUBIC);  // resize to the training window size, otherwise the descriptor length will not match
        HOGDescriptor *hog = new HOGDescriptor(cvSize(m_iImgWidht, m_iImgHeight),
                                               cvSize(m_iBlockSizeWidth, m_iBlockSizeWidth),
                                               cvSize(m_iStrideSizeWidth, m_iStrideSizeWidth),
                                               cvSize(m_iCellSizeWidth, m_iCellSizeWidth), 9);
        vector<float> descriptors;  // holds the computed feature vector
        hog->compute(trainImg, descriptors, Size(m_iStrideSizeWidth, m_iStrideSizeWidth), Size(0, 0));
        cout << "The Detection Result:" << endl;
        Mat SVMtrainMat = Mat::zeros(1, descriptors.size(), CV_32FC1);
        int n = 0;
        for(vector<float>::iterator iter = descriptors.begin(); iter != descriptors.end(); iter++)
        {
            SVMtrainMat.at<float>(0, n) = *iter;
            n++;
        }

        int ret = (int)svm->predict(SVMtrainMat);
        printf("%s %d\r\n", img_tst_path[j].c_str(), ret);
        predict_txt << img_tst_path[j] << " " << ret << "\r\n";
        delete hog;
    }
    predict_txt.close();
    delete svm;

    cout << "Finish" << endl;
    return;

}
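
Detection() only writes each test image's path and predicted label to SVM_PREDICT.txt. If the expected label of every test image is also kept (1 for entries from testPos.txt, 0 for testNeg.txt), a small tally gives the test accuracy. A minimal sketch under those assumptions (the helper below is not part of the original post):

#include <vector>

// Hypothetical helper: fraction of predictions that match the expected labels.
static double predictionAccuracy(const std::vector<int>& expected,
                                 const std::vector<int>& predicted)
{
    if (expected.empty() || expected.size() != predicted.size())
        return 0.0;                                   // nothing to compare
    int hits = 0;
    for (size_t i = 0; i < expected.size(); i++)
        if (expected[i] == predicted[i])
            hits++;                                   // count correct predictions
    return double(hits) / double(expected.size());
}

Calling it with the labels implied by the two list files and the ret values collected in the loop above gives a single accuracy figure instead of a file that has to be inspected by hand.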


/*************************************************************************************************
    The XML file produced by training a linear SVM contains an array of support vectors, an
    array of alpha coefficients and a floating-point value rho. Multiplying the alpha vector by
    the support-vector matrix gives a single vector of the same length as the HOG descriptor;
    appending rho to it yields a detector (in the code below the product is negated, i.e.
    detector = [-alpha * supportVectors, rho]). This detector can be passed to
    cv::HOGDescriptor::setSVMDetector() in place of OpenCV's default pedestrian detector, so that
    detection uses the classifier trained on your own samples.
***************************************************************************************************/
void My_Hog_Svm::GetFeatureVector()
{
    Mysvm* svm = new Mysvm();
    svm->load("./TRAIN_HEAD/SVM_DATA.xml");
    int l_iFeatureNum = svm->get_var_count();                // feature dimension, i.e. the HOG descriptor length
    int supportVectorNum = svm->get_support_vector_count();  // number of support vectors
    qDebug() << "support vector count:" << supportVectorNum;

    Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1);                      // alpha vector, one entry per support vector
    Mat supportVectorMat = Mat::zeros(supportVectorNum, l_iFeatureNum, CV_32FC1);  // support vector matrix
    Mat resultMat = Mat::zeros(1, l_iFeatureNum, CV_32FC1);                        // result of alpha vector times support vector matrix


    // copy the support vectors into supportVectorMat
    for(int i = 0; i < supportVectorNum; i++)
    {
        const float * pSVData = svm->get_support_vector(i);  // pointer to the data of the i-th support vector
        for(int j = 0; j < l_iFeatureNum; j++)
        {
            supportVectorMat.at<float>(i, j) = pSVData[j];
        }
    }

    // copy the alpha coefficients of the decision function into alphaMat
    double * pAlphaData = svm->get_alpha_vector();
    for(int i = 0; i < supportVectorNum; i++)
    {
        alphaMat.at<float>(0, i) = (float)pAlphaData[i];
    }

    // gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);  // equivalent form
    // the product is negated so that the detector matches the sign convention expected by HOGDescriptor
    resultMat = -1 * alphaMat * supportVectorMat;

    // build the detector usable with setSVMDetector(const vector<float>& detector)
    vector<float> myDetector;
    // copy resultMat into myDetector
    for(int i = 0; i < l_iFeatureNum; i++)
    {
        myDetector.push_back(resultMat.at<float>(0, i));
    }
    // the offset rho is appended when the detector is written to file below
//    myDetector.push_back(svm->get_rho());
    qDebug() << "detector dimension:" << (int)(myDetector.size() + 1);

    // save the detector to a text file, one value per line, with rho as the last value
    FILE* fp = fopen("./TRAIN_HEAD/hogSVMDetector-peopleFlow.txt", "wb");
    if( NULL == fp )
    {
        return;
    }
    for(size_t i = 0; i < myDetector.size(); i++)
    {
        fprintf(fp, "%f \n", myDetector[i]);
    }
    fprintf(fp, "%f", svm->get_rho());
    fclose(fp);
    delete svm;
    qDebug() << "Finish!";
    return;

}


void My_Hog_Svm::DrawBox()
{

        vector<Rect> found;
        Mat img = imread("./11.jpg");

        // read the detector back from the text file (one value per line, rho last)
        vector<float> myDetector;
        ifstream fileIn("./TRAIN_HEAD/hogSVMDetector-peopleFlow.txt", ios::in);
        float val = 0.0f;
        while(fileIn >> val)
        {
            myDetector.push_back(val);
        }
        fileIn.close();

        // parameters: window size, block size, block stride, cell size, number of histogram bins
        HOGDescriptor defaultHog(cvSize(m_iImgWidht, m_iImgHeight),
                                 cvSize(m_iBlockSizeWidth, m_iBlockSizeWidth),
                                 cvSize(m_iStrideSizeWidth, m_iStrideSizeWidth),
                                 cvSize(m_iCellSizeWidth, m_iCellSizeWidth), 9);
        defaultHog.setSVMDetector(myDetector);
        // run detection
        defaultHog.detectMultiScale(img, found);
        // draw rectangles around the detections
        for(size_t i = 0; i < found.size(); i++)
        {
            Rect r = found[i];
            rectangle(img, r.tl(), r.br(), Scalar(255, 255, 255), 3);
        }
        namedWindow("Pedestrian Detection", CV_WINDOW_AUTOSIZE);
        imshow("Pedestrian Detection", img);
        waitKey();

//        Mat l_pImageEle;
//        namedWindow("Video");
//        VideoCapture capture("./a.avi");
//        while(1)    // loop over every frame
//        {
//            static int l_iNum = 0;
//            if(!capture.read(l_pImageEle))
//            {
//                return;
//            }

//            if(l_iNum%34 ==0)
//            {
//                // run detection
//                defaultHog.detectMultiScale(l_pImageEle, found);
//                // draw rectangles around detected pedestrians
//                for(int i = 0; i < found.size(); i++)
//                {
//                    Rect r = found[i];
//                    rectangle(l_pImageEle, r.tl(), r.br(), Scalar(255, 255, 255), 3);
//                }
//            }

//            imshow("Video", l_pImageEle);
//            cvWaitKey(34);

//        }
}