這只是一次簡單的嘗試,不過收獲還是挺多的。
首先是地面上有一些tag,用攝像頭去拍攝,每次只拍到部分的tag,拍多次。
因為tag是唯一的,而且尺度已知,可以直接計算攝像頭的RT,是以可以套用slam架構。基本思路如下:
1.初始幀将map坐标系原點設為攝像頭初始位置,将初始幀看到的tag的四個角點放入map中。
2.之後的幀先識别目前幀裡的tag,分為兩類,一類是map中已有的,一類是map中沒有的。已有的那部分與map中的tag做比對,使用icp方法求解出目前攝像頭位姿,再将沒有的那部分根據相機位姿再拼入map中。
3.最後做一次g2o優化。
g2o 頂點是相機pose (VertexSE3) tag的四個頂點landmark(VertexPointXYZ)
邊是 tag的測量值(EdgeSE3PointXYZ)
其中采集圖像要注意,按時間順序,新的圖像中的tag必須有map中已有的。
結果并不讓人滿意,但至少值得分析。
g2o的優化結果可以看出攝像頭不在一個平面上,但實際上拍攝時攝像頭都是擺在地上的。這主要是因為tag的z軸測的不準,導緻攝像頭有很劇烈的上下變化。
對于tag的位置優化結果,其實也很差,我是擺成長方形的,但是很明顯優化出來的各個點不在長方形上。
從這個結果,我很跳躍的想到,在攝像頭标定時,攝像頭最好讓視野中絕大部分有标定闆存在,這樣可以避免像素級帶來的誤差。我這個試驗中,tag只占視野裡很少的部分,是以誤差特别大。
代碼有些亂,僅作記錄。
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
#include <ctime>
#include <Eigen/Core>
#include <Eigen/StdVector>
#include <Eigen/Geometry>
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/calib3d.hpp>
#include "g2o/types/slam3d/types_slam3d.h"
#include "g2o/types/slam3d/vertex_pointxyz.h"
#include "g2o/types/slam3d/vertex_se3.h"
#include "g2o/types/slam3d/edge_se3.h"
#include "g2o/types/slam3d/edge_se3_pointxyz.h"
#include "g2o/core/factory.h"
#include "g2o/core/optimization_algorithm_factory.h"
#include "g2o/core/sparse_optimizer.h"
#include "g2o/core/block_solver.h"
#include "g2o/solvers/dense/linear_solver_dense.h"
#include "g2o/core/optimization_algorithm_levenberg.h"
#include "icp_g2o.hpp"
using namespace std;
using namespace cv;
using namespace Eigen;
using namespace g2o;
//使用圖檔
//輸入目前幀檢測到的tag
//維護一張節點地圖用于g2o
//One camera frame's tag observations; becomes a pose node (plus measurement
//edges) in the g2o graph.
class ArucoFrame
{
public:
vector<int> landmark_id;//ids of the tags detected in this frame
vector<Eigen::Vector3d> landmarks_pointXYZ;//tag corners (4 per tag) in the camera coordinate frame
Eigen::Isometry3d robot_pose;//camera pose in the map coordinate frame
};
//Accumulated landmark map: one id per tag, 4 corner points per tag (stored
//consecutively, so tag i owns indices i*4 .. i*4+3 of landmarks_pointXYZ).
class ArucoMap
{
public:
vector<int> landmark_id;//ids of all tags inserted so far
vector<Eigen::Vector3d> landmarks_pointXYZ;//tag corners (4 per tag) in the map coordinate frame
};
ArucoMap ArucoMapGlobal;              // landmarks in the map frame
vector<ArucoFrame> ArucoFrameGlobal;  // one entry per processed frame
// Robot pose vertex ids start at 0; tag-corner vertex ids start at 100.
// This assumes there are never more than 100 poses, so the two id ranges
// cannot collide inside the optimizer.
int id_vertex_pose=0, id_vertex_pointXYZ_start=100;
void buildMap(vector< int > &ids, vector< vector< Point2f > > &corners, vector< Vec3d > &rvecs, vector< Vec3d > &tvecs, g2o::SparseOptimizer &optimizer)
{
//擷取目前幀裡的tag
ArucoFrame ArucoFrame_temp;
if(ids.size() != )
{
ArucoFrame_temp.robot_pose = Eigen::Isometry3d::Identity();
for(int i=; i<ids.size(); i++)
{
ArucoFrame_temp.landmark_id.push_back(ids[i]);
//cv轉換到eigen下
Mat rot_mat;
Rodrigues(rvecs[i], rot_mat);
Eigen::Matrix3d eigen_r;
Eigen::Vector3d eigen_t;
cv2eigen(rot_mat, eigen_r);
cv2eigen(tvecs[i],eigen_t);
//這裡的rt對應的T是從mark坐标系變到相機坐标系
//Point_camera = T_camera-mark * Point_mark
//很重要!!是以這裡的四個點是按mark坐标系下的順序排列的
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(-,,)+eigen_t );
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(,,)+eigen_t );
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(,-,)+eigen_t );
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(-,-,)+eigen_t );
}
ArucoFrameGlobal.push_back(ArucoFrame_temp);
}
else
return ;
//對地圖操作
if(ArucoMapGlobal.landmark_id.size() == )//初始化第一幀
{
for(int i=; i<ArucoFrame_temp.landmark_id.size(); i++)
{
ArucoMapGlobal.landmark_id.push_back(ArucoFrame_temp.landmark_id[i]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
//添加pose
VertexSE3* v = new VertexSE3;
v->setEstimate(ArucoFrame_temp.robot_pose);
v->setId(id_vertex_pose++);
optimizer.addVertex(v);
//添加landmark和edge
for(int j=; j<; j++)
{
VertexPointXYZ* l = new VertexPointXYZ;
l->setEstimate(ArucoFrame_temp.landmarks_pointXYZ[i*+j]);
l->setFixed(true);
l->setId(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[i]*+j);//之是以直接用tag的id是因為不會存在相同的tag,是以vertex的序号不會出現一樣的。
optimizer.addVertex(l);
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[i]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
e->setParameterId(,);//這句話必須加
optimizer.addEdge(e);
}
}
}
else
{
//尋找目前幀裡的tagid是否和map裡的id一樣
vector<int> id_same, id_different;
for(int i=; i<ArucoFrame_temp.landmark_id.size(); i++)
{
int j;
for(j=; j<ArucoMapGlobal.landmark_id.size(); j++)
{
if(ArucoMapGlobal.landmark_id[j] == ArucoFrame_temp.landmark_id[i])
{
id_same.push_back(i);
break;
}
}
if(j == ArucoMapGlobal.landmark_id.size())
id_different.push_back(i);
}
if(id_different.size() != )//如果有新的id能看到,就要添加進地圖裡
{
if(id_same.size() != )//必須要同時看到地圖裡存在的tag以及新的tag,才能将新的tag添加進地圖中,因為是根據scan比對map獲得自身位資
{
//先根據icp比對獲得目前幀的位置。
vector<Eigen::Vector3d> landmarkXYZ_common_map;
vector<Eigen::Vector3d> landmarkXYZ_common_frame;
for(int i=; i<ArucoMapGlobal.landmark_id.size();i++)
{
for(int j=; j<id_same.size(); j++)//使用所有能看得到的共視圖形
{
if(ArucoMapGlobal.landmark_id[i] == ArucoFrame_temp.landmark_id[id_same[j]])
{
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
break;
}
}
}
Eigen::Isometry3d T;
bundleAdjustment(landmarkXYZ_common_map, landmarkXYZ_common_frame, T);//這個T就是目前攝像頭的pose
ArucoFrame_temp.robot_pose = T;
ArucoFrameGlobal.back().robot_pose = T;
//目前攝像頭的位姿計算出來以後,更新map中的pose和edge,這一步沒有landmark添加進來
VertexSE3* v = new VertexSE3;
v->setEstimate(ArucoFrame_temp.robot_pose);
v->setId(id_vertex_pose++);
optimizer.addVertex(v);
for(int i=; i<id_same.size(); i++)
{
for(int j=; j<; j++)
{
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_same[i]]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[id_same[i]*+j]);
e->setParameterId(,);//這句話必須加
optimizer.addEdge(e);
}
}
//更新map中的landmark和edge
for(int i=; i<id_different.size(); i++)
{
ArucoMapGlobal.landmark_id.push_back(ArucoFrame_temp.landmark_id[id_different[i]]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
for(int j=; j<; j++)
{
VertexPointXYZ* l = new VertexPointXYZ;
l->setEstimate(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+j]);
l->setId(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_different[i]]*+j);
optimizer.addVertex(l);
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_different[i]]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+j]);
e->setParameterId(,);//這句話必須加
optimizer.addEdge(e);
}
}
}
}
else//如果看到的全是存在的tag就添加優化關系
{
vector<int> id_same;
for(int i=; i<ArucoFrame_temp.landmark_id.size(); i++)
{
int j;
for(j=; j<ArucoMapGlobal.landmark_id.size(); j++)
{
if(ArucoMapGlobal.landmark_id[j] == ArucoFrame_temp.landmark_id[i])
{
id_same.push_back(i);
break;
}
}
}
//先根據icp比對獲得目前幀的位置。
vector<Eigen::Vector3d> landmarkXYZ_common_map;
vector<Eigen::Vector3d> landmarkXYZ_common_frame;
for(int i=; i<ArucoMapGlobal.landmark_id.size();i++)
for(int j=; j<id_same.size(); j++)//使用所有能看得到的共視圖形
{
if(ArucoMapGlobal.landmark_id[i] == ArucoFrame_temp.landmark_id[id_same[j]])
{
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
break;
}
}
Eigen::Isometry3d T;
bundleAdjustment(landmarkXYZ_common_map, landmarkXYZ_common_frame, T);//這個T就是目前攝像頭的pose
ArucoFrame_temp.robot_pose = T;
ArucoFrameGlobal.back().robot_pose = T;
VertexSE3* v = new VertexSE3;
v->setEstimate(ArucoFrame_temp.robot_pose);
v->setId(id_vertex_pose++);
optimizer.addVertex(v);
for(int i=; i<id_same.size(); i++)
{
for(int j=; j<; j++)
{
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_same[i]]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[id_same[i]*+j]);
e->setParameterId(,);//這句話必須加
optimizer.addEdge(e);
}
}
}
}
}
namespace {
// CLI help text (OpenCV CommandLineParser format: {name | default | help}).
const char* about = "Basic marker detection";
const char* keys =
"{d | 0 | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{v | | Input from video file, if omitted, input comes from camera }"
"{ci | 0 | Camera id if input doesn't come from video (-v) }"
"{c | log270 | Camera intrinsic parameters. Needed for camera pose }"
"{l | 0.06 | Marker side length (in meters). Needed for correct scale in camera pose }"
"{dp | detector_params.yml | File of marker detector parameters }"
"{r | | show rejected candidates too }";
}
// Load the camera intrinsics ("camera_matrix") and distortion coefficients
// ("distortion_coefficients") from an OpenCV calibration YAML/XML file.
// Returns false when the file cannot be opened.
static bool readCameraParameters(string filename, Mat &camMatrix, Mat &distCoeffs) {
    const FileStorage fs(filename, FileStorage::READ);
    const bool opened = fs.isOpened();
    if(opened) {
        fs["camera_matrix"] >> camMatrix;
        fs["distortion_coefficients"] >> distCoeffs;
    }
    return opened;
}
static bool readDetectorParameters(string filename, Ptr<aruco::DetectorParameters> ¶ms) {
FileStorage fs(filename, FileStorage::READ);
if(!fs.isOpened())
return false;
fs["adaptiveThreshWinSizeMin"] >> params->adaptiveThreshWinSizeMin;
fs["adaptiveThreshWinSizeMax"] >> params->adaptiveThreshWinSizeMax;
fs["adaptiveThreshWinSizeStep"] >> params->adaptiveThreshWinSizeStep;
fs["adaptiveThreshConstant"] >> params->adaptiveThreshConstant;
fs["minMarkerPerimeterRate"] >> params->minMarkerPerimeterRate;
fs["maxMarkerPerimeterRate"] >> params->maxMarkerPerimeterRate;
fs["polygonalApproxAccuracyRate"] >> params->polygonalApproxAccuracyRate;
fs["minCornerDistanceRate"] >> params->minCornerDistanceRate;
fs["minDistanceToBorder"] >> params->minDistanceToBorder;
fs["minMarkerDistanceRate"] >> params->minMarkerDistanceRate;
fs["doCornerRefinement"] >> params->doCornerRefinement;
fs["cornerRefinementWinSize"] >> params->cornerRefinementWinSize;
fs["cornerRefinementMaxIterations"] >> params->cornerRefinementMaxIterations;
fs["cornerRefinementMinAccuracy"] >> params->cornerRefinementMinAccuracy;
fs["markerBorderBits"] >> params->markerBorderBits;
fs["perspectiveRemovePixelPerCell"] >> params->perspectiveRemovePixelPerCell;
fs["perspectiveRemoveIgnoredMarginPerCell"] >> params->perspectiveRemoveIgnoredMarginPerCell;
fs["maxErroneousBitsInBorderRate"] >> params->maxErroneousBitsInBorderRate;
fs["minOtsuStdDev"] >> params->minOtsuStdDev;
fs["errorCorrectionRate"] >> params->errorCorrectionRate;
return true;
}
// Offline driver (based on OpenCV's detect_markers sample): step through
// images, detect ArUco markers, let the user commit a frame into the map
// ('c') or quit ('q'), then run the global g2o optimization and save the
// graph before/after for inspection.
int main(int argc, char *argv[]) {
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);
    int dictionaryId = parser.get<int>("d");
    bool showRejected = parser.has("r");
    bool estimatePose = parser.has("c");
    float markerLength = parser.get<float>("l");
    Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
    if(parser.has("dp")) {
        bool readOk = readDetectorParameters(parser.get<string>("dp"), detectorParams);
        if(!readOk) {
            cerr << "Invalid detector parameters file" << endl;
            return 0;
        }
    }
    int camId = parser.get<int>("ci");
    String video;
    if(parser.has("v")) {
        video = parser.get<String>("v");
    }
    if(!parser.check()) {
        parser.printErrors();
        return 0;
    }
    Ptr<aruco::Dictionary> dictionary =
        aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));
    Mat camMatrix, distCoeffs;
    if(estimatePose) {
        bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
        if(!readOk) {
            cerr << "Invalid camera file" << endl;
            return 0;
        }
    }
    VideoCapture inputVideo;  // unused: frames are read from image files below
    /* int waitTime;
    if(!video.empty()) {
        inputVideo.open(video);
        waitTime = 0;
    } else {
        inputVideo.open(camId);
        waitTime = 10;
    }
    inputVideo.set(CAP_PROP_FRAME_WIDTH, 640);
    inputVideo.set(CAP_PROP_FRAME_HEIGHT , 480);
    */
    double totalTime = 0;  // accumulated detection time, kept for debugging
    g2o::SparseOptimizer optimizer;  // global optimizer shared with buildMap()
    // These three statements are mandatory: every EdgeSE3PointXYZ calls
    // setParameterId(0,0), which refers to this ParameterSE3Offset with id 0.
    g2o::ParameterSE3Offset* cameraOffset = new g2o::ParameterSE3Offset;
    cameraOffset->setId(0);
    optimizer.addParameter(cameraOffset);
    optimizer.setVerbose(true);  // print per-iteration debug output
    // Dense linear solver; BlockSolverX sizes blocks dynamically
    // (pose = 6 DoF, landmark = 3 DoF).
    g2o::BlockSolverX::LinearSolverType * linearSolver;
    linearSolver = new g2o::LinearSolverDense<g2o::BlockSolverX::PoseMatrixType>();
    g2o::BlockSolverX * solver_ptr
        = new g2o::BlockSolverX(linearSolver);
    // Levenberg-Marquardt
    g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
    optimizer.setAlgorithm(solver);
    Mat inputImage = imread("./squre_80_120/0.jpg");
    // NOTE(review): the first image comes from ./squre_80_120 while the loop
    // reads ./squre2 — confirm the intended directories and start index.
    int num_image = 1;
    const int num_image_last = 10;  // TODO: set to the actual number of images
    while(true) {
        Mat image, imageCopy;
        //inputVideo.retrieve(image);
        image = inputImage;
        double tick = (double)getTickCount();
        vector< int > ids;
        vector< vector< Point2f > > corners, rejected;
        vector< Vec3d > rvecs, tvecs;
        // detect markers and estimate pose
        aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
        if(estimatePose && ids.size() > 0)
            aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
                                             tvecs);
        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        // draw results
        image.copyTo(imageCopy);
        if(ids.size() > 0)
        {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);
            if(estimatePose) {
                for(unsigned int i = 0; i < ids.size(); i++)
                    aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
                                    markerLength * 0.5f);
            }
        }
        if(showRejected && rejected.size() > 0)
            aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));
        imshow("out", imageCopy);
        int key = waitKey();  // block until a key press
        cout << key << endl;
        if(key == 'q') break;   // quit
        if(key == 'c')          // commit this frame into the map/graph
            buildMap(ids, corners, rvecs, tvecs, optimizer);
        string string_temp = "./squre2/" + to_string(num_image++) +".jpg";
        if(num_image == num_image_last)
            break;
        inputImage = imread(string_temp);
    }
    // Global optimization; dump the graph before and after for inspection.
    optimizer.save("before.g2o");
    optimizer.initializeOptimization();
    optimizer.optimize(100);  // NOTE(review): iteration count lost in source — tune as needed
    optimizer.save("after.g2o");
    return 0;
}