这只是一次简单的尝试,不过收获还是挺多的。
首先是地面上有一些tag,用摄像头去拍摄,每次只拍到部分的tag,拍多次。
因为tag是唯一的,而且尺度已知,可以直接计算摄像头的RT,因此可以套用slam框架。基本思路如下:
1.初始帧将map坐标系原点设为摄像头初始位置,将初始帧看到的tag的四个角点放入map中。
2.之后的帧先识别当前帧里的tag,分为两类,一类是map中已有的,一类是map中没有的。已有的那部分与map中的tag做匹配,使用icp方法求解出当前摄像头位姿,再将没有的那部分根据相机位姿再拼入map中。
3.最后做一次g2o优化。
g2o 顶点是相机pose (VertexSE3) tag的四个顶点landmark(VertexPointXYZ)
边是 tag的测量值(EdgeSE3PointXYZ)
其中采集图像要注意,按时间顺序,新的图像中的tag必须有map中已有的。
结果并不让人满意,但至少值得分析。
g2o的优化结果可以看出摄像头不在一个平面上,但实际上拍摄时摄像头都是摆在地上的。这主要是因为tag的z轴测的不准,导致摄像头有很剧烈的上下变化。
对于tag的位置优化结果,其实也很差,我是摆成长方形的,但是很明显优化出来的各个点不在长方形上。
从这个结果,我很跳跃的想到,在摄像头标定时,摄像头最好让视野中绝大部分有标定板存在,这样可以避免像素级带来的误差。我这个试验中,tag只占视野里很少的部分,所以误差特别大。
代码有些乱,仅作记录。
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
#include <ctime>
#include <Eigen/Core>
#include <Eigen/StdVector>
#include <Eigen/Geometry>
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/calib3d.hpp>
#include "g2o/types/slam3d/types_slam3d.h"
#include "g2o/types/slam3d/vertex_pointxyz.h"
#include "g2o/types/slam3d/vertex_se3.h"
#include "g2o/types/slam3d/edge_se3.h"
#include "g2o/types/slam3d/edge_se3_pointxyz.h"
#include "g2o/core/factory.h"
#include "g2o/core/optimization_algorithm_factory.h"
#include "g2o/core/sparse_optimizer.h"
#include "g2o/core/block_solver.h"
#include "g2o/solvers/dense/linear_solver_dense.h"
#include "g2o/core/optimization_algorithm_levenberg.h"
#include "icp_g2o.hpp"
using namespace std;
using namespace cv;
using namespace Eigen;
using namespace g2o;
//使用图片
//输入当前帧检测到的tag
//维护一张节点地图用于g2o
// One accepted camera frame: the tag-corner observations it produced and the
// camera pose solved for it.
struct ArucoFrame
{
    vector<int> landmark_id;                    // ids of the tags seen in this frame
    vector<Eigen::Vector3d> landmarks_pointXYZ; // corner positions in the camera frame (4 per tag)
    Eigen::Isometry3d robot_pose;               // camera pose in the map frame
};
// Global landmark map: tag ids plus their corner positions (4 consecutive
// entries per tag) expressed in the map frame.
struct ArucoMap
{
    vector<int> landmark_id;                    // one entry per mapped tag
    vector<Eigen::Vector3d> landmarks_pointXYZ; // map-frame corner positions, 4 per tag
};
// Global map / trajectory state shared by buildMap() and main().
ArucoMap ArucoMapGlobal;             // landmark map in the map frame
vector<ArucoFrame> ArucoFrameGlobal; // every accepted frame, in order
// The first robot pose is taken as the map origin. Pose vertex ids count up
// from 0; tag corner vertex ids start at 100, which assumes fewer than 100
// robot poses in total.
int id_vertex_pose=0, id_vertex_pointXYZ_start=100;
void buildMap(vector< int > &ids, vector< vector< Point2f > > &corners, vector< Vec3d > &rvecs, vector< Vec3d > &tvecs, g2o::SparseOptimizer &optimizer)
{
//获取当前帧里的tag
ArucoFrame ArucoFrame_temp;
if(ids.size() != )
{
ArucoFrame_temp.robot_pose = Eigen::Isometry3d::Identity();
for(int i=; i<ids.size(); i++)
{
ArucoFrame_temp.landmark_id.push_back(ids[i]);
//cv转换到eigen下
Mat rot_mat;
Rodrigues(rvecs[i], rot_mat);
Eigen::Matrix3d eigen_r;
Eigen::Vector3d eigen_t;
cv2eigen(rot_mat, eigen_r);
cv2eigen(tvecs[i],eigen_t);
//这里的rt对应的T是从mark坐标系变到相机坐标系
//Point_camera = T_camera-mark * Point_mark
//很重要!!所以这里的四个点是按mark坐标系下的顺序排列的
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(-,,)+eigen_t );
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(,,)+eigen_t );
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(,-,)+eigen_t );
ArucoFrame_temp.landmarks_pointXYZ.push_back( eigen_r * Eigen::Vector3d(-,-,)+eigen_t );
}
ArucoFrameGlobal.push_back(ArucoFrame_temp);
}
else
return ;
//对地图操作
if(ArucoMapGlobal.landmark_id.size() == )//初始化第一帧
{
for(int i=; i<ArucoFrame_temp.landmark_id.size(); i++)
{
ArucoMapGlobal.landmark_id.push_back(ArucoFrame_temp.landmark_id[i]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
//添加pose
VertexSE3* v = new VertexSE3;
v->setEstimate(ArucoFrame_temp.robot_pose);
v->setId(id_vertex_pose++);
optimizer.addVertex(v);
//添加landmark和edge
for(int j=; j<; j++)
{
VertexPointXYZ* l = new VertexPointXYZ;
l->setEstimate(ArucoFrame_temp.landmarks_pointXYZ[i*+j]);
l->setFixed(true);
l->setId(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[i]*+j);//之所以直接用tag的id是因为不会存在相同的tag,因此vertex的序号不会出现一样的。
optimizer.addVertex(l);
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[i]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[i*+]);
e->setParameterId(,);//这句话必须加
optimizer.addEdge(e);
}
}
}
else
{
//寻找当前帧里的tagid是否和map里的id一样
vector<int> id_same, id_different;
for(int i=; i<ArucoFrame_temp.landmark_id.size(); i++)
{
int j;
for(j=; j<ArucoMapGlobal.landmark_id.size(); j++)
{
if(ArucoMapGlobal.landmark_id[j] == ArucoFrame_temp.landmark_id[i])
{
id_same.push_back(i);
break;
}
}
if(j == ArucoMapGlobal.landmark_id.size())
id_different.push_back(i);
}
if(id_different.size() != )//如果有新的id能看到,就要添加进地图里
{
if(id_same.size() != )//必须要同时看到地图里存在的tag以及新的tag,才能将新的tag添加进地图中,因为是根据scan匹配map获得自身位资
{
//先根据icp匹配获得当前帧的位置。
vector<Eigen::Vector3d> landmarkXYZ_common_map;
vector<Eigen::Vector3d> landmarkXYZ_common_frame;
for(int i=; i<ArucoMapGlobal.landmark_id.size();i++)
{
for(int j=; j<id_same.size(); j++)//使用所有能看得到的共视图形
{
if(ArucoMapGlobal.landmark_id[i] == ArucoFrame_temp.landmark_id[id_same[j]])
{
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
break;
}
}
}
Eigen::Isometry3d T;
bundleAdjustment(landmarkXYZ_common_map, landmarkXYZ_common_frame, T);//这个T就是当前摄像头的pose
ArucoFrame_temp.robot_pose = T;
ArucoFrameGlobal.back().robot_pose = T;
//当前摄像头的位姿计算出来以后,更新map中的pose和edge,这一步没有landmark添加进来
VertexSE3* v = new VertexSE3;
v->setEstimate(ArucoFrame_temp.robot_pose);
v->setId(id_vertex_pose++);
optimizer.addVertex(v);
for(int i=; i<id_same.size(); i++)
{
for(int j=; j<; j++)
{
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_same[i]]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[id_same[i]*+j]);
e->setParameterId(,);//这句话必须加
optimizer.addEdge(e);
}
}
//更新map中的landmark和edge
for(int i=; i<id_different.size(); i++)
{
ArucoMapGlobal.landmark_id.push_back(ArucoFrame_temp.landmark_id[id_different[i]]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
ArucoMapGlobal.landmarks_pointXYZ.push_back(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+]);
for(int j=; j<; j++)
{
VertexPointXYZ* l = new VertexPointXYZ;
l->setEstimate(T * ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+j]);
l->setId(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_different[i]]*+j);
optimizer.addVertex(l);
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_different[i]]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[id_different[i]*+j]);
e->setParameterId(,);//这句话必须加
optimizer.addEdge(e);
}
}
}
}
else//如果看到的全是存在的tag就添加优化关系
{
vector<int> id_same;
for(int i=; i<ArucoFrame_temp.landmark_id.size(); i++)
{
int j;
for(j=; j<ArucoMapGlobal.landmark_id.size(); j++)
{
if(ArucoMapGlobal.landmark_id[j] == ArucoFrame_temp.landmark_id[i])
{
id_same.push_back(i);
break;
}
}
}
//先根据icp匹配获得当前帧的位置。
vector<Eigen::Vector3d> landmarkXYZ_common_map;
vector<Eigen::Vector3d> landmarkXYZ_common_frame;
for(int i=; i<ArucoMapGlobal.landmark_id.size();i++)
for(int j=; j<id_same.size(); j++)//使用所有能看得到的共视图形
{
if(ArucoMapGlobal.landmark_id[i] == ArucoFrame_temp.landmark_id[id_same[j]])
{
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_map.push_back(ArucoMapGlobal.landmarks_pointXYZ[i*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
landmarkXYZ_common_frame.push_back(ArucoFrame_temp.landmarks_pointXYZ[id_same[j]*+]);
break;
}
}
Eigen::Isometry3d T;
bundleAdjustment(landmarkXYZ_common_map, landmarkXYZ_common_frame, T);//这个T就是当前摄像头的pose
ArucoFrame_temp.robot_pose = T;
ArucoFrameGlobal.back().robot_pose = T;
VertexSE3* v = new VertexSE3;
v->setEstimate(ArucoFrame_temp.robot_pose);
v->setId(id_vertex_pose++);
optimizer.addVertex(v);
for(int i=; i<id_same.size(); i++)
{
for(int j=; j<; j++)
{
EdgeSE3PointXYZ* e = new EdgeSE3PointXYZ;
e->setVertex(, (optimizer.vertices().find(id_vertex_pose-))->second);
e->setVertex(, (optimizer.vertices().find(id_vertex_pointXYZ_start+ArucoFrame_temp.landmark_id[id_same[i]]*+j))->second);
e->setMeasurement(ArucoFrame_temp.landmarks_pointXYZ[id_same[i]*+j]);
e->setParameterId(,);//这句话必须加
optimizer.addEdge(e);
}
}
}
}
}
namespace {
// Help text and option table for cv::CommandLineParser
// ("{flag | default | description}" entries).
const char* about = "Basic marker detection";
const char* keys =
"{d | 0 | dictionary: DICT_4X4_50=0, DICT_4X4_100=1, DICT_4X4_250=2,"
"DICT_4X4_1000=3, DICT_5X5_50=4, DICT_5X5_100=5, DICT_5X5_250=6, DICT_5X5_1000=7, "
"DICT_6X6_50=8, DICT_6X6_100=9, DICT_6X6_250=10, DICT_6X6_1000=11, DICT_7X7_50=12,"
"DICT_7X7_100=13, DICT_7X7_250=14, DICT_7X7_1000=15, DICT_ARUCO_ORIGINAL = 16}"
"{v | | Input from video file, if ommited, input comes from camera }"
"{ci | 0 | Camera id if input doesnt come from video (-v) }"
"{c | log270 | Camera intrinsic parameters. Needed for camera pose }"
"{l | 0.06 | Marker side lenght (in meters). Needed for correct scale in camera pose }"
"{dp | detector_params.yml | File of marker detector parameters }"
"{r | | show rejected candidates too }";
}
// Loads "camera_matrix" and "distortion_coefficients" from an OpenCV
// calibration file (YAML/XML). Returns false if the file cannot be opened;
// missing keys simply leave the output Mats empty.
static bool readCameraParameters(string filename, Mat &camMatrix, Mat &distCoeffs) {
    FileStorage storage(filename, FileStorage::READ);
    if(!storage.isOpened())
        return false;
    storage["camera_matrix"] >> camMatrix;
    storage["distortion_coefficients"] >> distCoeffs;
    return true;
}
static bool readDetectorParameters(string filename, Ptr<aruco::DetectorParameters> ¶ms) {
FileStorage fs(filename, FileStorage::READ);
if(!fs.isOpened())
return false;
fs["adaptiveThreshWinSizeMin"] >> params->adaptiveThreshWinSizeMin;
fs["adaptiveThreshWinSizeMax"] >> params->adaptiveThreshWinSizeMax;
fs["adaptiveThreshWinSizeStep"] >> params->adaptiveThreshWinSizeStep;
fs["adaptiveThreshConstant"] >> params->adaptiveThreshConstant;
fs["minMarkerPerimeterRate"] >> params->minMarkerPerimeterRate;
fs["maxMarkerPerimeterRate"] >> params->maxMarkerPerimeterRate;
fs["polygonalApproxAccuracyRate"] >> params->polygonalApproxAccuracyRate;
fs["minCornerDistanceRate"] >> params->minCornerDistanceRate;
fs["minDistanceToBorder"] >> params->minDistanceToBorder;
fs["minMarkerDistanceRate"] >> params->minMarkerDistanceRate;
fs["doCornerRefinement"] >> params->doCornerRefinement;
fs["cornerRefinementWinSize"] >> params->cornerRefinementWinSize;
fs["cornerRefinementMaxIterations"] >> params->cornerRefinementMaxIterations;
fs["cornerRefinementMinAccuracy"] >> params->cornerRefinementMinAccuracy;
fs["markerBorderBits"] >> params->markerBorderBits;
fs["perspectiveRemovePixelPerCell"] >> params->perspectiveRemovePixelPerCell;
fs["perspectiveRemoveIgnoredMarginPerCell"] >> params->perspectiveRemoveIgnoredMarginPerCell;
fs["maxErroneousBitsInBorderRate"] >> params->maxErroneousBitsInBorderRate;
fs["minOtsuStdDev"] >> params->minOtsuStdDev;
fs["errorCorrectionRate"] >> params->errorCorrectionRate;
return true;
}
// Entry point: steps through the captured images, detects ArUco markers and
// their poses, lets the user accept a frame into the map ('c') or quit ('q'),
// then runs a final g2o optimisation over the whole pose/landmark graph and
// saves it before and after ("before.g2o" / "after.g2o").
// Numeric literals below were lost in the source dump and have been
// reconstructed; values marked TODO(review) could not be recovered exactly.
int main(int argc, char *argv[]) {
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);
    int dictionaryId = parser.get<int>("d");
    bool showRejected = parser.has("r");
    bool estimatePose = parser.has("c");
    float markerLength = parser.get<float>("l");
    Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
    if(parser.has("dp")) {
        bool readOk = readDetectorParameters(parser.get<string>("dp"), detectorParams);
        if(!readOk) {
            cerr << "Invalid detector parameters file" << endl;
            return 0;
        }
    }
    int camId = parser.get<int>("ci");
    String video;
    if(parser.has("v")) {
        video = parser.get<String>("v");
    }
    if(!parser.check()) {
        parser.printErrors();
        return 0;
    }
    Ptr<aruco::Dictionary> dictionary =
        aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));
    Mat camMatrix, distCoeffs;
    if(estimatePose) {
        bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
        if(!readOk) {
            cerr << "Invalid camera file" << endl;
            return 0;
        }
    }
    VideoCapture inputVideo;  // unused: input comes from still images below
    /* int waitTime;
    if(!video.empty()) {
        inputVideo.open(video);
        waitTime = 0;
    } else {
        inputVideo.open(camId);
        waitTime = 10;
    }
    inputVideo.set(CAP_PROP_FRAME_WIDTH, 640);
    inputVideo.set(CAP_PROP_FRAME_HEIGHT , 480);
    */
    double totalTime = 0;
    int totalIterations = 0;
    g2o::SparseOptimizer optimizer;  // global optimiser
    // The SE3 offset parameter with id 0 is mandatory for EdgeSE3PointXYZ
    // (edges bind to it via setParameterId(0,0)).
    g2o::ParameterSE3Offset* cameraOffset = new g2o::ParameterSE3Offset;
    cameraOffset->setId(0);
    optimizer.addParameter(cameraOffset);
    optimizer.setVerbose(true);  // print per-iteration debug output
    // BlockSolverX handles variable block sizes (pose 6-dof, landmark 3-dof).
    g2o::BlockSolverX::LinearSolverType * linearSolver;
    linearSolver = new g2o::LinearSolverDense<g2o::BlockSolverX::PoseMatrixType>();
    g2o::BlockSolverX * solver_ptr
        = new g2o::BlockSolverX(linearSolver);
    // Levenberg-Marquardt optimisation
    g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);
    optimizer.setAlgorithm(solver);
    Mat inputImage = imread("./squre_80_120/0.jpg");
    // TODO(review): total number of captured images — original value lost.
    const int num_image_total = 10;
    int num_image = 1;  // 0.jpg is already loaded above
    while(true) {
        Mat image, imageCopy;
        //inputVideo.retrieve(image);
        image = inputImage;
        double tick = (double)getTickCount();
        vector< int > ids;
        vector< vector< Point2f > > corners, rejected;
        vector< Vec3d > rvecs, tvecs;
        // detect markers and estimate pose
        aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
        if(estimatePose && ids.size() > 0)
            aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
                                             tvecs);
        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        // draw results
        image.copyTo(imageCopy);
        if(ids.size() > 0)
        {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);
            if(estimatePose) {
                for(unsigned int i = 0; i < ids.size(); i++)
                    aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
                                    markerLength * 0.5f);
            }
        }
        if(showRejected && rejected.size() > 0)
            aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));
        imshow("out", imageCopy);
        int key = waitKey(0);  // still images: block until a key is pressed
        cout << key << endl;
        if(key == 'q') break;          // quit
        if(key == 'c')                 // accept this frame into the map
            buildMap(ids, corners, rvecs, tvecs, optimizer);
        string string_temp = "./squre2/" + to_string(num_image++) + ".jpg";
        if(num_image == num_image_total)
            break;
        inputImage = imread(string_temp);
    }
    // Final global optimisation over all poses and landmarks.
    optimizer.save("before.g2o");
    optimizer.initializeOptimization();
    optimizer.optimize(100);  // TODO(review): iteration count lost in the source
    optimizer.save("after.g2o");
    return 0;
}
}