跟高翔学RGBD-SLAM(5) 视觉里程计
2018-01-26 15:21
507 查看
#include <iostream> #include <fstream> #include <sstream> using namespace std; #include "slamBase.h" FRAME readFrame( int index, ParameterReader& pd ); double normofTransform( cv::Mat rvec, cv::Mat tvec ); int main( int argc, char** argv ) { ParameterReader pd; int startIndex = atoi( pd.getData( "start_index" ).c_str() ); int endIndex = atoi( pd.getData( "end_index" ).c_str() ); // initialize cout<<"Initializing ..."<<endl; int currIndex = startIndex; FRAME lastFrame = readFrame( currIndex, pd ); // lastFrame数据 // 我们总是在比较currFrame和lastFrame string detector = pd.getData( "detector" ); string descriptor = pd.getData( "descriptor" ); CAMERA_INTRINSIC_PARAMETERS camera = getDefaultCamera(); computeKeyPointsAndDesp( lastFrame, detector, descriptor ); PointCloud::Ptr cloud = image2PointCloud( lastFrame.rgb, lastFrame.depth, camera ); pcl::visualization::CloudViewer viewer("viewer"); bool visualize = pd.getData("visualize_pointcloud")==string("yes"); int min_inliers = atoi( pd.getData("min_inliers").c_str() ); double max_norm = atof( pd.getData("max_norm").c_str() ); for ( currIndex=startIndex+1; currIndex<endIndex; currIndex++ ) { cout<<"Reading files "<<currIndex<<endl; FRAME currFrame = readFrame( currIndex,pd ); // currFrame数据 computeKeyPointsAndDesp( currFrame, detector, descriptor ); // 比较currFrame 和 lastFrame RESULT_OF_PNP result = estimateMotion( lastFrame, currFrame, camera ); //inliers不够,放弃该帧 if ( result.inliers < min_inliers ) continue; double norm = normofTransform(result.rvec, result.tvec); cout<<"norm = "<<norm<<endl; // 运动范围是否太大,放弃该帧 if ( norm >= max_norm ) continue; Eigen::Isometry3d T = cvMat2Eigen( result.rvec, result.tvec ); cout<<"T="<<T.matrix()<<endl; cloud = joinPointCloud( cloud, currFrame, T, camera ); if ( visualize == true ) viewer.showCloud( cloud ); lastFrame = currFrame; } pcl::io::savePCDFile( "data/result.pcd", *cloud ); return 0; } //读取帧数据的函数,告诉他我要读第几帧,他就返回第几帧的FRAME结构体 FRAME readFrame( int index, ParameterReader& pd ) { FRAME f; string 
rgbDir = pd.getData("rgb_dir"); string depthDir = pd.getData("depth_dir"); string rgbExt = pd.getData("rgb_extension"); string depthExt = pd.getData("depth_extension"); stringstream ss; ss<<rgbDir<<index<<rgbExt; string filename; ss>>filename; f.rgb = cv::imread( filename ); ss.clear(); filename.clear(); ss<<depthDir<<index<<depthExt; ss>>filename; f.depth = cv::imread( filename, -1 ); return f; } //度量运动的大小 |min(R,2PAI-R)|+|T| double normofTransform( cv::Mat rvec, cv::Mat tvec ) { return fabs(min(cv::norm(rvec), 2*M_PI-cv::norm(rvec)))+ fabs(cv::norm(tvec)); }如何检测匹配失败呢?我们采用了三个方法:
1:去掉goodmatch太少的帧,最少的goodmatch定义为:
min_good_match=10
2:去掉solvePnPRansac里,inlier较少的帧,同理定义为:
min_inliers=5
3:去掉求出来的变换矩阵太大的情况。因为假设运动是连贯的,两帧之间不会隔的太远:
max_norm=0.3
如何知道两帧之间不隔太远呢?我们计算了一个度量运动大小的值:|min(‖r‖, 2π−‖r‖)| + ‖t‖,即代码中的 normofTransform 函数。
相关文章推荐
- SLAM第二篇:视觉里程计
- SLAM入门之视觉里程计(6):相机标定 张正友经典标定法详解
- slam学习(5)-vo (visual odometry)视觉里程计和图优化工具g2o
- SLAM入门之视觉里程计(5):单应矩阵
- SLAM入门之视觉里程计(3):两视图对极约束 基础矩阵
- SLAM入门之视觉里程计(4):基础矩阵的估计
- SLAM入门之视觉里程计(3):两视图对极约束 基础矩阵
- SLAM入门之视觉里程计(4):基础矩阵的估计
- SLAM入门之视觉里程计(6):相机标定 张正友经典标定法详解
- SLAM入门之视觉里程计(1):特征点的匹配
- 跟高翔学RGBD-SLAM(4)点云拼接
- SLAM入门之视觉里程计(1):特征点的匹配
- SLAM入门之视觉里程计(2):相机模型(内参数,外参数)
- 使用RGBD深度摄像头实现视觉里程计
- SLAM入门之视觉里程计(2):相机模型(内参数,外参数)
- 跟高翔学RGBD-SLAM(1)第一个CMake项目
- 【视觉 SLAM 1】 视觉SLAM- RGBD 加 语义分割 1 (需要RGBD相机)
- SLAM入门之视觉里程计(5):单应矩阵
- 跟高翔学RGBD-SLAM(2)从图像到点云
- 视觉里程计 特征点法