
OpenCV: computing the rotation matrix R and translation matrix T

2016-12-05 17:15
Problem: given the camera intrinsic matrix K and two images taken from different viewpoints, recover the camera's rotation matrix R and translation vector T.
Steps:

1. Extract feature points from the two images (SURF is used in this post).

2. Match the feature points to obtain point correspondences.

3. Convert the matched KeyPoints to Mat and compute the fundamental matrix F.

4. Compute the essential matrix E from F using E = K'^T * F * K (both images come from the same camera here, so K' = K).

5. Decompose E with SVD to obtain R and T (see the sketch right after this list).
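
As background for step 5: the essential matrix factors as E = U * diag(1, 1, 0) * V^T, and the relative pose is R = U*W*V^T or R = U*W^T*V^T with t = ±u3 (the third column of U), where W = [0 -1 0; 1 0 0; 0 0 1]. This gives four candidate (R, t) pairs, and only the one that places triangulated points in front of both cameras is physically valid. The helper below is an illustrative sketch (not from the original post) that enumerates the four candidates with cv::SVD; the function name decomposeEssential is my own, and the cheirality check is left to the caller.

#include <opencv2/core/core.hpp>
#include <vector>
using namespace cv;
using namespace std;

// Illustrative helper: list the four (R, t) candidates obtained from an essential matrix.
void decomposeEssential(const Mat &E, vector<Mat> &Rs, vector<Mat> &ts)
{
    SVD svd(E, SVD::FULL_UV);
    Mat W = (Mat_<double>(3, 3) <<
             0, -1, 0,
             1,  0, 0,
             0,  0, 1);

    Mat R1 = svd.u * W     * svd.vt;
    Mat R2 = svd.u * W.t() * svd.vt;
    // a proper rotation has det(R) = +1; flip the sign if the SVD returned det(R) = -1
    if (determinant(R1) < 0) R1 = -R1;
    if (determinant(R2) < 0) R2 = -R2;

    Mat t = svd.u.col(2);   // translation is recovered only up to sign and scale

    Rs.clear(); ts.clear();
    Rs.push_back(R1); ts.push_back( t);
    Rs.push_back(R1); ts.push_back(-t);
    Rs.push_back(R2); ts.push_back( t);
    Rs.push_back(R2); ts.push_back(-t);
}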

Theory: see the reference article http://blog.csdn.net/xiao4399/article/details/48037287
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
using namespace std;
using namespace cv;

int main(int argc, char *argv[])
{
    // load the two input images
    Mat img1 = imread("WIN_20161202_09_04_28_Pro.jpg");
    Mat img2 = imread("WIN_20161202_09_04_47_Pro.jpg");

    if (!img1.data || !img2.data)
        return -1;

    // step1: detect the keypoints using the SURF detector
    int minHessian = 400;

    SurfFeatureDetector detector(minHessian);

    vector<KeyPoint> keypoints1, keypoints2;

    detector.detect(img1, keypoints1);
    detector.detect(img2, keypoints2);

    // step2: calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors1, descriptors2;
    extractor.compute(img1, keypoints1, descriptors1);
    extractor.compute(img2, keypoints2, descriptors2);

    // step3: match descriptor vectors with a brute-force matcher (cross-check enabled)
    BFMatcher matcher(NORM_L2, true);
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);

    // draw matches
    Mat imgMatches;
    drawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);

    resize(imgMatches, imgMatches, Size(192*5, 108*5));
    //  namedWindow("Matches");
    //  imshow("brute force Matches", imgMatches);

    int ptcount = (int)matches.size();
    Mat p1(ptcount, 2, CV_32F);
    Mat p2(ptcount, 2, CV_32F);

    // convert the matched keypoints to Mat (one row per point: x, y)
    Point2f pt;
    for (int i = 0; i < ptcount; i++)
    {
        pt = keypoints1[matches[i].queryIdx].pt;
        p1.at<float>(i, 0) = pt.x;
        p1.at<float>(i, 1) = pt.y;

        pt = keypoints2[matches[i].trainIdx].pt;
        p2.at<float>(i, 0) = pt.x;
        p2.at<float>(i, 1) = pt.y;
    }

    // use RANSAC to estimate the fundamental matrix F
    Mat fundamental;
    vector<uchar> RANSACStatus;
    fundamental = findFundamentalMat(p1, p2, RANSACStatus, FM_RANSAC);

    cout << "F=" << fundamental << endl;

    // camera intrinsics (from prior calibration)
    double fx, fy, cx, cy;
    fx = 700.388086;
    fy = 700.784113;
    cx = 353.260055;
    cy = 223.483445;

    // calibration (intrinsic) matrix K
    Mat K = cv::Mat::eye(3, 3, CV_64FC1);
    K.at<double>(0, 0) = fx;
    K.at<double>(1, 1) = fy;
    K.at<double>(0, 2) = cx;
    K.at<double>(1, 2) = cy;

    cout << "K=" << K << endl;

    // transpose of K
    Mat Kt = K.t();

    cout << "Kt=" << Kt << endl;

    // E = K^T * F * K (same camera for both views)
    Mat E = Kt * fundamental * K;
    cout << "E=" << E << endl;

    // decompose E with SVD: E = U * diag(1,1,0) * V^T
    SVD svd(E);

    // W = [0 -1 0; 1 0 0; 0 0 1]
    Mat W = (Mat_<double>(3, 3) <<
             0, -1, 0,
             1,  0, 0,
             0,  0, 1);

    // one of the four possible solutions: R = U*W*Vt (or U*Wt*Vt), t = +/- third column of U;
    // a cheirality check is needed to pick the physically valid pair
    Mat_<double> R = svd.u * W * svd.vt;
    Mat_<double> t = svd.u.col(2);
    cout << "R=" << R << endl;
    cout << "t=" << t << endl;

    // the code below draws the matches kept after RANSAC filtering (inliers only)
    /*
    // count the outliers reported by RANSAC
    int outlinerCount = 0;
    for (int i = 0; i < ptcount; i++)
    {
        if (RANSACStatus[i] == 0)
            outlinerCount++;
    }

    // collect the inliers
    vector<Point2f> inliner1, inliner2;
    vector<DMatch> inlierMatches;
    int inlinerCount = ptcount - outlinerCount;
    inliner1.resize(inlinerCount);
    inliner2.resize(inlinerCount);
    inlierMatches.resize(inlinerCount);

    int inlinerMatchesCount = 0;
    for (int i = 0; i < ptcount; i++)
    {
        if (RANSACStatus[i] != 0)
        {
            inliner1[inlinerMatchesCount].x = p1.at<float>(i, 0);
            inliner1[inlinerMatchesCount].y = p1.at<float>(i, 1);
            inliner2[inlinerMatchesCount].x = p2.at<float>(i, 0);
            inliner2[inlinerMatchesCount].y = p2.at<float>(i, 1);
            inlierMatches[inlinerMatchesCount].queryIdx = inlinerMatchesCount;
            inlierMatches[inlinerMatchesCount].trainIdx = inlinerMatchesCount;
            inlinerMatchesCount++;
        }
    }

    vector<KeyPoint> key1(inlinerMatchesCount);
    vector<KeyPoint> key2(inlinerMatchesCount);
    KeyPoint::convert(inliner1, key1);
    KeyPoint::convert(inliner2, key2);

    Mat out;
    drawMatches(img1, key1, img2, key2, inlierMatches, out);
    resize(out, out, Size(192*5, 108*5));
    imshow("good match result", out);
    */
    waitKey();
    return 0;
}
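
Note that the listing above keeps only one of the four possible (R, t) combinations and does not resolve the sign ambiguity of t. If a newer OpenCV (3.x or later) is available, findEssentialMat plus recoverPose estimate the essential matrix with RANSAC directly and run the cheirality check internally. A minimal sketch, assuming OpenCV 3.x; the function name poseFromMatches and the vectors pts1/pts2 (the matched pixel coordinates) are placeholders of my own:

#include <iostream>
#include <vector>
#include <opencv2/calib3d/calib3d.hpp>
using namespace std;
using namespace cv;

// pts1 / pts2: matched pixel coordinates in image 1 and image 2, K: 3x3 intrinsic matrix
void poseFromMatches(const vector<Point2f> &pts1, const vector<Point2f> &pts2, const Mat &K)
{
    Mat mask;  // inlier mask filled by RANSAC
    Mat E = findEssentialMat(pts1, pts2, K, RANSAC, 0.999, 1.0, mask);

    Mat R, t;
    // recoverPose tries the four decompositions and keeps the one that puts
    // the triangulated points in front of both cameras (cheirality check)
    recoverPose(E, pts1, pts2, K, R, t, mask);

    cout << "R=" << R << endl;
    cout << "t=" << t << endl;  // unit-length translation; the true scale is unobservable
}

Because t is recovered only up to scale, recoverPose returns a unit-length translation; absolute scale has to come from other information such as a known baseline.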
Tags: computer vision, opencv