
opencv学习系列:实例练习(含多个实例)

2017-12-25 15:42
//-----------------------------------OpenCV学习(续9月份)-------------------------------------
//  程序名称:OpenCV程序模板样式
//  所用IDE版本:        Visual Studio 2013
//  开发所用OpenCV版本:        2.4.9
//  2016年10月 Created by 孙立波

//包含程序所依赖的头文件:为方便起见把经常用的头文件都写在这里(前三必须包含),也可以用#include "opencv.hpp"包含下面所有头文件
#include <opencv2\core\core.hpp>    //程序库核心功能,基本数据与结构和算法函数
#include <opencv2\imgproc\imgproc.hpp>  //包含主要的图像处理函数
#include <opencv2\highgui\highgui.hpp>  //包含图像、视频读写函数和部分用户界面函数

//出现#include "cv.h"表示老式风格,并包含老式所有头文件
//#include <opencv2\features2d\features2d.hpp> //包含特征点检测器、描述子及特征点匹配框架
//#include <opencv2\nonfree\nonfree.hpp>         //SURF和SIFT会用到

#include <iostream>

//包含程序所使用的命名空间
using namespace cv;         //opencv2的名字空间
using namespace std;

//描述:控制台应用程序的入口函数,程序从这里开始执行

int main(int argc, char** argv)     //argv为命令行参数字符串数组
{

return 0;
}
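下面把上面的空模板填上最简内容,示意三个头文件各自的作用;这只是一个草图,图像文件名 lena.jpg 为示例假设。

#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat img = imread("lena.jpg");        //highgui:读入图像(路径为示例)
	if (img.empty())
	{
		cout << "读图失败" << endl;
		return -1;
	}
	Mat gray;
	cvtColor(img, gray, CV_BGR2GRAY);    //imgproc:彩色转灰度
	imshow("原图", img);                 //highgui:显示窗口
	imshow("灰度图", gray);
	waitKey(0);
	return 0;
}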
**************************************************************************************************************

//-----------------------------------OpenCV学习15-------------------------------------
//  程序名称:生成标定板
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
//隐藏控制台窗口
#pragma comment(linker, "/subsystem:\"windows\" /entry:\"mainCRTStartup\"")

int main(int argc, char*argv[])
{
int width = 140;//棋盘格宽度
int height = 140;//棋盘格高度
IplImage *src = cvCreateImage(cvSize(1120, 1120), IPL_DEPTH_8U, 1);
cvZero(src);
for (int i = 0; i<src->width; i++)
{
for (int j = 0; j<src->height; j++)
{
if ((i / width + j / height) % 2 == 0)
{
src->imageData[i*src->widthStep + j*src->nChannels] = 255;
}
}
}

cvNamedWindow("src");
cvShowImage("src", src);

cvSaveImage("ChessBoard.bmp", src, 0);
cvWaitKey(0);

return 0;

}

// ----------------------------------OpenCV学习16-------------------------------------
//  程序摘要:读取视频序列;
/*  自定义视频处理类VideoProcessor:既可以处理视频,也可以处理独立的图像序列。前者使用帧处理回调函数(process),
后者使用自定义的帧处理类(实现FrameProcessor接口的对象->process方法)。示例的回调函数canny()先把每帧彩色图像转为灰度,
再依次调用Canny和threshold函数*/
//  还用到写入视频帧的VideoWriter类:VideoProcessor的方法内部调用写入类实例的成员函数,并通过成员函数重载区分视频与独立图像序列两种输出
//  以上概括为:程序后半段用自定义视频处理类封装视频捕捉类、帧处理类和视频帧写入类
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
using namespace cv;
using namespace std;

#include "videoprocessor.h"

void draw(const cv::Mat& img, cv::Mat& out) {

img.copyTo(out);
cv::circle(out, cv::Point(100, 100), 5, cv::Scalar(255, 0, 0), 2);
}

// processing function:真正的图像处理函数,在main中注册为回调后对每一帧执行
void canny(cv::Mat& img, cv::Mat& out) {

// Convert to gray
if (img.channels() == 3)
cv::cvtColor(img, out, CV_BGR2GRAY);
// Compute Canny edges
cv::Canny(out, out, 100, 200);
// Invert the image
cv::threshold(out, out, 128, 255, cv::THRESH_BINARY_INV);
}

int main()
{
// 打开视频文件
cv::VideoCapture capture("D:\\workplace\\opencv_training\\bike.avi");
//cv::VideoCapture capture("http://www.laganiere.name/bike.avi");
// 核实视频文件是否确实打开
if (!capture.isOpened())
return 1;
// Get the frame rate
double rate = capture.get(CV_CAP_PROP_FPS);
std::cout << "Frame rate: " << rate << "fps" << std::endl;

bool stop(false);
cv::Mat frame; // current video frame
cv::namedWindow("Extracted Frame");

// Delay between each frame
// corresponds to video frame rate
int delay = 1000 / rate;//按原始帧率播放;调小或调大delay可使播放加快或放慢
long long i = 0;
std::string b = "bike";
std::string ext = ".bmp";
// for all frames in video
while (!stop) {

// read next frame if any
if (!capture.read(frame))
break;

cv::imshow("Extra   cted Frame", frame);

std::string name(b);
// note: some MinGW compilers generate an error for this line
// this is a compiler bug
// try: std::ostringstream ss; ss << i; name+= ss.rdbuf(); i++;
//      name+=std::to_string(i++);
std::ostringstream ss; ss << i; name += ss.str(); i++;
name += ext;

std::cout << name << std::endl;

cv::imwrite(name, frame);

// introduce a delay
// or press key to stop
if (cv::waitKey(delay) >= 0)//当超过指定时间,没有按键盘时,返回值-1
stop = true;
}

// Close the video file
capture.release();

cv::waitKey();

// Now using the VideoProcessor class用自定义视频处理类封装视频捕捉类、帧处理类和视频帧写入类

// Create instance
VideoProcessor processor;

// Open video file
processor.setInput("D:\\workplace\\opencv_training\\bike.avi");

// Declare a window to display the video注册窗口内存预备显示用
processor.displayInput("Input Video");//彩色图像
processor.displayOutput("Output Video");//灰度图像

// Play the video at the original frame rate
processor.setDelay(1000. / processor.getFrameRate());

// 此为注册回调函数Set the frame processor callback function*****设置类的实例的回调函数方法为main前声明的canny()函数!!!
processor.setFrameProcessor(canny);

// output a video
//传入值-1代表不采用与输入一致的编解码方式,而是弹出编解码器选择对话框,15代表输出帧率
processor.setOutput("D:\\workplace\\opencv_training\\bike1.avi", -1, 15);

// stop the process at this frame,只处理前51帧并写入输出视频
processor.stopAtFrameNo(51);

// Start the process
processor.run();

cv::waitKey();

return 0;
}
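补充:上面的程序依赖未随文给出的 videoprocessor.h。下面是按前文注释的描述假设出的一个最小接口草图,只覆盖本程序用到的成员(setInput、setFrameProcessor、setOutput、setDelay、displayInput/displayOutput、stopAtFrameNo、getFrameRate、run),并非原实现,仅作参考。

#ifndef VIDEOPROCESSOR_H
#define VIDEOPROCESSOR_H
#include <string>
#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\highgui\highgui.hpp>

//帧处理类接口:自定义帧处理类需实现process方法
class FrameProcessor {
public:
	virtual void process(cv::Mat& input, cv::Mat& output) = 0;
};

class VideoProcessor {
	cv::VideoCapture capture;                 //视频捕捉对象
	void (*process)(cv::Mat&, cv::Mat&);      //帧处理回调函数指针
	FrameProcessor* frameProcessor;           //帧处理类接口指针(与回调二选一)
	std::string windowNameInput;              //输入显示窗口名
	std::string windowNameOutput;             //输出显示窗口名
	int delay;                                //帧间延时(毫秒)
	long fnumber;                             //已处理帧数
	long frameToStop;                         //停止处理的帧号(-1表示不限制)
	bool stop;
	cv::VideoWriter writer;                   //视频写入对象
public:
	VideoProcessor() : process(0), frameProcessor(0), delay(0), fnumber(0), frameToStop(-1), stop(false) {}
	bool setInput(const std::string& filename) { fnumber = 0; return capture.open(filename); }
	void setFrameProcessor(void (*p)(cv::Mat&, cv::Mat&)) { frameProcessor = 0; process = p; }
	void setFrameProcessor(FrameProcessor* fp) { process = 0; frameProcessor = fp; }
	bool setOutput(const std::string& file, int codec, double fps) {
		cv::Size size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
		return writer.open(file, codec, fps, size);
	}
	void displayInput(const std::string& wn) { windowNameInput = wn; cv::namedWindow(wn); }
	void displayOutput(const std::string& wn) { windowNameOutput = wn; cv::namedWindow(wn); }
	void setDelay(int d) { delay = d; }
	double getFrameRate() { return capture.get(CV_CAP_PROP_FPS); }
	void stopAtFrameNo(long frame) { frameToStop = frame; }
	void run() {
		cv::Mat frame, output;
		while (!stop && capture.read(frame)) {
			if (!windowNameInput.empty()) cv::imshow(windowNameInput, frame);
			if (process) process(frame, output);                             //回调函数方式
			else if (frameProcessor) frameProcessor->process(frame, output); //帧处理类方式
			else output = frame;
			if (writer.isOpened()) {
				cv::Mat toWrite;
				if (output.channels() == 1) cv::cvtColor(output, toWrite, CV_GRAY2BGR);//写入器默认按彩色帧写
				else toWrite = output;
				writer.write(toWrite);
			}
			if (!windowNameOutput.empty()) cv::imshow(windowNameOutput, output);
			if (cv::waitKey(delay) >= 0) stop = true;                        //按键提前结束
			if (frameToStop >= 0 && ++fnumber == frameToStop) stop = true;   //到达指定帧号后停止
		}
	}
};
#endif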
//-----------------------------------OpenCV学习17-------------------------------------
//  程序名称:利用OpenCV做标定板,可自定义!!!
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
int main()
{
//---生成标定图

int dd = 80;        //棋盘格边长,像素为单位
int dx = 3;     //行:白块开头,竖向共2*dx=6格,对应5个内角点
int dy = 4;     //列:白块开头,横向共2*dy=8格,对应7个内角点
//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//注意:Size的第一个参数为宽(列数),第二个为高(行数)!!
Mat img(Size(2 * dy * dd, 2 * dx * dd), CV_8UC1, Scalar(0));
imshow("sss", img);
int flag = 0;
for (int i = 0; i < 2 * dx; i++)
for (int j = 0; j < 2 * dy; j++)
{
flag = (i + j) % 2;
if (flag == 0)
{
for (int m = i*dd; m < (i + 1)*dd; m++)
for (int n = j*dd; n < (j + 1)*dd; n++)
//or *((uchar *)(img.data + m * img.step) + n) = 255;
*(img.data + m * img.step + n * img.elemSize()) = 255;
//elemSize为一个像素所占用字节个数
//*(img->imageData+m*img->widthStep+n)=255;
}

}
//---END生成标定图
imwrite("棋盘格标定图.bmp", img);
cvNamedWindow("棋盘格", 1);
imshow("棋盘格", img);
cvWaitKey(0);
cvDestroyWindow("棋盘格");
}
//-----------------------------------OpenCV学习17-------------------------------------
//  程序名称:利用OpenCV做标定板,标定图的格子大小80*80像素,共10*10个黑白格
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
int main()
{
//---生成标定图

int dx = 80;        //棋盘格大小,像素为单位
int dy = 5;     //棋盘格数目
Mat img(Size(2 * dx*dy, 2 * dx*dy), CV_8UC1, Scalar(0));
int flag = 0;
for (int i = 0; i<2 * dy; i++)
for (int j = 0; j<2 * dy; j++)
{
flag = (i + j) % 2;
if (flag == 0)
{
for (int m = i*dx; m<(i + 1)*dx; m++)
for (int n = j*dx; n<(j + 1)*dx; n++)
*((uchar *)(img.data + m * img.step) + n) = 255;
//or(*(img.data + m * img.step+n * img.elemSize())) = 255;
//elemSize为一个像素所占用字节个数
//*(img->imageData+m*img->widthStep+n)=255;
}

}
//---END生成标定图
imwrite("棋盘格标定图.bmp", img);
cvNamedWindow("棋盘格", 1);
imshow("棋盘格", img);
cvWaitKey(0);
cvDestroyWindow("棋盘格");
}
//-----------------------------------OpenCV学习18-------------------------------------
//  程序名称:标定
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <iomanip>
#include <fstream>
using namespace cv;
using namespace std;
int main()
{
double time0 = static_cast<double>(getTickCount());
ofstream fout("caliberation_result.txt");  /**    保存定标结果的文件     **/

/************************************************************************
读取每一幅图像,从中提取出角点,然后对角点进行亚像素精确化
*************************************************************************/
cout << "开始提取角点………………" << endl;
int image_count = 21;                     /****    图像数量        ****/
Size image_size;                          /****    图像的尺寸      ****/
Size board_size = Size(9, 6);             /****    定标板上每行、列的角点数       ****/
vector<Point2f> corners;                  /****    缓存每幅图像上检测到的角点     ****/
vector<vector<Point2f>>  corners_Seq;     /****    保存检测到的所有角点           ****/
vector<Mat>  image_Seq;

int count = 0;
for (int i = 0; i != image_count; i++)
{
cout << "Frame #" << i + 1 << "..." << endl;
/*或者:
std::stringstream StrStm;
//为了将i转化为字符型,用StrStm做中介
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += ".jpg";
Mat image = imread("D:\\workplace\\opencv_training\\mytrainings\\mytest4\\color2\\img" + imageFileName);
*/
std::stringstream str;
str << "D:\\workplace\\opencv_training\\mytrainings\\mytest5\\color3\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
std::cout << str.str() << std::endl;
Mat image = cv::imread(str.str());
image_size = image.size();
//image_size = Size(image.cols , image.rows);
/* 提取角点 */
Mat imageGray;
cvtColor(image, imageGray, CV_BGR2GRAY);   //imread读入的是BGR顺序
bool patternfound = findChessboardCorners(image, board_size, corners, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE +
CALIB_CB_FAST_CHECK);
if (!patternfound)
{
cout << "can not find chessboard corners!\n";
continue;
}
else
{
/*
亚像素精确化 :迭代过程的终止条件可以是最大迭代次数CV_TERMCRIT_ITER类型,或者是设定的精度CV_TERMCRIT_EPS类型(或者是两
者的组合)。终止条件的设置在极大程度上影响最终得到的亚像素值的精度。在此,指定为0.10,则求得的亚像素级精度为像素的十分
之一
*/
cornerSubPix(imageGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
//Size(11, 11)为搜索窗口的一半尺寸,Size(-1, -1)死区的一半尺寸,死区为不对搜索区的中央位置做求和运算的区域。/
//它是用来避免自相关矩阵出现的某些可能的奇异性。当值为(-1,-1)表示没有死区。
//TermCriteria为求角点的迭代过程的终止条件。即角点位置的确定,要么迭代数大于某个设定值,要么精确度达到某个设定值。
//criteria可以是最大迭代数目,或者是设定的精确度,也可以是它们的组合。
/* 绘制检测到的角点并保存 */
Mat imageTemp = image.clone();
for (int j = 0; j < corners.size(); j++)
{
circle(imageTemp, corners[j], 10, Scalar(0, 0, 255), 2, 8, 0);
}
string imageFileName;
std::stringstream StrStm;
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += "_corner.jpg";
//保存提取角点的图像
imwrite(imageFileName, imageTemp);
cout << "Frame corner#" << i + 1 << "...end" << endl;

count = count + corners.size();
//将该角点压入角点序列堆栈
corners_Seq.push_back(corners);
}
//将处理过的图像压入源图像堆栈
image_Seq.push_back(image);
}
cout << "角点提取完成!\n";

/************************************************************************
摄像机定标
*************************************************************************/
cout << "开始定标………………" << endl;
Size square_size = Size(20, 20);                                      /**** 实际测量得到的定标板上每个棋盘格的大小 ****/
vector<vector<Point3f>>  object_Points;                               /**** 保存定标板上角点的三维坐标   ****/

Mat image_points = Mat(1, count, CV_32FC2, Scalar::all(0));          /***** 保存提取的所有角点   *****/
vector<int>  point_counts;                                           /***** 每幅图像中角点的数量 ****/
Mat intrinsic_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0));          /***** 摄像机内参数矩阵    ****/
Mat distortion_coeffs = Mat(1, 4, CV_32FC1, Scalar::all(0));         /* 摄像机的4个畸变系数:k1,k2,p1,p2 */
vector<cv::Mat> rotation_vectors;                                    /* 每幅图像的旋转向量 */
vector<cv::Mat> translation_vectors;                                 /* 每幅图像的平移向量 */

/* 初始化定标板上角点的三维坐标 */
for (int t = 0; t<image_count; t++)
{
vector<Point3f> tempPointSet;
for (int i = 0; i<board_size.height; i++)
{
for (int j = 0; j<board_size.width; j++)
{
/* 假设定标板放在世界坐标系中z=0的平面上 */
Point3f tempPoint;
//角点三维坐标的单位由square_size决定:square_size为1时以棋盘格为单位长度,为20时表示每格边长为20(与实测单位一致)
tempPoint.x = i*square_size.width;
tempPoint.y = j*square_size.height;
tempPoint.z = 0;
tempPointSet.push_back(tempPoint);
}
}
object_Points.push_back(tempPointSet);
}

/* 初始化每幅图像中的角点数量,这里我们假设每幅图像中都可以看到完整的定标板 */
for (int i = 0; i< image_count; i++)
{
point_counts.push_back(board_size.width*board_size.height);
}

/* 开始定标 */
calibrateCamera(object_Points, corners_Seq, image_size, intrinsic_matrix, distortion_coeffs, rotation_vectors, translation_vectors, 0);
cout << "定标完成!\n";

/************************************************************************
对定标结果进行评价
*************************************************************************/
cout << "开始评价定标结果………………" << endl;
double total_err = 0.0;                   /* 所有图像的平均误差的总和 */
double err = 0.0;                        /* 每幅图像的平均误差 */
vector<Point2f>  image_points2;             /****   保存重新计算得到的投影点    ****/

cout << "每幅图像的定标误差:" << endl;
fout << "每幅图像的定标误差:" << endl << endl;
for (int i = 0; i<image_count; i++)
{
vector<Point3f> tempPointSet = object_Points[i];
/****    通过得到的摄像机内外参数,对空间的三维点进行重新投影计算,得到新的投影点     ****/
projectPoints(tempPointSet, rotation_vectors[i], translation_vectors[i], intrinsic_matrix, distortion_coeffs, image_points2);
/* 计算新的投影点和旧的投影点之间的误差*/
vector<Point2f> tempImagePoint = corners_Seq[i];
Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2);
Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);
for (size_t i = 0; i != tempImagePoint.size(); i++)
{
image_points2Mat.at<Vec2f>(0, i) = Vec2f(image_points2[i].x, image_points2[i].y);
tempImagePointMat.at<Vec2f>(0, i) = Vec2f(tempImagePoint[i].x, tempImagePoint[i].y);
}
err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
total_err += err /= point_counts[i];
cout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
}
cout << "总体平均误差:" << total_err / image_count << "像素" << endl;
fout << "总体平均误差:" << total_err / image_count << "像素" << endl << endl;
cout << "评价完成!" << endl;

/************************************************************************
保存定标结果
*************************************************************************/
cout << "开始保存定标结果………………" << endl;
Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* 保存每幅图像的旋转矩阵 */

fout << "相机内参数矩阵:" << endl;
cout << "相机内参数矩阵:" << endl;
fout << intrinsic_matrix << endl;
cout << intrinsic_matrix << endl;
fout << "畸变系数:\n";
cout << "畸变系数:\n";
fout << distortion_coeffs << endl;
cout << distortion_coeffs << endl;
for (int i = 0; i<image_count; i++)
{
fout << "第" << i + 1 << "幅图像的旋转向量:" << endl;
fout << rotation_vectors[i] << endl;

/* 将旋转向量转换为相对应的旋转矩阵 */
Rodrigues(rotation_vectors[i], rotation_matrix);
fout << "第" << i + 1 << "幅图像的旋转矩阵:" << endl;
fout << rotation_matrix << endl;
fout << "第" << i + 1 << "幅图像的平移向量:" << endl;
fout << translation_vectors[i] << endl;
}
cout << "完成保存" << endl;
fout << endl;

/************************************************************************
显示定标结果
*************************************************************************/
Mat mapx = Mat(image_size, CV_32FC1);
Mat mapy = Mat(image_size, CV_32FC1);
Mat R = Mat::eye(3, 3, CV_32F);
cout << "保存矫正图像" << endl;
for (int i = 0; i != image_count; i++)
{
cout << "Frame #" << i + 1 << "..." << endl;
//newCameraMatrix——输入的校正后的3X3摄像机矩阵(也可用cvStereoRectify()得出的3X4的左或右投影矩阵,其实系统会自动提取该
//矩阵前三列的有用部分作为输入参数)注:!!无校正变换的相机仍用求得内参矩阵
Mat newCameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));
//得到映射关系:R——输入的第一和第二相机坐标系之间的旋转矩阵,一般无校正变换的相机默认为单位矩阵
// opencv中,remap与undistortion都是消除畸变的函数,undistortion在设置了一些参数后调用了remap函数,二者的算法实质是一样
//的。由目标图像的坐标,找到对应的原始图像坐标,然后将其值复制到目标图像。大致思路是如此,由于图像大小和变换,需要插值或
//近似的方法,如最近邻法、线性插值等
initUndistortRectifyMap(intrinsic_matrix, distortion_coeffs, R, intrinsic_matrix, image_size, CV_32FC1, mapx, mapy);
Mat t = image_Seq[i].clone();
cv::remap(image_Seq[i], t, mapx, mapy, INTER_LINEAR);
string imageFileName;
std::stringstream StrStm;
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += "_校正图像.jpg";
imwrite(imageFileName, t);
}
cout << "保存结束" << endl;

time0 = ((double)getTickCount() - time0) / getTickFrequency();
cout << "标定用时:" << time0 << "秒" << endl;
waitKey(0);
/************************************************************************
测试一张图片,用标定过参数去校正畸变,并记所用时间
*************************************************************************/
double time1 = static_cast<double>(getTickCount());
if (1)
{
cout << "TestImage ..." << endl;
//newCameraMatrix——输入的校正后的3X3摄像机矩阵(也可用cvStereoRectify()得出的3X4的左或右投影矩阵,其实系统会自动提取该
//矩阵前三列的有用部分作为输入参数)
Mat newCameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));
Mat testImage = imread("D:\\workplace\\opencv_training\\mytrainings\\mytest5\\color3\\img03.jpg", 1);
initUndistortRectifyMap(intrinsic_matrix, distortion_coeffs, R, intrinsic_matrix, image_size, CV_32FC1, mapx, mapy);
Mat t = testImage.clone();
cv::remap(testImage, t, mapx, mapy, INTER_LINEAR);

imwrite("img03_TestOutput.jpg", t);
cout << "保存结束" << endl;
}
time1 = ((double)getTickCount() - time1) / getTickFrequency();
cout << "一张图片的畸变校正用时:" << time1 << "秒" << endl;

waitKey(0);
return 0;
}
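上面注释提到 undistort 与 initUndistortRectifyMap+remap 的关系。下面是一个只校正单张图像时直接用 cv::undistort 的最小示意,假设内参 intrinsic_matrix 与畸变系数 distortion_coeffs 已由上面的标定流程求出,文件名仅为示例。

#include <opencv2\opencv.hpp>

//用已求得的内参和畸变系数校正单张图像(cv::undistort内部即先initUndistortRectifyMap再remap)
void undistortOneImage(const cv::Mat& intrinsic_matrix, const cv::Mat& distortion_coeffs)
{
	cv::Mat raw = cv::imread("test.jpg");              //待校正图像(路径为示例)
	cv::Mat corrected;
	cv::undistort(raw, corrected, intrinsic_matrix, distortion_coeffs);
	cv::imwrite("test_undistorted.jpg", corrected);
	//若要批量校正同一相机的大量图像,仍建议像上面那样只算一次映射表再反复remap,效率更高
}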
//-----------------------------------OpenCV学习19-------------------------------------
//  程序名称:练习基于特征的ORB、SIFT、SURF特征匹配,并练习找单应性矩阵,将1图片映射到2图片坐标中,
//  用框线标出
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
#include <vector>
#include<opencv2/legacy/legacy.hpp>//BruteForceMatcher<L2<float> > 在这里面!!!!!!!!
int main()
{
Mat img_1 = imread("D:\\workplace\\opencv_training\\mytrainings\\mytest7\\test1.png");
Mat img_2 = imread("D:\\workplace\\opencv_training\\mytrainings\\mytest7\\test2.png");
if (!img_1.data || !img_2.data)
{
cout << "error reading images " << endl;
return -1;
}

ORB orb;//以ORB特征检测算法为例
vector<KeyPoint> keyPoints_1, keyPoints_2;
Mat descriptors_1, descriptors_2;

orb(img_1, Mat(), keyPoints_1, descriptors_1);
orb(img_2, Mat(), keyPoints_2, descriptors_2);
cout << "img1特征点个数:" << keyPoints_1.size() << " points  img2特征点个数:" << keyPoints_2.size()
<< " points" << endl << ">" << endl;
Mat img_keypoints1, img_keypoints2;
drawKeypoints(img_1, keyPoints_1, img_keypoints1, Scalar::all(-1), 0);
drawKeypoints(img_2, keyPoints_2, img_keypoints2, Scalar::all(-1), 0);
imshow("test1", img_keypoints1);
imshow("test2", img_keypoints2);
BruteForceMatcher<L2<float> >  matcher;//注:ORB为二进制描述子,更合适的是Hamming距离(如BruteForceMatcher<Hamming>或BFMatcher(NORM_HAMMING)),此处用L2仅作演示
vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);

/*
SIFT sift;
sift(img_1, Mat(), keyPoints_1, descriptors_1);
sift(img_2, Mat(), keyPoints_2, descriptors_2);
BruteForceMatcher<L2<float> >  matcher;
*/

/*
SURF surf;
surf(img_1, Mat(), keyPoints_1);
surf(img_2, Mat(), keyPoints_2);
SurfDescriptorExtractor extrator;
extrator.compute(img_1, keyPoints_1, descriptors_1);
extrator.compute(img_2, keyPoints_2, descriptors_2);
BruteForceMatcher<L2<float> >  matcher;
*/

double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 0.6*max_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_1.rows; i++)
{
if (matches[i].distance < 0.6*max_dist)
{
good_matches.push_back(matches[i]);
}
}

Mat img_matches;
drawMatches(img_1, keyPoints_1, img_2, keyPoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Match1", img_matches);
waitKey();
//*************************************************************
// localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;

for (size_t i = 0; i < good_matches.size(); ++i)
{
// get the keypoints from the good matches
obj.push_back(keyPoints_1[good_matches[i].queryIdx].pt);
scene.push_back(keyPoints_2[good_matches[i].trainIdx].pt);
}
Mat H = findHomography(obj, scene, CV_RANSAC);

// get the corners from the image_1
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(img_1.cols, 0);
obj_corners[2] = cvPoint(img_1.cols, img_1.rows);
obj_corners[3] = cvPoint(0, img_1.rows);
std::vector<Point2f> scene_corners(4);

perspectiveTransform(obj_corners, scene_corners, H);

// draw lines between the corners (the mapped object in the scene - image_2)
line(img_matches, scene_corners[0] + Point2f(img_1.cols, 0), scene_corners[1] + Point2f(img_1.cols, 0), Scalar(0, 255, 0));
line(img_matches, scene_corners[1] + Point2f(img_1.cols, 0), scene_corners[2] + Point2f(img_1.cols, 0), Scalar(0, 255, 0));
line(img_matches, scene_corners[2] + Point2f(img_1.cols, 0), scene_corners[3] + Point2f(img_1.cols, 0), Scalar(0, 255, 0));
line(img_matches, scene_corners[3] + Point2f(img_1.cols, 0), scene_corners[0] + Point2f(img_1.cols, 0), Scalar(0, 255, 0));
imshow("被标记的Match2", img_matches);
waitKey(0);

}
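上面筛选较优匹配时只用了 max_dist;另一种常见做法是以 min_dist 的若干倍作阈值。下面是一个独立小函数的示意,倍数 3 为经验值,属于假设参数,可按场景调整。

#include <vector>
#include <opencv2\features2d\features2d.hpp>

//按min_dist的倍数筛选较优匹配(ratio为经验阈值)
std::vector<cv::DMatch> filterByMinDist(const std::vector<cv::DMatch>& matches, double minDist, double ratio)
{
	std::vector<cv::DMatch> good;
	for (size_t i = 0; i < matches.size(); i++)
	{
		if (matches[i].distance < ratio * minDist)
			good.push_back(matches[i]);
	}
	return good;
}
//用法示意(沿用上面程序中的变量):std::vector<DMatch> good_matches = filterByMinDist(matches, min_dist, 3.0);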

//-----------------------------------OpenCV学习20-------------------------------------
//  程序名称:测试自动生成图片名序列并保存视频帧图像
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
#include <stdio.h>
using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{

CvCapture* capture = cvCaptureFromAVI("D:\\workplace\\opencv_training\\bike.avi");
//打开摄像机用CvCapture* capture = cvCreateCameraCapture(-1); //或者VideoCapture capture(0);capture>>img
int i = 0;
IplImage* img = 0;
char image_name[100];
cvNamedWindow("抽取视频窗口");
//读取和显示
while (1)
{
img = cvQueryFrame(capture); //获取一帧图片
if (img == NULL)
break;

cvShowImage("抽取视频窗口", img); //将其显示
char key = cvWaitKey(25);
if (i == 10)
break;
//注:一个..\\代表上一级目录!!!!!!!!!!!!!!!!!!!
sprintf(image_name, "%s%d%s", "D:..\\mytest", ++i, ".jpg");//保存的图片名,即jpg文件保存在上一级目录
cvSaveImage(image_name, img);   //保存一帧图片
}

cvReleaseCapture(&capture);
cvDestroyWindow("抽取视频窗口");

return 0;
}

//int main()
//{
////下面两种方法都可以打开视频
//VideoCapture capture("..//..//1.avi");
///*2、VideoCapture capture;
//capture.open("..//..//1.avi");*/
//if (!capture.isOpened())
//  return 1;
//double rate = capture.get(CV_CAP_PROP_FPS);
//bool stop(false);
//Mat frame;
//namedWindow("Extracted Frame");
////这个delay的单位是ms,若是秒,则delay为1/rate。
////rate为每秒播放的帧数
//int delay = 1000 / rate;
////用于设置直接播放哪一帧
//double position = 0.0;
//capture.set(CV_CAP_PROP_POS_FRAMES, position);
//  while (!stop)
//  {
//      //以下三种方法都可以读取视频
//      if (!capture.read(frame))
//          break;
//      /*2、capture>>frame;*/
//      /*3、capture.grab();
//      capture.retrieve(frame);*/
//      imshow("Extracted Frame", frame);
//          if (waitKey(delay) >= 0)
//          stop = true;
//          //当delay==0时会一直等待按键,暂停在该帧;delay>0时等待delay毫秒,若有按键按下则结束播放
//  }
//  //关闭视频文件,但是不是必须的,VideoCapture构造函数会默认调用它
// capture.release();
//}

//-----------------------------------OpenCV学习21-------------------------------------
//  程序名称:用OpenCV2的类关于视频的操作!!!!
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace std;
using namespace cv;

int main()
{
//打开视频文件:其实就是建立一个VideoCapture结构
VideoCapture capture("D:\\workplace\\opencv_training\\1.avi");
//检测是否正常打开:成功打开时,isOpened返回ture
if (!capture.isOpened())
cout << "fail to open!" << endl;
//获取整个帧数
long totalFrameNumber = capture.get(CV_CAP_PROP_FRAME_COUNT);
cout << "整个视频共" << totalFrameNumber << "帧" << endl;

//设置开始帧
long frameToStart = 300;
capture.set(CV_CAP_PROP_POS_FRAMES, frameToStart);
cout << "从第" << frameToStart << "帧开始读" << endl;

//设置结束帧
int frameToStop = 400;

if (frameToStop < frameToStart)
{
cout << "结束帧小于开始帧,程序错误,即将退出!" << endl;
return -1;
}
else
{
cout << "结束帧为:第" << frameToStop << "帧" << endl;
}

//获取帧率
double rate = capture.get(CV_CAP_PROP_FPS);
cout << "帧率为:" << rate << endl;

//定义一个用来控制读取视频循环结束的变量
bool stop = false;
//承载每一帧的图像
Mat frame;
//显示每一帧的窗口
namedWindow("Extracted frame");
//两帧间的间隔时间:
//int delay = 1000/rate;
int delay = 1000 / rate;

//利用while循环读取帧
//currentFrame是在循环体中控制读取到指定的帧后循环结束的变量
long currentFrame = frameToStart;

//滤波器的核
int kernel_size = 3;
Mat kernel = Mat::ones(kernel_size, kernel_size, CV_32F) / (float)(kernel_size*kernel_size);

while (!stop)
{
//读取下一帧
if (!capture.read(frame))
{
cout << "读取视频失败" << endl;
return -1;
}

//这里加滤波程序
imshow("Extracted frame", frame);
filter2D(frame, frame, -1, kernel);

imshow("after filter", frame);
cout << "正在读取第" << currentFrame << "帧" << endl;
//waitKey(int delay=0)当delay ≤ 0时会永远等待;当delay>0时会等待delay毫秒
//当时间结束前没有按键按下时,返回值为-1;否则返回按键

int c = waitKey(delay);
//按下ESC或者到达指定的结束帧后退出读取视频
if ((char)c == 27 || currentFrame > frameToStop)
{
stop = true;
}
//按下按键后会停留在当前帧,等待下一次按键
if (c >= 0)
{
waitKey(0);
}
currentFrame++;

}
//关闭视频文件
capture.release();
waitKey(0);
return 0;
}
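上面先手工构造 3x3 均值核再调用 filter2D;这与 OpenCV 自带的归一化盒式滤波 cv::blur 等价。下面是一个等价写法的小示意。

#include <opencv2\core\core.hpp>
#include <opencv2\imgproc\imgproc.hpp>

//等价于kernel = ones(3,3)/9后再filter2D:cv::blur即归一化的盒式(均值)滤波
void meanFilter3x3(const cv::Mat& src, cv::Mat& dst)
{
	cv::blur(src, dst, cv::Size(3, 3));
}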

//-----------------------------------OpenCV学习22-------------------------------------
//  程序名称:测试天敏VC400采集卡程序,该程序适用于笔记本自带摄像头视频采集
//  2016年10月 Created by孙立波(Visual Studio 2013+OpenCV2.4.9)
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>

#include "videoInput.h"

using namespace std;
using namespace cv;
int main()
{
//创建捕获对象
videoInput VI;
//可用摄像头,返回ID号的各路总个数,在此直接用0,因为采集卡插在0号ID
int numID = VI.listDevices();

int device1 = 0;
//默认参数设置摄像头
VI.setupDevice(device1, 640, 480, 1);//1代表采用混合路VI_COMPOSITE连接方式
/*VI.setupDevice(device1);*/
//VI.setFormat(device1, VI_PAL_B);  //可设置视频格式,默认下为PAL
//长宽尺寸
int width = VI.getWidth(device1);
int height = VI.getHeight(device1);
int size = VI.getSize(device1);
cout << "width=" << width << "\t" << "height=" << height << endl;
cout << "framesize=" << size << endl;

//声明显示图像
Mat image;
Mat frame;
image.create(Size(width, height), CV_8UC3);
frame.create(Size(width, height), CV_8UC3);

//分配内存
uchar* yourBuffer = (uchar*)malloc(size);

while (1)
{
VI.getPixels(device1, yourBuffer, false, false);
image.data = (uchar*)yourBuffer;

//竖直翻转
flip(image, image, 0);//0竖直翻转1水平翻转-1垂直水平翻转
waitKey(50);
imshow("采集的图像", image);

}

}
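如果只用笔记本自带摄像头而不经采集卡,也可以不依赖 videoInput 库,直接用 OpenCV 的 VideoCapture。下面是一个最小示意,设备号 0 为默认摄像头(属假设,按实际情况调整)。

#include <opencv2\opencv.hpp>

int main()
{
	cv::VideoCapture cap(0);                  //0为默认摄像头
	if (!cap.isOpened())
		return -1;
	cv::Mat frame;
	while (true)
	{
		cap >> frame;                         //取一帧
		if (frame.empty())
			break;
		cv::imshow("采集的图像", frame);
		if (cv::waitKey(30) >= 0)             //按任意键退出
			break;
	}
	return 0;
}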

//-----------------------------------OpenCV学习23-标定焊接图像-------------------------------------
//  程序名称:OpenCV标定焊接图像,该程序根据DOS提示,一步步实现标定的自动化,求取内参,
//           标定请在F盘创建“biaoding”用于储存采集的图像和结果
//  过程描述:提取图像并保存,提取各图像序列角点,角点亚像素精确化,摄像机标定畸变参数和内参,
//           定标结果评价(保存结果在F:\\biaoding\\biaoding_result.txt),最后矫正所有图像到“F:\\biaoding”文件夹
//  需设参数:需要提取的图像数imageCount = **,每个标定板图像角点数board_size = Size(*, *),每个格子像素边长Size square_size = Size(80, 80)这个参数设置为每个格子边长像素值;

//******************************************************注意事项**********************************
/*
注意:
要想让程序跑完整个过程,必须把board_size设对,且程序是以每幅图像都完整包含所有角点为前提编写的,所以采集的图像需包含所有格子!!!因为这点调了半天程序
*/
//  所用IDE版本:        Visual Studio 2013
//  开发所用OpenCV版本:        2.4.9
//  2016年10月 Created by 孙立波

//包含程序所依赖的头文件:为方便起见把经常用的头文件都写在这里(前三必须包含),也可以用#include "opencv.hpp"包含下面所有头文件
#include <opencv2\opencv.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <iomanip>
#include <fstream>

#include "videoInput.h"
using namespace cv;
using namespace std;
//隐藏控制台窗口
//#pragma comment(linker, "/subsystem:\"windows\" /entry:\"mainCRTStartup\"")
int main()
{
//********************************************************************************************
//创建捕获对象
videoInput VI;
//可用摄像头,返回ID号的各路总个数,在此直接用0,因为采集卡插在0号ID
int numID = VI.listDevices();

int device1 = 0;
//默认参数设置摄像头
VI.setupDevice(device1, 640, 480, 1);//1代表采用混合路VI_COMPOSITE连接方式,即针对模拟摄像头,视频一路传送的为亮度信号和两个色差信号的混合信号
/*VI.setupDevice(device1);*/
//VI.setFormat(device1, VI_PAL_B);  //可设置视频格式,默认下为PAL
//长宽尺寸
int width = VI.getWidth(device1);
int height = VI.getHeight(device1);
int size = VI.getSize(device1);
cout << "width=" << width << "\t" << "height=" << height << endl;
cout << "framesize=" << size << endl;

//声明显示图像
Mat frame;

frame.create(Size(width, height), CV_8UC3);

//分配内存
uchar* yourBuffer = (uchar*)malloc(size);

/************************************************************************
读取21张图像,存入F:\biaoding文件夹中
*************************************************************************/
cout << "开始提取21张标定板图像………………" << endl;
int imageCount = 21;
int key = 0;
int count1 = 0;
for (int i = 0; i != imageCount; i++)
{
cout << "Frame#" << i + 1 << "..." << endl;
std::stringstream StrStm;
//为了将i转化为字符型,用StrStm做中介
string imageFileName;
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += ".jpg";

//system("pause");//用waitKey必须有GUI窗口生成才行

cout << "按Enter开始抽取图像,进入后可按q或者ESC键重新抽取图像,若按Enter键表明这帧图像被存入文件夹中" << endl;
//key = waitKey();

int flag = 1;
while (flag)
{
VI.getPixels(device1, yourBuffer, false, false);
frame.data = (uchar*)yourBuffer;
waitKey(50);
//竖直翻转
flip(frame, frame, 0);//0竖直翻转1水平翻转,-1垂直水平翻转

Mat image0 = frame;//抽取到的临时图像数据并显示出来

imshow("显示抓取图像", image0);//显示是否符合要求
int key2;
key2 = waitKey();
if (key2 == 13)//按Enter键存取图像到F盘biaoding文件区
{
cout << "提取标定板图像成功!………………" << endl;
std::stringstream str;
str << "F:\\biaoding\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
std::cout << "提取的图像保存路径及文件名" << str.str() << endl;
imwrite(str.str(), image0);//保存的是从硬件得到的源格式图像
flag = 0;
count1 += 1;//已经得到的标定图像计数总数
}
else
if (key2 == 113 || key2 == 27)//按Q或者ESC键重新获取一帧图像
cout << "这次提取的标定板图像不成功!重新提取!!!!………………" << endl;
}

}
if (count1 == imageCount)
{
cout << "***********************………………" << endl;
cout << "***********************………………" << endl;
cout << "下面开始标定图像...................." << endl;
count1 = 0;
}

system("pause");//用waitKey必须有GUI窗口生成才行
/************************************************************************
读取每一幅图像,从中提取出角点,然后对角点进行亚像素精确化
*************************************************************************/

cout << "开始提取角点………………" << endl;

cout << "开始提取角点………………" << endl;

double time0 = static_cast<double>(getTickCount());//记录定标参数求取和将所有图像矫正用的总时间
ofstream fout("F:\\biaoding\\biaoding_result.txt");  /**    保存定标结果的文件     **/
Size image_size;                          /****    图像的尺寸      ****/
Size board_size(9, 9);                    /****    定标板上每行、列的角点数       ****/
vector<Point2f> corners;                  /****    缓存每幅图像上检测到的角点     ****/
vector<vector<Point2f>>  corners_Seq;     /****    保存检测到的所有角点           ****/
vector<Mat>  image_Seq;

int count = 0;
int image_count = imageCount;
for (int i = 0; i != image_count; i++)
{
cout << "Frame #" << i + 1 << "..." << endl;
std::stringstream str;
//str << "F:\\biaoding\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
str << "F:\\biaoding\\img" << std::setw(2) << std::setfill('0') << i + 1 << ".jpg";
std::cout << str.str() << std::endl;
Mat image = imread(str.str());
image_size = image.size();

/* 提取角点 */
Mat imageGray;
cvtColor(image, imageGray, CV_BGR2GRAY);   //imread读入的是BGR顺序
bool patternfound = findChessboardCorners(image, board_size, corners, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE +
CALIB_CB_FAST_CHECK);
if (!patternfound)
{
cout << "can not find chessboard corners!\n";
continue;
}
else
{
/*
亚像素精确化 :迭代过程的终止条件可以是最大迭代次数CV_TERMCRIT_ITER类型,或者是设定的精度CV_TERMCRIT_EPS类型(或者是两
者的组合)。终止条件的设置在极大程度上影响最终得到的亚像素值的精度。在此,指定为0.10,则求得的亚像素级精度为像素的十分
之一
*/
cornerSubPix(imageGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
//Size(11, 11)为搜索窗口的一半尺寸,Size(-1, -1)死区的一半尺寸,死区为不对搜索区的中央位置做求和运算的区域。/
//它是用来避免自相关矩阵出现的某些可能的奇异性。当值为(-1,-1)表示没有死区。
//TermCriteria为求角点的迭代过程的终止条件。即角点位置的确定,要么迭代数大于某个设定值,要么精确度达到某个设定值。
//criteria可以是最大迭代数目,或者是设定的精确度,也可以是它们的组合。
/* 绘制检测到的角点并保存 */
Mat imageTemp = image.clone();
for (int j = 0; j < corners.size(); j++)
{
circle(imageTemp, corners[j], 10, Scalar(0, 0, 255), 2, 8, 0);
}
string imageFileName;
std::stringstream StrStm;
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += "_corner.jpg";
//保存提取角点的图像
imwrite("F:\\biaoding\\" + imageFileName, imageTemp);
cout << "Frame corner#" << i + 1 << "...end" << endl;

count = count + corners.size();
//将该角点压入角点序列堆栈
corners_Seq.push_back(corners);
}
//将处理过的图像压入源图像堆栈
image_Seq.push_back(image);
}
cout << "角点提取完成!\n";

cout << "角点提取完成!下一步摄像机定标\n";
system("pause");
/************************************************************************
摄像机定标
*************************************************************************/
cout << "开始定标………………" << endl;
Size square_size = Size(80, 80);                                      /**** 实际测量得到的定标板上每个棋盘格的大小,单位为像素 ****/
vector<vector<Point3f>>  object_Points;                       /**** 保存定标板上角点的三维坐标   ****/

Mat image_points = Mat(1, count, CV_32FC2, Scalar::all(0));   /***** 保存提取的所有角点1*序列图像的角点总数   *****/
vector<int>  point_counts;                                    /***** 每幅图像中角点的数量 ****/
Mat intrinsic_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0));  /***** 摄像机内参数矩阵    ****/
Mat distortion_coeffs = Mat(1, 4, CV_32FC1, Scalar::all(0));  /* 摄像机的4个畸变系数:k1,k2,p1,p2 */
vector<cv::Mat> rotation_vectors;                             /* 图像序列图像的旋转向量,每一行代表一个旋转向量 */
vector<cv::Mat> translation_vectors;                         /* 每幅图像的平移向量,每一行代表一个平移向量 */

/* 初始化定标板上角点的三维坐标,此用的是像素坐标 */
for (int t = 0; t<image_count; t++)
{
vector<Point3f> tempPointSet;//存储每幅图像的像素坐标
for (int i = 0; i<board_size.height; i++)
{
for (int j = 0; j<board_size.width; j++)
{
/* 假设定标板放在世界坐标系中z=0的平面上 */
Point3f tempPoint;
//在这里以像素为单位:square_size的边长为80,故相邻角点间距为80
tempPoint.x = i*square_size.width;
tempPoint.y = j*square_size.height;
tempPoint.z = 0;
tempPointSet.push_back(tempPoint);
}
}
object_Points.push_back(tempPointSet);//存储图像序列中每幅图像的像素坐标
}

/* 初始化每幅图像中的角点数量,这里我们假设每幅图像中都可以看到完整的定标板 */
for (int i = 0; i< image_count; i++)
{
point_counts.push_back(board_size.width*board_size.height);
}

/* 开始定标 */
calibrateCamera(object_Points, corners_Seq, image_size, intrinsic_matrix, distortion_coeffs, rotation_vectors, translation_vectors, 0);
cout << "定标完成!\n";
/************************************************************************
对定标结果进行评价
*************************************************************************/
cout << "开始评价定标结果………………" << endl;
double total_err = 0.0;                   /* 所有图像的平均误差的总和 */
double err = 0.0;                        /* 每幅图像的平均误差 */
vector<Point2f>  image_points2;             /****   保存重新计算得到的投影点    ****/

cout << "每幅图像的定标误差:" << endl;
fout << "每幅图像的定标误差:" << endl << endl;
for (int i = 0; i<image_count; i++)
{
vector<Point3f> tempPointSet = object_Points[i];
/****    通过得到的摄像机内外参数,对空间的三维点进行重新投影计算,得到新的投影点     ****/
projectPoints(tempPointSet, rotation_vectors[i], translation_vectors[i], intrinsic_matrix, distortion_coeffs, image_points2);
/* 计算新的投影点和旧的投影点之间的误差*/
vector<Point2f> tempImagePoint = corners_Seq[i];
Mat tempImagePointMat = Mat(1, tempImagePoint.size(), CV_32FC2);
Mat image_points2Mat = Mat(1, image_points2.size(), CV_32FC2);
for (size_t i = 0; i != tempImagePoint.size(); i++)
{
image_points2Mat.at<Vec2f>(0, i) = Vec2f(image_points2[i].x, image_points2[i].y);
tempImagePointMat.at<Vec2f>(0, i) = Vec2f(tempImagePoint[i].x, tempImagePoint[i].y);
}
err = norm(image_points2Mat, tempImagePointMat, NORM_L2);
total_err += err /= point_counts[i];
cout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
fout << "第" << i + 1 << "幅图像的平均误差:" << err << "像素" << endl;
}
cout << "总体平均误差:" << total_err / image_count << "像素" << endl;
fout << "总体平均误差:" << total_err / image_count << "像素" << endl << endl;
cout << "评价完成!" << endl;

/************************************************************************
保存定标结果
*************************************************************************/
cout << "开始保存定标结果………………" << endl;
Mat rotation_matrix = Mat(3, 3, CV_32FC1, Scalar::all(0)); /* 保存每幅图像的旋转矩阵 */

fout << "相机内参数矩阵:" << endl;
cout << "相机内参数矩阵:" << endl;
fout << intrinsic_matrix << endl;
cout << intrinsic_matrix << endl;
fout << "畸变系数:\n";
cout << "畸变系数:\n";
fout << distortion_coeffs << endl;
cout << distortion_coeffs << endl;
for (int i = 0; i<image_count; i++)
{
fout << "第" << i + 1 << "幅图像的旋转向量:" << endl;
fout << rotation_vectors[i] << endl;

/* 将旋转向量转换为相对应的旋转矩阵 */
Rodrigues(rotation_vectors[i], rotation_matrix);
fout << "第" << i + 1 << "幅图像的旋转矩阵:" << endl;
fout << rotation_matrix << endl;
fout << "第" << i + 1 << "幅图像的平移向量:" << endl;
fout << translation_vectors[i] << endl;
}
cout << "完成保存" << endl;
fout << endl;

/************************************************************************
显示定标结果
*************************************************************************/
Mat mapx = Mat(image_size, CV_32FC1);
Mat mapy = Mat(image_size, CV_32FC1);
Mat R = Mat::eye(3, 3, CV_32F);
cout << "保存矫正图像" << endl;
for (int i = 0; i != image_count; i++)
{
cout << "Frame #" << i + 1 << "..." << endl;
//newCameraMatrix——输入的校正后的3X3摄像机矩阵(也可用cvStereoRectify()得出的3X4的左或右投影矩阵,其实系统会自动提取该
//矩阵前三列的有用部分作为输入参数)注:!!无校正变换的相机仍用求得内参矩阵
Mat newCameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));
//得到映射关系:R——输入的第一和第二相机坐标系之间的旋转矩阵,一般无校正变换的相机默认为单位矩阵
// opencv中,remap与undistortion都是消除畸变的函数,undistortion在设置了一些参数后调用了remap函数,二者的算法实质是一样
//的。由目标图像的坐标,找到对应的原始图像坐标,然后将其值复制到目标图像。大致思路是如此,由于图像大小和变换,需要插值或
//近似的方法,如最近邻法、线性插值等
initUndistortRectifyMap(intrinsic_matrix, distortion_coeffs, R, intrinsic_matrix, image_size, CV_32FC1, mapx, mapy);
Mat t = image_Seq[i].clone();
cv::remap(image_Seq[i], t, mapx, mapy, INTER_LINEAR);
string imageFileName;
std::stringstream StrStm;
StrStm << i + 1;
StrStm >> imageFileName;
imageFileName += "_校正图像.jpg";
imwrite("F:\\biaoding\\" + imageFileName, t);
}
cout << "保存结束" << endl;

time0 = ((double)getTickCount() - time0) / getTickFrequency();
cout << "标定用时:" << time0 << "秒" << endl;
system("pause");

/************************************************************************
测试一张图片,用标定过参数去校正畸变,并记所用时间
*************************************************************************/
/*
double time1 = static_cast<double>(getTickCount());
if (1)
{
cout << "TestImage ..." << endl;
//newCameraMatrix——输入的校正后的3X3摄像机矩阵(也可用cvStereoRectify()得出的3X4的左或右投影矩阵,其实系统会自动提取该
//矩阵前三列的有用部分作为输入参数)
Mat newCameraMatrix = Mat(3, 3, CV_32FC1, Scalar::all(0));
Mat testImage = imread("F:\\biaoding\\img03.jpg", 1);
initUndistortRectifyMap(intrinsic_matrix, distortion_coeffs, R, intrinsic_matrix, image_size, CV_32FC1, mapx, mapy);
Mat t = testImage.clone();
cv::remap(testImage, t, mapx, mapy, INTER_LINEAR);

imwrite("img3_TestOutput.jpg", t);
cout << "保存结束" << endl;
}
time1 = ((double)getTickCount() - time1) / getTickFrequency();
cout << "一张图片的畸变校正用时:" << time1 << "秒" << endl;
*/
return 0;
}
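补充:上面把标定结果写进了纯文本 biaoding_result.txt,只便于人工查看;若希望以后能被程序直接读回,可以用 cv::FileStorage 存成 YAML/XML。下面是一个最小示意,文件名 calib.yml 为假设。

#include <opencv2\core\core.hpp>

//保存内参矩阵与畸变系数到YAML文件
void saveCalib(const cv::Mat& intrinsic_matrix, const cv::Mat& distortion_coeffs)
{
	cv::FileStorage fs("F:\\biaoding\\calib.yml", cv::FileStorage::WRITE);
	fs << "intrinsic_matrix" << intrinsic_matrix;
	fs << "distortion_coeffs" << distortion_coeffs;
	fs.release();
}

//从YAML文件读回内参矩阵与畸变系数
void loadCalib(cv::Mat& intrinsic_matrix, cv::Mat& distortion_coeffs)
{
	cv::FileStorage fs("F:\\biaoding\\calib.yml", cv::FileStorage::READ);
	fs["intrinsic_matrix"] >> intrinsic_matrix;
	fs["distortion_coeffs"] >> distortion_coeffs;
	fs.release();
}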