您的位置:首页 > 理论基础

相机标定

2016-06-01 01:05 363 查看
在图像测量过程以及机器视觉应用中,为确定空间物体表面某点的三维几何位置与其在图像中对应点之间的相互关系,必须建立相机成像的几何模型,这些几何模型参数就是相机参数。在大多数条件下这些参数必须通过实验与计算才能得到,这个求解参数的过程就称之为相机标定(或摄像机标定)。无论是在图像测量或者机器视觉应用中,相机参数的标定都是非常关键的环节,其标定结果的精度及算法的稳定性直接影响相机工作产生结果的准确性。因此,做好相机标定是做好后续工作的前提,提高标定精度是科研工作的重点所在。

相机标定可以分为如下6步:

1、图像增强

2、边缘检测

3、椭圆提取

4、编码识别

5、初值估计

6、光束法平差

1、图像增强:

首先进行图像增强,扩大图像中不同物体特征之间的差别。图像增强算法有很多,比如直方图均衡化、对数变换、指数变换以及Wallis变换等。这里我用的是对数变换。代码如下:

#define a 100.0
//控制参数,表示曲线的弯曲程度
#define b 0.0 //控制参数,表示曲线的上下偏移量
//图像增强,对数变换
for (i = 0; i < mheight; i++)
{
for (j = 0; j < mwidth; j++)
{
temp = a*log10((double)imageMat_Gray[i*mwidth + j] + 1.0) + b;
if (temp > 255)
temp = 255.0;
if (temp < 0)
temp = 0.0;
imageMat_Gray[i*mwidth + j] = int(temp + 0.5);
}
}
这里需要注意的是,在对数变换之前,我们需要对图像进行灰度化变换。效果如下:



   


2、边缘检测:

边缘检测算子有Roberts、Sobel、Prewitt、LOG、DOG、Canny等,比如LOG算子:高斯平滑+二阶差分算子
有如下四个步骤:生成模板、卷积运算、零交叉点检测和边缘细化。
这里为方便起见,我直接调用了openCV的函数cvCanny()。关于openCV的配置,可以参考我的另一篇博客:http://blog.csdn.net/secyb/article/details/51341061

cvCanny(img_gray, img_edgeExtract, 50, 150, 3);
效果如下:



3、椭圆提取:

椭圆提取首先去除不闭合的区域和一些噪声点,即找到所有的闭合区域并存储每个闭合区域边缘上所有的点。判断图形是否闭合,只需要判断当前点的八邻域是否存在相同的点。在这个例子中,我们将所有的边缘用白色显示,即灰度值是255。我们只需判断当前的八邻域是否存在灰度值是255的点,如果存在,继续寻找,直到闭合为止,如果不存在,则删除该图形之前所有的点,即将这些点用黑色显示,也就是将灰度值赋为0。在八邻域的判断过程中,最好先寻找上下左右四邻域的点,如果找不到,再寻找另外四个点。这里我将所有的闭合区域的点保存了下来,代码在最后全部粘出。接着我们需要对这些闭合区域进行椭圆拟合,去除残差较大的区域,从而得到较为准确的椭圆,我们也可以去除过大或过小的椭圆。效果如下:



4、编码识别:

编号识别原理:

1、标定板上每四个点分为共线的一组,不同的组不共线,这些直线相交于标定板中心。

2、每一个组可以分为大和小两种点,每组点按照大小两种状态编码得到的二进制数值各异。

3、标定板平面与影像平面构成二维射影变换(二维DLT变换)。

编号识别步骤(不唯一):

1、按照共线关系对点进行分组,求解每一组点在影像上的直线方程。

2、用RANSAC方法求解线束中心,并剔除错误分组。

3、区分每一组中点的大小状态,并按照二进制编码,从而得到可能的编号。

4、用RANSAC方法(基于二维射影变换)对已经识别的点号进行重新编码,将内点数目最大者作为最终编码。
5、初值估计:

二维DLT变换求解初值

基于前述标志点提取和识别结果,由此可以按照二维DLT变换方法(张永军,2006)求解每一张影像的内外参数初值。

6、光束法平差:
列出误差方程

代码如下(这里包括前三步的代码,就是到椭圆提取,后面的部分代码没有给出):
// OpencvTest.cpp : 定义控制台应用程序的入口点。
//

#include "cv.h"
#include "highgui.h"
#include "math.h"
#include "vector"
using namespace std;

#define a 100.0 //控制参数,表示曲线的弯曲程度
#define b 0.0 //控制参数,表示曲线的上下偏移量

struct EllipsePoint
{
int rowIndex;
int colIndex;
bool isEdge;
};

void LogOperator(IplImage *src, IplImage *dst);//边缘检测log算子
EllipsePoint hasNextPoint(int* imageMat, int i, int j, int mwidth, EllipsePoint *ep);

int main(int argc, char** argv)
{
int i, j, index;
vector<vector<EllipsePoint>> ellipse;
IplImage* img = cvLoadImage("C:\\Users\\HP\\Desktop\\Buddha_001.JPG");
//IplImage* img = cvLoadImage("C:\\Users\\HP\\Desktop\\1.jpg");

int mwidth, mheight, mtype;
mwidth = img->width;
mheight = img->height;
mtype = img->nChannels;

int *imageMat = new int[mwidth*mheight*mtype];
int *imageMat_Gray = new int[mwidth*mheight];

//去除像素点放到imageMat中
for (i = 0; i<mheight; i++)
for (j = 0; j<mwidth; j++) {
CvScalar s;
s = cvGet2D(img, i, j);// get the (i,j) pixel value
for (index = 0; index<mtype; index++) {
imageMat[3 * mwidth*i + 3 * j + index] = s.val[index];
}
}

//RGB转灰度
for (i = 0; i < mheight; i++)
{
for (j = 0; j < mwidth; j++)
{
//Gray = (R * 30 + G * 59 + B * 11 + 50) / 100
//Gray = (R*299 + G*587 + B*114 + 500) / 1000
imageMat_Gray[i*mwidth + j] = (imageMat[(i*mwidth + j) * 3] * 299 + imageMat[(i*mwidth + j) * 3 + 1] * 597 +
imageMat[(i*mwidth + j) * 3 + 2] * 114 + 500) / 1000;
}
}

double temp;
//图像增强,对数变换 for (i = 0; i < mheight; i++) { for (j = 0; j < mwidth; j++) { temp = a*log10((double)imageMat_Gray[i*mwidth + j] + 1.0) + b; if (temp > 255) temp = 255.0; if (temp < 0) temp = 0.0; imageMat_Gray[i*mwidth + j] = int(temp + 0.5); } }

IplImage* img_gray = cvCreateImage(cvSize(mwidth, mheight), IPL_DEPTH_8U, 1);
//将处理后的图像值放入图像中显示
for (int i = 0; i<mheight; i++)
for (int j = 0; j<mwidth; j++) {
CvScalar s;
s.val[0] = imageMat_Gray[i*mwidth+j];
cvSet2D(img_gray, i, j, s);
}

IplImage* img_edgeExtract = cvCreateImage(cvSize(mwidth, mheight), IPL_DEPTH_8U, 1);
//LogOperator(img_gray, img_edgeExtract);
cvCanny(img_gray, img_edgeExtract, 50, 150, 3);

int *imageMat_edgeExtract = new int[mwidth*mheight];
for (i = 0; i<mheight; i++)
for (j = 0; j<mwidth; j++) {
CvScalar s;
s = cvGet2D(img_edgeExtract, i, j);// get the (i,j) pixel value
imageMat_edgeExtract[mwidth*i + j] = s.val[0];
}

IplImage* img4 = cvCreateImage(cvSize(mwidth, mheight), IPL_DEPTH_8U, 3);
//将处理后的图像值放入图像中显示
for (int i = 0; i<mheight; i++)
for (int j = 0; j<mwidth; j++) {
CvScalar s;
s.val[0] = imageMat_edgeExtract[i*mwidth + j];
s.val[1] = imageMat_edgeExtract[i*mwidth + j];
s.val[2] = imageMat_edgeExtract[i*mwidth + j];
cvSet2D(img4, i, j, s);
}
//for (i = 0; i < 100; i++)
//printf("%d\n", imageMat_edgeExtract[i]);

EllipsePoint *ellipsePoint = new EllipsePoint[mheight*mwidth];
for (i = 0; i < mheight; i++)
{
for (j = 0; j < mwidth; j++)
{
ellipsePoint[i*mwidth+j].isEdge = 0;
ellipsePoint[i*mwidth + j].rowIndex = i;
ellipsePoint[i*mwidth + j].colIndex = j;
}
}

int num = 0;
//边缘跟踪
for (i = 1; i < mheight-1; i++)
{
for (j = 1; j < mwidth-1; j++)
{
//if (ellipsePoint[i*mwidth + j].isEdge == 1)
if (imageMat_edgeExtract[i*mwidth + j] == 0)
continue;
if (ellipsePoint[i*mwidth + j].isEdge == 1)
continue;
EllipsePoint elli = hasNextPoint(imageMat_edgeExtract, i, j, mwidth, ellipsePoint);
if ((elli.rowIndex<1 || elli.rowIndex>mheight - 2 || elli.colIndex < 1 || elli.colIndex > mwidth - 2) && elli.isEdge)
{
imageMat_edgeExtract[i*mwidth + j] = 0;
imageMat_edgeExtract[elli.rowIndex*mwidth + elli.colIndex] = 0;
}
else if (!elli.isEdge)
{
imageMat_edgeExtract[i*mwidth + j] = 0;
//num++;
}
// else if (imageMat_edgeExtract[i*mwidth + j] == 255)
else
{
//num++;
//printf("%d,%d\n", i, j);
ellipsePoint[i*mwidth + j].isEdge = 1;
//ellipsePoint[i*mwidth + j].rowIndex = i;
//ellipsePoint[i*mwidth + j].colIndex = j;
int m, n;
vector <EllipsePoint> ep;
ep.push_back(ellipsePoint[i*mwidth + j]);
ep.push_back(elli);
EllipsePoint elli2 = hasNextPoint(imageMat_edgeExtract, elli.rowIndex, elli.colIndex, mwidth, ellipsePoint);
while (elli2.isEdge)
{
ep.push_back(elli2);
m = elli2.rowIndex;
n = elli2.colIndex;
if (m<1 || m>mheight - 2 || n < 1 || n > mwidth - 2)
break;
elli2 = hasNextPoint(imageMat_edgeExtract, m, n, mwidth, ellipsePoint);
}
/*if ((ep[0].rowIndex != ep[ep.size() - 1].rowIndex) || (ep[0].colIndex != ep[ep.size() - 1].colIndex))
{
for (i = 0; i < ep.size(); i++)
{
imageMat_edgeExtract[ep[i].rowIndex*mwidth + ep[i].colIndex] = 0;
//ellipsePoint[ep[i].rowIndex*mwidth + ep[i].colIndex].isEdge = 0;
}
}*/
if (ep[ep.size() - 1].rowIndex<1 || ep[ep.size() - 1].rowIndex>mheight - 2 || ep[ep.size() - 1].colIndex < 1 || ep[ep.size() - 1].colIndex > mwidth - 2)
{
for (int ii = 0; ii < ep.size(); ii++)
{
imageMat_edgeExtract[ep[ii].rowIndex*mwidth + ep[ii].colIndex] = 0;
}
}
//else if (hasPointNum(imageMat_edgeExtract, ep[ep.size() - 1].rowIndex, ep[ep.size() - 1].colIndex, mwidth) <3)
else if(abs(ep[ep.size() - 1].rowIndex- ep[0].rowIndex)>1|| abs(ep[ep.size() - 1].colIndex - ep[0].colIndex)>1||ep.size()<100)
{
for (int ii = 0; ii < ep.size(); ii++)
{
imageMat_edgeExtract[ep[ii].rowIndex*mwidth + ep[ii].colIndex] = 0;
}
}
else
ellipse.push_back(ep);
//ep.clear();
ep.swap(vector <EllipsePoint>());
}
/*else {
ellipsePoint[elli.rowIndex*mwidth + elli.colIndex].isEdge = 0;
}*/
}
}

int numCount = ellipse.size();//椭圆个数
CvBox2D *box = new CvBox2D[numCount];
CvPoint pt;
for (i = 0; i < numCount; i++)
{
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* ptseq = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq), sizeof(CvPoint), storage);//创建点序列
for (int j = 0; j < ellipse[i].size(); j++)
{
pt.x = ellipse[i][j].colIndex;
pt.y = ellipse[i][j].rowIndex;
cvSeqPush(ptseq, &pt);
}
box[i] = cvFitEllipse2(ptseq);//拟合椭圆
//cvEllipseBox(img, box[i], CV_RGB(0, 255, 0), 2);//绘制在图形上
//cvLine(img, cvPoint((int)box[i].center.x - 5, (int)box[i].center.y), cvPoint((int)box[i].center.x + 5, (int)box[i].center.y), CV_RGB(255, 0, 0));
//cvLine(img, cvPoint((int)box[i].center.x, (int)box[i].center.y - 5), cvPoint((int)box[i].center.x, (int)box[i].center.y + 5), CV_RGB(255, 0, 0));
}

int distance;
int *finalellipse = new int[ellipse.size()];
for (i = 0; i < ellipse.size(); i++)
finalellipse[i] = 1;
for (i = 0; i < numCount; i++)
{
int outpointnum = 0;
for (int j = 0; j < ellipse[i].size(); j++)
{
pt.x = ellipse[i][j].colIndex;
pt.y = ellipse[i][j].rowIndex;
distance = sqrt((pt.x - box[i].center.x)*(pt.x - box[i].center.x) + (pt.y - box[i].center.y)*(pt.y - box[i].center.y));
if ((distance >(max(box[i].size.height / 2, box[i].size.width / 2)) + 1)||(distance <(min(box[i].size.height / 2, box[i].size.width / 2)) - 1))
{
outpointnum++;
if(outpointnum>10)
{
finalellipse[i] = 0;
break;
}
}
}
if (finalellipse[i])
{
cvEllipseBox(img, box[i], CV_RGB(0, 255, 0), 2);//绘制在图形上
cvLine(img, cvPoint((int)box[i].center.x - 5, (int)box[i].center.y), cvPoint((int)box[i].center.x + 5, (int)box[i].center.y), CV_RGB(255, 0, 0));
cvLine(img, cvPoint((int)box[i].center.x, (int)box[i].center.y - 5), cvPoint((int)box[i].center.x, (int)box[i].center.y + 5), CV_RGB(255, 0, 0));
}
}

//m_img.CopyOf(m_iplImg);

//printf("num is:%d\n", num);
printf("The ellipse num:%d\n",ellipse.size());
IplImage* img_edgeTrace = cvCreateImage(cvSize(mwidth, mheight), IPL_DEPTH_8U, 1);
//将处理后的图像值放入图像中显示
for (int i = 0; i<mheight; i++)
for (int j = 0; j<mwidth; j++) {
CvScalar s;
s.val[0] = imageMat_edgeExtract[i*mwidth + j];
cvSet2D(img_edgeTrace, i, j, s);
}

IplImage* img5= cvCreateImage(cvSize(mwidth, mheight), IPL_DEPTH_8U, 3);
//将处理后的图像值放入图像中显示
for (int i = 0; i<mheight; i++)
for (int j = 0; j<mwidth; j++) {
CvScalar s;
s.val[0] = imageMat_edgeExtract[i*mwidth + j];
s.val[1] = imageMat_edgeExtract[i*mwidth + j];
s.val[2] = imageMat_edgeExtract[i*mwidth + j];
cvSet2D(img5, i, j, s);
}
//保存灰度图
const char* path;
path = "C:\\Users\\HP\\Desktop\\image4.JPG";
cvSaveImage(path, img5);

cvNamedWindow("显示原图像", 0);
cvShowImage("显示原图像", img);
cvNamedWindow("显示灰度图像", 0);
cvShowImage("显示灰度图像", img_gray);

cvNamedWindow("显示边缘提取后的图像", 0);
cvShowImage("显示边缘提取后的图像", img_edgeExtract);
cvNamedWindow("显示边缘跟踪后的图像", 0);
cvShowImage("显示边缘跟踪后的图像", img_edgeTrace);

cvWaitKey(0);
cvReleaseImage(&img);
cvReleaseImage(&img_gray);
cvReleaseImage(&img_edgeExtract);
cvReleaseImage(&img_edgeTrace);
cvDestroyWindow("显示原图像");
cvDestroyWindow("显示灰度图像");
cvDestroyWindow("显示边缘提取后的图像");
cvDestroyWindow("显示边缘跟踪后的图像");
delete[] imageMat;
delete[] imageMat_Gray;
delete[] imageMat_edgeExtract;
}

// LOG-style edge detector: Gaussian smoothing followed by a Laplacian,
// then a fixed threshold to binarize the result.
// src : 8-bit single-channel input image.
// dst : 8-bit single-channel output; receives the binary edge map
//       (255 = edge, 0 = background).  Must be pre-allocated by the caller
//       with the same size/depth as src.
void LogOperator(IplImage *src, IplImage *dst)
{
	IplImage* SmoothImg = cvCloneImage(src);

	cvSmooth(src, SmoothImg, CV_GAUSSIAN, 15, 15); // 15x15 Gaussian smoothing (the original comment claimed 3x3)
	cvLaplace(SmoothImg, dst, 5);                  // Laplacian, aperture size 5

	// Threshold the gradient image into a binary edge map.  Pixels must be
	// read as *unsigned* bytes: the original used a signed char*, so any
	// value above 127 compared as negative and strong edges were zeroed.
	int x, y;
	unsigned char* p = (unsigned char*)dst->imageData;
	int w = dst->widthStep; // row stride in bytes (may exceed width due to padding)
	for (x = 0; x < dst->width; x++)
	{
		for (y = 0; y < dst->height; y++)
		{
			if (p[x + y*w] > 40)
				p[x + y*w] = 255;
			else
				p[x + y*w] = 0;
		}
	}
	cvReleaseImage(&SmoothImg);
}

// Return the first unvisited edge pixel (value 255) in the 8-neighbourhood
// of (i, j), marking it as visited in ep as a side effect.
// imageMat : edge map, 0/255 per pixel, row-major with row stride mwidth.
// i, j     : row/column of the current pixel; must not lie on the border
//            (all 8 neighbour indices are dereferenced unconditionally).
// mwidth   : image width (row stride of imageMat and ep).
// ep       : per-pixel bookkeeping array; ep[k].isEdge records "visited".
// Returns an EllipsePoint whose isEdge flag is true iff a neighbour was
// found; rowIndex/colIndex then hold its coordinates.  When nothing is
// found they default to (i, j), so the result is always fully initialized
// (the original left them indeterminate, and the caller read them before
// checking isEdge).
EllipsePoint hasNextPoint(int* imageMat, int i, int j, int mwidth, EllipsePoint *ep)
{
	int index[8];
	EllipsePoint ell;
	ell.isEdge = 0;
	ell.rowIndex = i; // safe defaults: never return indeterminate fields
	ell.colIndex = j;
	// The 4-connected neighbours (up, down, left, right) are listed first so
	// they are preferred over the diagonals during tracing.
	index[0] = (i - 1)*mwidth + j;     // up
	index[1] = (i + 1)*mwidth + j;     // down
	index[2] = i*mwidth + j - 1;       // left
	index[3] = i*mwidth + j + 1;       // right
	index[4] = (i - 1)*mwidth + j - 1; // up-left
	index[5] = (i - 1)*mwidth + j + 1; // up-right
	index[6] = (i + 1)*mwidth + j - 1; // down-left
	index[7] = (i + 1)*mwidth + j + 1; // down-right
	for (int k = 0; k < 8; k++)
	{
		if (!ep[index[k]].isEdge && imageMat[index[k]] == 255)
		{
			ell.isEdge = 1;
			ep[index[k]].isEdge = 1; // claim the pixel so it is never revisited
			ell.rowIndex = index[k] / mwidth;
			ell.colIndex = index[k] - ell.rowIndex*mwidth;
			break;
		}
	}
	// Return the next point's information (isEdge == 0 means "none found").
	return ell;
}


这里我用到了openCV的一些函数,关于openCV的配置可以参照我的另外一篇博客:windows下opencv的配置http://blog.csdn.net/secyb/article/details/51341061

如有问题,欢迎大家讨论。
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息