
Virtual Viewpoint Image Generation 001

2015-05-06 15:10
This is the first post in my series on virtual viewpoint image generation; I hope I can keep it going.

1. Experimental Environment

Windows 8, Visual Studio 2013, OpenCV 2.4.11

2. Experimental Materials

Multi-view video sequences from Microsoft Research Asia (the Breakdancers dataset)

Download link:

http://research.microsoft.com/en-us/downloads/5e4675af-03f4-4b16-b3bc-a85c5bafb21d/

Using the Microsoft Research Asia multi-view video sequences, virtual viewpoint 1 is generated from viewpoints 0 and 2, with the OpenCV library assisting development. The main results are (the geometry involved is sketched after this list):

1. The color and depth images of virtual viewpoint 1 are generated from the color and depth images of reference viewpoint 0, with the z-buffer algorithm used to resolve occlusions;

2. Building on item 1, the color and depth images of virtual viewpoint 1 are also generated from the color and depth images of reference viewpoint 2;

3. To reduce cracks, median filtering is applied to the color and depth images;

4. Bidirectional mapping is used (i.e., the virtual view is warped from both reference views, 0 and 2).
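
Before the code, a minimal sketch of the geometry it implements; the depth range and matrix layout below follow the Breakdancers calibration data used in the program. A pixel $(u,v)$ of a reference view with depth level $d \in [0,255]$ is first converted to metric depth

$$ z = \frac{1}{\dfrac{d}{255}\left(\dfrac{1}{Z_{\min}} - \dfrac{1}{Z_{\max}}\right) + \dfrac{1}{Z_{\max}}}, \qquad Z_{\min} = 44, \quad Z_{\max} = 120. $$

It is then back-projected to a 3D point $(x,y,z)$ by solving $P_{\mathrm{ref}}\,(x,y,z,1)^{\top} \sim (u,v,1)^{\top}$ for $x$ and $y$, where each camera's projection matrix is $P = K\,[R \mid t]$ (stored in the code as a $4\times4$ matrix with a trailing $(0,0,0,1)$ row). Finally the point is re-projected into the virtual camera,

$$ w\,(u',v',1)^{\top} = P_{\mathrm{virt}}\,(x,y,z,1)^{\top}, $$

where $w$ is the point's depth in the virtual camera and drives the z-buffer test. Note that the calibration data assumes a bottom-left image origin, which is why the code flips the $v$ coordinate.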

Original images



Generated images



Source code

#ifndef _wrapingOf3D1
#define _wrapingOf3D1
#include<iostream>
#include<opencv2\opencv.hpp>
class Histogram1D{
private:
int histSize[1];
float hranges[2];
const float *ranges[1];
int channels[1];
public:
Histogram1D(){
histSize[0] = 256;
hranges[0] = 0.0;
hranges[1] = 255.0;
ranges[0] = hranges;
channels[0] = 0;
}
cv::MatND getHistogram(const cv::Mat &image){
cv::MatND hist;
cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges);
return hist;
}
cv::Mat getHistogramImage(const cv::Mat &image){
cv::MatND hist = getHistogram(image);
double maxVal = 0;
double minVal = 0;
cv::minMaxLoc(hist, &minVal, &maxVal, 0, 0);
cv::Mat histImg(histSize[0], histSize[0], CV_8U, cv::Scalar(255));
int hpt = static_cast<int>(0.9*histSize[0]);
for (int h = 0; h < histSize[0]; h++){
float binVal = hist.at<float>(h);
int intensity = static_cast<int>(binVal*hpt / maxVal);
cv::line(histImg, cv::Point(h, histSize[0]), cv::Point(h, histSize[0] - intensity), cv::Scalar::all(100), 1);
}
return histImg;
}
};

/*
**define a struct holding the intrinsic and extrinsic camera parameters
*/
typedef struct {
double m_K[3][3]; // 3x3 intrinsic matrix
double m_RotMatrix[3][3]; // rotation matrix
double m_Trans[3]; // translation vector

double m_ProjMatrix[4][4]; // projection matrix
} CalibStruct;
/*
**define global variables
*/
CalibStruct m_CalibParams[8];
int m_NumCameras = 8;
int m_Width = 1024, m_Height = 768; // camera resolution is 1024x768
double pts[8][3];

/*
**function declarations
*/
void InitializeFromFile(char *fileName);
double DepthLevelToZ(unsigned char d);
unsigned char ZToDepthLever(double z);
double projXYZtoUV(double projMatrix[4][4], double x, double y, double z, double *u, double *v);
void projUVZtoXY(double projMatrix[4][4], double u, double v, double z, double *x, double *y);
void wrapingImage(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut);
void pointProject_from_ref_to_otherView(double pts[8][3], int ref, int u, int v, unsigned char d);
/*
**function definitions
*/

/*
** read the calibration text file and fill the global calibration structs
*/
void readCalibrationFile(char *fileName)
{
int i, j, k;
FILE *pIn;
double dIn; // dummy variable
int camIdx;

if (pIn = fopen(fileName, "r"))
{
for (k = 0; k<m_NumCameras; k++)
{
// camera index
fscanf(pIn, "%d", &camIdx);
//std::cout << camIdx << std::endl;

// camera intrinsics
for (i = 0; i < 3; i++){
fscanf(pIn, "%lf\t%lf\t%lf", &(m_CalibParams[camIdx].m_K[i][0]), &(m_CalibParams[camIdx].m_K[i][1]), &(m_CalibParams[camIdx].m_K[i][2]));
//std::cout << (m_CalibParams[camIdx].m_K[i][0])<<"\t"<<(m_CalibParams[camIdx].m_K[i][1]) <<"\t"<< (m_CalibParams[camIdx].m_K[i][2]) << std::endl;
}

// read barrel distortion params (assume 0)
fscanf(pIn, "%lf", &dIn);
fscanf(pIn, "%lf", &dIn);

// read extrinsics
for (i = 0; i<3; i++)
{
for (j = 0; j<3; j++)
{
fscanf(pIn, "%lf", &dIn);
m_CalibParams[camIdx].m_RotMatrix[i][j] = dIn;
//std::cout << m_CalibParams[camIdx].m_RotMatrix[i][j] << std::endl;
}

fscanf(pIn, "%lf", &dIn);
m_CalibParams[camIdx].m_Trans[i] = dIn;
}

}

fclose(pIn);
}
}// readCalibrationFile

/*
**compute all projection matrices from the stored calibration parameters
*/
void computeProjectionMatrices()
{
int i, j, k, camIdx;
double(*inMat)[3];
double exMat[3][4];

for (camIdx = 0; camIdx<m_NumCameras; camIdx++)
{
// The intrinsic matrix
inMat = m_CalibParams[camIdx].m_K;

// The extrinsic matrix
for (i = 0; i<3; i++)
{
for (j = 0; j<3; j++)
{
exMat[i][j] = m_CalibParams[camIdx].m_RotMatrix[i][j];
}
}

for (i = 0; i<3; i++)
{
exMat[i][3] = m_CalibParams[camIdx].m_Trans[i];
}

// Multiply the intrinsic matrix by the extrinsic matrix to find our projection matrix
for (i = 0; i<3; i++)
{
for (j = 0; j<4; j++)
{
m_CalibParams[camIdx].m_ProjMatrix[i][j] = 0.0;

for (k = 0; k<3; k++)
{
m_CalibParams[camIdx].m_ProjMatrix[i][j] += inMat[i][k] * exMat[k][j];
}
}
}

m_CalibParams[camIdx].m_ProjMatrix[3][0] = 0.0;
m_CalibParams[camIdx].m_ProjMatrix[3][1] = 0.0;
m_CalibParams[camIdx].m_ProjMatrix[3][2] = 0.0;
m_CalibParams[camIdx].m_ProjMatrix[3][3] = 1.0;
}
}

/**
**initialize: read the calibration file and compute all projection matrices
*/
void InitializeFromFile(char *fileName)
{
readCalibrationFile(fileName);
computeProjectionMatrices();
}
/**
**convert a depth-image level d to metric depth z
*/
double DepthLevelToZ(unsigned char d)
{
double z;
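// MinZ/MaxZ are the near/far depth range of the Breakdancers sequence, hard-coded here rather than read from the dataset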
double MinZ = 44.0, MaxZ = 120.0;

z = 1.0 / ((d / 255.0)*(1.0 / MinZ - 1.0 / MaxZ) + 1.0 / MaxZ);
return z;
}

/**
**convert metric depth z back to a depth-image level d
*/
unsigned char ZToDepthLever(double z)
{
double MinZ = 44.0, MaxZ = 120.0;
unsigned char d;
d = (unsigned char)(255.0*(1 / (double)z - 1 / MaxZ) / (1 / MinZ - 1 / MaxZ));
return d;
}

/**
**compute world coordinates x,y from pixel (u,v) and depth z, given a projection matrix
**e.g. m_CalibParams[i].m_ProjMatrix, where i is the camera index
*/
void projUVZtoXY(double projMatrix[4][4], double u, double v, double z, double *x, double *y)
{
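// with z known, P*[x y z 1]^T ~ [u v 1]^T gives two linear equations in x and y, which are solved in closed form below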
double c0, c1, c2;

// image (0,0) is bottom lefthand corner
v = (double)m_Height - v - 1.0;

c0 = z*projMatrix[0][2] + projMatrix[0][3];
c1 = z*projMatrix[1][2] + projMatrix[1][3];
c2 = z*projMatrix[2][2] + projMatrix[2][3];

*y = u*(c1*projMatrix[2][0] - projMatrix[1][0] * c2) +
v*(c2*projMatrix[0][0] - projMatrix[2][0] * c0) +
projMatrix[1][0] * c0 - c1*projMatrix[0][0];

*y /= v*(projMatrix[2][0] * projMatrix[0][1] - projMatrix[2][1] * projMatrix[0][0]) +
u*(projMatrix[1][0] * projMatrix[2][1] - projMatrix[1][1] * projMatrix[2][0]) +
projMatrix[0][0] * projMatrix[1][1] - projMatrix[1][0] * projMatrix[0][1];

*x = (*y)*(projMatrix[0][1] - projMatrix[2][1] * u) + c0 - c2*u;
*x /= projMatrix[2][0] * u - projMatrix[0][0];
} // projUVZtoXY

/**
**project world point (x,y,z) to pixel coordinates (u,v)
**the return value is the point's depth after projection (z1); z is the depth before projection
**/

double projXYZtoUV(double projMatrix[4][4], double x, double y, double z, double *u, double *v)
{
double w;
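// w is the homogeneous scale of the projection; with the usual (0,0,1) last row of K it equals the point's depth in the target camera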

*u = projMatrix[0][0] * x +
projMatrix[0][1] * y +
projMatrix[0][2] * z +
projMatrix[0][3];

*v = projMatrix[1][0] * x +
projMatrix[1][1] * y +
projMatrix[1][2] * z +
projMatrix[1][3];

w = projMatrix[2][0] * x +
projMatrix[2][1] * y +
projMatrix[2][2] * z +
projMatrix[2][3];

*u /= w;
*v /= w;

// image (0,0) is bottom lefthand corner
*v = (double)m_Height - *v - 1.0;

return w;

} // projXYZtoUV

/**
**back-project pixel (u,v) with depth level d from reference camera ref to a 3D point, then project that point into all 8 cameras; pts[cam] receives (u, v, depth level) for each camera
**/

void pointProject_from_ref_to_otherView(double pts[8][3], int ref, int u, int v, unsigned char d)
{
double x, y, z = DepthLevelToZ(d);

//printf("Testing projection of pt (%d,%d) in camera 0 with d = %d (z = %f) to other cameras\n", u, v, d, z);

projUVZtoXY(m_CalibParams[ref].m_ProjMatrix, (double)u, (double)v, z, &x, &y);
//printf("3D pt = (%f, %f, %f) [coordinates wrt reference camera]\n", x, y, z);

for (int cam = 0; cam<8; cam++)
{
double *pt = pts[cam];

pt[2]=projXYZtoUV(m_CalibParams[cam].m_ProjMatrix, x, y, z, &pt[0], &pt[1]);
//printf("Camera %d: (%f, %f)\n", cam, pt[0], pt[1]);
pt[2] = ZToDepthLever(pt[2]);
}
}

/**
**warp the image of reference camera ref into the view of camera proj
**this is the core routine
**/
void wrapingImage(int ref, int proj, cv::Mat &imageColor, cv::Mat &imageDepth, cv::Mat &imageColorOut, cv::Mat &imageDepthOut)
{
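// forward-warp every pixel of the reference view into the virtual view using its depth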
for (int v = 0; v < imageColor.rows; v++)
for (int u = 0; u < imageColor.cols; u++)
{
double d = imageDepth.at<cv::Vec3b>(v, u)[0];
pointProject_from_ref_to_otherView(pts, ref, u, v, d);
int u1 = (int)pts[proj][0];
int v1 = (int)pts[proj][1];
int k1 = (int)pts[proj][2];
if (u1 < 0 || u1 >= imageColor.cols - 1 || v1 < 0 || v1 >= imageColor.rows - 1)
continue;
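// z-buffer test: a larger depth level means a closer point (d = 255 at MinZ), so only closer samples may overwrite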
if (k1 < imageDepthOut.at<cv::Vec3b>(v1, u1)[0])
continue;
imageColorOut.at<cv::Vec3b>(v1, u1) = imageColor.at<cv::Vec3b>(v, u);
imageDepthOut.at<cv::Vec3b>(v1, u1)[0] = k1;
imageDepthOut.at<cv::Vec3b>(v1, u1)[1] = k1;
imageDepthOut.at<cv::Vec3b>(v1, u1)[2] = k1;
}
}

void display(char *calibParam, char *refColor, char *refDepth, char *refColor2, char *refDepth2, char *actColor)
{
//initialize projection_Matrix
InitializeFromFile(calibParam);
//display projection_Matrix
for (int i = 0; i < m_NumCameras; i++){
for (int j = 0; j < 3; j++){
for (int k = 0; k < 3; k++)
std::cout << m_CalibParams[i].m_K[j][k] << "\t";
std::cout << std::endl;
}

for (int j = 0; j < 3; j++){
for (int k = 0; k < 3; k++)
std::cout << m_CalibParams[i].m_RotMatrix[j][k] << "\t";
std::cout << std::endl;
}
for (int k = 0; k < 3; k++)
std::cout << m_CalibParams[i].m_Trans[k] << "\t";
std::cout << std::endl;
std::cout << std::endl;
std::cout << std::endl;
std::cout << std::endl;
}
//pause until the user types a number
int aa;
std::cin >> aa;
//read the color and depth images of the reference views
cv::Mat imageColor = cv::imread(refColor);
cv::Mat imageDepth = cv::imread(refDepth);
cv::Mat imageColor2 = cv::imread(refColor2);
cv::Mat imageDepth2 = cv::imread(refDepth2);
//read the actual captured image used for comparison
cv::Mat imageColor_actual = cv::imread(actColor);
//set reference cam
int ref = 0;
int ref2 = 2;
//set projection cam
int proj = 1;
//test: project pixel (700, 700) of the reference view into all cameras and print the results
pointProject_from_ref_to_otherView(pts, ref, 700, 700, imageDepth.at<cv::Vec3b>(700, 700)[0]);
for (int i = 0; i < 8; i++)
{
std::cout << pts[i][0] << "\t" << pts[i][1] << std::endl;
}
std::cin >> aa;
//allocate the output color and depth images, initialized to zero
cv::Mat imageColorOut = cv::Mat::zeros(imageColor.rows, imageColor.cols, imageColor.type());
cv::Mat imageColorOut2 = cv::Mat::zeros(imageColor.rows, imageColor.cols, imageColor.type());
cv::Mat imageDepthOut = cv::Mat::zeros(imageColor.rows, imageColor.cols, imageColor.type());
//save_dir
char *save_dir = "C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\";
//warp from the reference views to the virtual view
wrapingImage(ref, proj, imageColor, imageDepth, imageColorOut,imageDepthOut);
wrapingImage(ref2, proj, imageColor2, imageDepth2, imageColorOut, imageDepthOut);
//display reference_image
/*cv::imshow("reference_image", imageColor);
cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\reference_image.jpg", imageColor);*/
//display the virtual color image
cv::medianBlur(imageColorOut, imageColorOut, 3);
cv::imshow("virtruel_Color_image01", imageColorOut);
cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image01.jpg", imageColorOut);
////display virtruel_image
//cv::imshow("virtruel_Color_image21", imageColorOut2);
//cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Color_image21.jpg", imageColorOut2);
//display the virtual depth image
cv::medianBlur(imageDepthOut, imageDepthOut, 3);
cv::imshow("virtruel_Depth_image", imageDepthOut);
cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\virtruel_Depth_image.jpg", imageDepthOut);
//display the actual (captured) image
cv::imshow("actruel_image", imageColor_actual);
cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\actruel_image.jpg", imageColor_actual);
////mix reference and virtruel and check changes
//cv::Mat imageColorRefVSProj;
//cv::addWeighted(imageColor, 0.5, imageColorOut, 0.5, 0.0, imageColorRefVSProj);
//cv::imshow("imageColorRefVSProj", imageColorRefVSProj);
//cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\imageColorRefVSProj.jpg", imageColorRefVSProj);
////mix actual and virtruel and check changes
//cv::Mat imageColorActVSProj;
//cv::addWeighted(imageColor_actual, 0.5, imageColorOut, 0.5, 0.0, imageColorActVSProj);
//cv::imshow("imageColorActVSProj", imageColorActVSProj);
//cv::imwrite("C:\\Users\\jiang\\Desktop\\experientdata\\experiencePictrue\\imageColorActVSProj.jpg", imageColorActVSProj);
cv::waitKey();

}

int main()
{
char *refColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam0\\color-cam0-f000.jpg";
char *refDepth = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam0\\depth-cam0-f000.png";
char *refColor2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam2\\color-cam2-f000.jpg";
char *refDepth2 = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam2\\depth-cam2-f000.png";
char *calibParam = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\calibParams-breakdancers.txt";
char *actColor = "C:\\Users\\jiang\\Desktop\\experientdata\\3DVideos-distrib\\MSR3DVideo-Breakdancers\\cam1\\color-cam1-f000.jpg";
display(calibParam, refColor, refDepth, refColor2, refDepth2, actColor);
}
#endif
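
A note on building (based on the environment above, OpenCV 2.4.11 with VS2013 on Windows): cv::imread / cv::imshow / cv::imwrite live in the highgui module and cv::medianBlur / cv::calcHist in imgproc, so the project should link opencv_core2411.lib, opencv_imgproc2411.lib and opencv_highgui2411.lib (or their *d.lib debug variants). The absolute image and calibration paths in main(), as well as the save directory, are specific to my machine and need to be adapted.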