OPENNI学习实践-openni+kinect 获取真实世界坐标
2016-11-02 10:55
525 查看
#include <stdlib.h> #include <iostream> #include <string> #include <XnCppWrapper.h> #include <opencv2/opencv.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/core/core.hpp> using namespace std; using namespace xn; using namespace cv; void CheckOpenNIError(XnStatus eResult,string sStatus) { if(eResult != XN_STATUS_OK) cout << sStatus << "Error: " << xnGetStatusString(eResult) << endl;//P53 } int main() { XnStatus eResult = XN_STATUS_OK; DepthMetaData depthMD; IRMetaData irData; namedWindow("Depth Image"); namedWindow("IRImage"); Context mContext; eResult = mContext.Init(); CheckOpenNIError(eResult,"Initialize context"); DepthGenerator mDepthGenerator; eResult = mDepthGenerator.Create(mContext); CheckOpenNIError(eResult,"Create depth generator"); IRGenerator mIRGenerator; eResult = mIRGenerator.Create(mContext); CheckOpenNIError(eResult,"Create depth generator"); mDepthGenerator.GetMirrorCap().SetMirror(true); mIRGenerator.GetMirrorCap().SetMirror(true); XnMapOutputMode mapMode; mapMode.nXRes = 640; mapMode.nYRes = 480; mapMode.nXRes = 30; eResult = mDepthGenerator.SetMapOutputMode(mapMode); XnMapOutputMode MyMapMode; MyMapMode.nXRes = 640; MyMapMode.nYRes = 480; MyMapMode.nFPS = 30; mIRGenerator.SetMapOutputMode( MyMapMode ); eResult = mContext.StartGeneratingAll(); int flag=0; while(true) { eResult = mContext.WaitAnyUpdateAll(); if(eResult == XN_STATUS_OK) { mDepthGenerator.GetMetaData(depthMD); Mat cvDepthImage16UC1(depthMD.FullYRes(),depthMD.FullXRes(),CV_16UC1,(char*) depthMD.Data()); Mat cvDepthImage8UC1; cvDepthImage16UC1.convertTo(cvDepthImage8UC1,CV_8UC1,255.0/(depthMD.ZRes())); imshow("Depth Image",cvDepthImage8UC1); mIRGenerator.GetMetaData(irData); Mat irImage16UC1(irData.FullYRes(),irData.FullXRes(),CV_16UC1,(char*) irData.Data()); Mat irImage8UC1; irImage16UC1.convertTo(irImage8UC1,CV_8UC1,255.0/255); cv::circle(irImage8UC1,Point2d(MyMapMode.nXRes/2,MyMapMode.nYRes/2),10,Scalar(255),5); XnPoint3D *ProjectivePoint=new XnPoint3D[1]; XnPoint3D 
*ProjectivePoint2=new XnPoint3D[1]; XnPoint3D *RealPoint=new XnPoint3D[1]; ProjectivePoint[0]=xnCreatePoint3D(MyMapMode.nXRes/2,MyMapMode.nYRes/2,depthMD(MyMapMode.nXRes/2,MyMapMode.nYRes/2)); mDepthGenerator.ConvertProjectiveToRealWorld(1,ProjectivePoint,RealPoint); printf("z=%f",RealPoint[0].Z); imshow("IRImage",irImage8UC1); waitKey(30); } } mContext.StopGeneratingAll(); mContext.Shutdown(); return 0; }重点在于
XnPoint3D *ProjectivePoint=new XnPoint3D[1]; XnPoint3D *ProjectivePoint2=new XnPoint3D[1]; XnPoint3D *RealPoint=new XnPoint3D[1]; ProjectivePoint[0]=xnCreatePoint3D(MyMapMode.nXRes/2,MyMapMode.nYRes/2,depthMD(MyMapMode.nXRes/2,MyMapMode.nYRes/2)); mDepthGenerator.ConvertProjectiveToRealWorld(1,ProjectivePoint,RealPoint);首先定义几个点,然后选取需要转换的点在深度图中的位置,然后利用函数进行计算即可得到真实坐标值。
利用DepthGenerator 中GetDepthMap或者GetMetaData均可获得深度图像的像素值;GetMetaData函数是将像素值填充到DepthMetaData的对象中,GetDepthMap是直接读取像素值。
XnPoint3D是保存类型为float的x、y、z的结构,为将像素值转为点云,须利用DepthGenerator的ConvertProjectiveToRealWorld函数:
XnStatus ConvertProjectiveToRealWorld (XnUInt32 nCount, const XnPoint3D aProjective[], XnPoint3D aRealWorld[]) const其中nCount表示点的数目,aProjective表示投影坐标下所有(x,y,z)的值,aRealWorld表示实际空间坐标值,该坐标系的坐标原点是:depth map的中心投影到Kinect所在平面上的点。
相关文章推荐
- 从kinect获取世界坐标
- OPENNI学习实践-利用openni获取红外图像
- 【STM32 .Net MF开发板学习-10】SPI测试之触摸屏坐标获取
- Google Maps API V3学习一(获取地图坐标与街道地址)
- Cocos2d-x学习:根据贝塞尔曲线进行抛物线移动补充,三角形顶点坐标获取某个角的角度
- Flare3d 如何获取骨骼部位的世界坐标
- kinect与openni学习资料汇总【转】
- 获取HTML控件当前的真实坐标。
- Opencv及Kinect/OpenNI常用学习网址
- Building Coder(Revit 二次开发) - 真实世界中的角坐标
- Kinect+OpenNI学习笔记之4(OpenNI获取的图像结合OpenCV显示)
- Opencv及Kinect/OpenNI常用学习网址
- OpenCV学习笔记(20)Kinect + OpenNI + OpenCV + OpenGL 组合体验
- Kinect开发教程五:OpenNI获取人体骨架
- kinect和openNI学习资料汇总
- Kinect+OpenNI学习笔记之9(不需要骨骼跟踪的人体手部分割)
- 【STM32 .Net MF开发板学习-10】SPI测试之触摸屏坐标获取
- OpenGl学习笔记3——屏幕坐标(2维)转化为世界坐标(3维)
- kinect和openNI学习资料汇总
- ROS链接openni库获取kinect数据(PC端)