您的位置:首页 > 其它

Kinect2.0之使用KinectSDK自带的KinectStudio进行数据采集

2016-11-29 16:59 441 查看
去年一直摆弄Kinect2.0,发现二代Kinect和一代存在差别较大,采集程序也大致不一样了。

在这里我主要是介绍两种采集方法,一种是基于KinectSDK驱动直接采集的,能够采集到深度图和彩色图。第二种是采用OPENNI2间接驱动KinectSDK进行采集,能够采集到ONI格式的视频。这篇博文我主要介绍怎么采用KinectSDK Studio分别采集RGB信息和深度信息。

下面C++代码段是在一个线程中采用深度帧读取器IDepthFrameReader和RGB帧读取器IColorFrameReader进行数据的采集和保存(IFrameDescription只用于获取帧的宽高信息)。

其中RGB信息主要用6个颜色vector进行存储,深度信息主要通过构建深度Map进行存储。其中因为SDK采集到的颜色格式并不是传统的RGB,所以需要对颜色空间进行转换。

void CKinectStudio::ReceiveStudioMessage(WPARAM wParam, LPARAM lParam)
{
	// Capture thread entry point.
	// lParam: parent window handle that receives progress / image messages.
	// wParam: BSTR path prefix; captured frame i is written to "<prefix><i>.txt".
	p_StudioFatherHwnd = (HWND)lParam;
	BSTR b = (BSTR)wParam;
	CString videoPath(b);
	SysFreeString(b);

	int depth_width, depth_height, color_width, color_height;
	IKinectSensor* m_pKinectSensor = NULL;
	IDepthFrameReader* m_pDepthFrameReader = NULL;
	IColorFrameReader* m_pColorFrameReader = NULL;
	IDepthFrameSource* pDepthFrameSource = NULL;
	IColorFrameSource* pColorFrameSource = NULL;
	HRESULT hr;

	// Open the sensor and the coordinate mapper (depth pixel -> camera space).
	hr = GetDefaultKinectSensor(&m_pKinectSensor);
	hr = m_pKinectSensor->Open();
	ICoordinateMapper* pCoordinateMapper;
	hr = m_pKinectSensor->get_CoordinateMapper(&pCoordinateMapper);

	// Depth stream reader and frame geometry.
	hr = m_pKinectSensor->get_DepthFrameSource(&pDepthFrameSource);
	hr = pDepthFrameSource->OpenReader(&m_pDepthFrameReader);
	IFrameDescription* pDepthDescription;
	hr = pDepthFrameSource->get_FrameDescription(&pDepthDescription);
	pDepthDescription->get_Width(&depth_width);   // 512
	pDepthDescription->get_Height(&depth_height); // 424
	SafeRelease(pDepthDescription); // FIX: was leaked
	SafeRelease(pDepthFrameSource);

	// Color stream reader and frame geometry.
	hr = m_pKinectSensor->get_ColorFrameSource(&pColorFrameSource);
	hr = pColorFrameSource->OpenReader(&m_pColorFrameReader);
	IFrameDescription* pColorDescription;
	hr = pColorFrameSource->get_FrameDescription(&pColorDescription);
	pColorDescription->get_Width(&color_width);   // 1920
	pColorDescription->get_Height(&color_height); // 1080
	SafeRelease(pColorDescription); // FIX: was leaked
	SafeRelease(pColorFrameSource);

	Mat depthImg_show = Mat::zeros(depth_height, depth_width, CV_8UC3); // raw UINT16 depth is unsuitable for display
	Mat colorImg = Mat::zeros(color_height, color_width, CV_8UC3);

	// Frame buffers.
	UINT nBufferSize_depth = 0;
	UINT16* pBuffer_depth = NULL;
	RGBQUAD* m_pColorRGBX = new RGBQUAD[color_width * color_height];
	std::vector<UINT16> depthBuffer(depth_width * depth_height);

	// Body-index stream: one BYTE per depth pixel, 0..5 = body id, 0xff = no body.
	// FIX: the original opened this source/reader twice and leaked the duplicates.
	IBodyIndexFrameSource* pBodyIndexFrameSource = NULL;
	IBodyIndexFrameReader* m_pBodyIndexFrameReader = NULL;
	HRESULT hrBodyIndex;
	hrBodyIndex = m_pKinectSensor->get_BodyIndexFrameSource(&pBodyIndexFrameSource);
	hrBodyIndex = pBodyIndexFrameSource->OpenReader(&m_pBodyIndexFrameReader);
	SafeRelease(pBodyIndexFrameSource);

	// Visualization palette: one color per possible body index.
	cv::Mat bodyIndexMat(depth_height, depth_width, CV_8UC3);
	cv::Vec3b color[6];
	color[0] = cv::Vec3b(255, 0, 0);
	color[1] = cv::Vec3b(0, 255, 0);
	color[2] = cv::Vec3b(0, 0, 255);
	color[3] = cv::Vec3b(255, 255, 0);
	color[4] = cv::Vec3b(255, 0, 255);
	color[5] = cv::Vec3b(0, 255, 255);

	// Per-frame point-cloud storage: SumFrame frames of XYZ float arrays.
	const int nPointsPerFrame = depth_width * depth_height;
	PointCloud* pPointCloud = new PointCloud[SumFrame];
	for (int i = 0; i < SumFrame; i++)
	{
		pPointCloud[i].arryX = new float[nPointsPerFrame];
		pPointCloud[i].arryY = new float[nPointsPerFrame];
		pPointCloud[i].arryZ = new float[nPointsPerFrame];
		// FIX: the original memset cleared only nPointsPerFrame BYTES, leaving
		// three quarters of each float array uninitialized.
		memset(pPointCloud[i].arryX, 0, nPointsPerFrame * sizeof(float));
		memset(pPointCloud[i].arryY, 0, nPointsPerFrame * sizeof(float));
		memset(pPointCloud[i].arryZ, 0, nPointsPerFrame * sizeof(float));
	}

	// FIX: keep the IplImage headers at function scope. The original did
	// `BGRImage = &IplImage(mat);`, posting the address of a temporary that was
	// already destroyed when the receiving window processed the message.
	IplImage bgrHeader;
	IplImage bodyHeader;

	int nFrame = 0;
	while (1)
	{
		IDepthFrame* pDepthFrame = NULL;
		IColorFrame* pColorFrame = NULL;
		USHORT nDepthMinReliableDistance = 0;
		USHORT nDepthMaxReliableDistance = 0;

		// --- depth frame ---
		HRESULT hrDepth = m_pDepthFrameReader->AcquireLatestFrame(&pDepthFrame);
		if (FAILED(hrDepth))
		{
			continue; // no new frame yet
		}
		hrDepth = pDepthFrame->get_DepthMinReliableDistance(&nDepthMinReliableDistance);
		hrDepth = pDepthFrame->get_DepthMaxReliableDistance(&nDepthMaxReliableDistance);
		hrDepth = pDepthFrame->AccessUnderlyingBuffer(&nBufferSize_depth, &pBuffer_depth);
		hrDepth = pDepthFrame->CopyFrameDataToArray(static_cast<UINT>(depthBuffer.size()), &depthBuffer[0]);
		depthImg_show = ConvertMat(pBuffer_depth, depth_width, depth_height, nDepthMinReliableDistance, nDepthMaxReliableDistance);
		SafeRelease(pDepthFrame);

		// --- color frame, converted by the SDK to BGRA ---
		HRESULT hrColor = m_pColorFrameReader->AcquireLatestFrame(&pColorFrame);
		if (FAILED(hrColor))
		{
			continue;
		}
		ColorImageFormat imageFormat = ColorImageFormat_None;
		hrColor = pColorFrame->get_RawColorImageFormat(&imageFormat);
		hrColor = pColorFrame->CopyConvertedFrameDataToArray(color_width * color_height * sizeof(RGBQUAD),
			reinterpret_cast<BYTE*>(m_pColorRGBX), ColorImageFormat_Bgra);
		colorImg = ConvertMat(m_pColorRGBX, color_width, color_height);
		SafeRelease(pColorFrame);

		// --- body-index frame ---
		IBodyIndexFrame* pBodyIndexFrame = NULL;
		hrBodyIndex = m_pBodyIndexFrameReader->AcquireLatestFrame(&pBodyIndexFrame);
		if (FAILED(hrBodyIndex))
		{
			continue;
		}
		UINT uSize = 0;
		BYTE* pBodyIndexBuffer = NULL;
		pBodyIndexFrame->AccessUnderlyingBuffer(&uSize, &pBodyIndexBuffer);

		// Map every depth pixel to camera space and record body-tagged points.
		for (int y = 0; y < depth_height; ++y)
		{
			for (int x = 0; x < depth_width; ++x)
			{
				// FIX: row-major index is x + y*depth_width (original used
				// y*depth_height, scrambling the point-cloud layout).
				int idx = x + y * depth_width;
				DepthSpacePoint depthSpacePoint = { static_cast<float>(x), static_cast<float>(y) };
				UINT16 depth = depthBuffer[idx];
				CameraSpacePoint cameraSpacePoint = { 0.0f, 0.0f, 0.0f };
				pCoordinateMapper->MapDepthPointToCameraSpace(depthSpacePoint, depth, &cameraSpacePoint);

				if (pBodyIndexBuffer[idx] != 0xff) {
					bodyIndexMat.at<cv::Vec3b>(y, x) = color[pBodyIndexBuffer[idx]];
				}
				else {
					bodyIndexMat.at<cv::Vec3b>(y, x) = cv::Vec3b(0, 0, 0);
				}

				if (pBodyIndexBuffer[idx] < 6 && (cameraSpacePoint.X > NAGATIVE && cameraSpacePoint.Y > NAGATIVE && cameraSpacePoint.Z > NAGATIVE))
				{
					pPointCloud[nFrame].arryX[idx] = cameraSpacePoint.X;
					pPointCloud[nFrame].arryY[idx] = cameraSpacePoint.Y;
					pPointCloud[nFrame].arryZ[idx] = cameraSpacePoint.Z;
				}
				else
				{
					pPointCloud[nFrame].arryX[idx] = 0;
					pPointCloud[nFrame].arryY[idx] = 0;
					pPointCloud[nFrame].arryZ[idx] = 0;
				}
			}
		}

		// Save pass. FIX: moved OUT of the per-row y-loop — the original ran it
		// inside the loop, dumping the final frame after only one row was filled
		// and posting the progress message once per row.
		if (g_SaveKinectStudio == TRUE)
		{
			::PostMessage(p_StudioFatherHwnd, WM_UPDATE_PROGRESSBAR, (WPARAM)nFrame, (LPARAM)0);
			if (nFrame == SumFrame - 1) // FIX: was hard-coded 150-1
			{
				nFrame = 0;
				g_SaveKinectStudio = FALSE;
				for (int i = 0; i < SumFrame; i++)
				{
					::PostMessage(p_StudioFatherHwnd, WM_UPDATE_PROGRESSBAR, (WPARAM)i, (LPARAM)0);
					CString btemp;
					btemp.Format(_T("%d"), i);
					CString txtPath = videoPath + btemp + _T(".txt");
					CStringA nameA(txtPath);
					// FIX: CStringA already owns a NUL-terminated narrow buffer;
					// the original copied it into a heap buffer that leaked per file.
					std::ofstream out((LPCSTR)nameA);
					if (out.is_open())
					{
						for (int p = 0; p < nPointsPerFrame; ++p)
						{
							// skip background / invalid points
							if (pPointCloud[i].arryX[p] == 0 && pPointCloud[i].arryY[p] == 0 && pPointCloud[i].arryZ[p] == 0)
							{
								continue;
							}
							out << pPointCloud[i].arryX[p] << " " << pPointCloud[i].arryY[p] << " " << pPointCloud[i].arryZ[p] << endl;
						}
					}
					out.close();
				}
				::PostMessage(p_StudioFatherHwnd, WM_UPDATE_PROGRESSBAR, (WPARAM)0, (LPARAM)0);
			}
		}
		SafeRelease(pBodyIndexFrame);

		// Publish the display images to the parent window.
		resize(colorImg, colorImg, depthImg_show.size());
		bgrHeader = IplImage(colorImg);
		bodyHeader = IplImage(bodyIndexMat);
		BGRImage = &bgrHeader;
		BodyImage = &bodyHeader;
		if (g_SaveKinectStudio == TRUE)
		{
			nFrame++;
		}
		::PostMessage(p_StudioFatherHwnd, WM_SHOW_RGBIMG, (WPARAM)BGRImage, (LPARAM)0);
		::PostMessage(p_StudioFatherHwnd, WM_SHOW_DEPTHIMG, (WPARAM)BodyImage, (LPARAM)1);
		Sleep(20);
	}

	// Cleanup (unreachable while the loop runs forever, kept for completeness;
	// FIX: also frees the buffers the original leaked).
	for (int i = 0; i < SumFrame; i++)
	{
		delete[] pPointCloud[i].arryX;
		delete[] pPointCloud[i].arryY;
		delete[] pPointCloud[i].arryZ;
	}
	delete[] pPointCloud;
	delete[] m_pColorRGBX;
	SafeRelease(pCoordinateMapper);
	SafeRelease(m_pBodyIndexFrameReader);
	SafeRelease(m_pDepthFrameReader);
	SafeRelease(m_pColorFrameReader);
	if (m_pKinectSensor)
	{
		m_pKinectSensor->Close();
	}
	SafeRelease(m_pKinectSensor);
}
下面程序片段是保存图片的程序
// Save an IplImage to disk under the given path.
// FIX: the original copied the narrow name into a `new char[MAX_PATH]` buffer
// and released it with scalar `delete` (undefined behavior for new[]), leaking
// intent for no gain — CStringA already owns a NUL-terminated narrow string
// that can be handed to cvSaveImage directly.
void CKinectStudio::gr_save_image(CString &strImageName, IplImage* img)
{
	CStringA name(strImageName); // narrow (ANSI) copy of the possibly-wide path
	cvSaveImage((LPCSTR)name, img);
}
下面是释放Kinect2.0上下文,深度生成器和RGB生成器的资源的函数。
// Release a COM interface pointer and null it out, so repeated calls and
// later null checks are safe no-ops.
template<class Interface>
inline void CKinectStudio::SafeRelease(Interface *& pInterfaceToRelease)
{
	if (pInterfaceToRelease == NULL)
		return;
	pInterfaceToRelease->Release();
	pInterfaceToRelease = NULL;
}
下面是将采集到的颜色缓冲区(RGBQUAD数组)和深度缓冲区转换成Mat的过程函数。
// Convert a BGRA (RGBQUAD) color buffer into a 3-channel cv::Mat.
// The alpha channel is dropped; output channel order is B, G, R.
Mat CKinectStudio::ConvertMat(const RGBQUAD* pBuffer, int nWidth, int nHeight)
{
	Mat img(nHeight, nWidth, CV_8UC3);
	uchar* pDst = img.data; // freshly created Mat is contiguous
	const int nPixels = nWidth * nHeight;
	for (int i = 0; i < nPixels; ++i)
	{
		const RGBQUAD& px = pBuffer[i];
		pDst[0] = px.rgbBlue;
		pDst[1] = px.rgbGreen;
		pDst[2] = px.rgbRed;
		pDst += 3;
	}
	return img;
}
下面是深度图Map转换成Mat的过程函数。
// Convert a raw UINT16 depth buffer into a displayable 3-channel gray cv::Mat.
// Depths inside [nMinDepth, nMaxDepth] are folded into 0..255 via modulo;
// depths outside the reliable range render as black.
Mat CKinectStudio::ConvertMat(const UINT16* pBuffer, int nWidth, int nHeight, USHORT nMinDepth, USHORT nMaxDepth)
{
	Mat img(nHeight, nWidth, CV_8UC3);
	uchar* pDst = img.data; // freshly created Mat is contiguous
	const int nPixels = nWidth * nHeight;
	for (int i = 0; i < nPixels; ++i)
	{
		const USHORT depth = pBuffer[i];
		BYTE intensity = 0;
		if (depth >= nMinDepth && depth <= nMaxDepth)
			intensity = static_cast<BYTE>(depth % 256);
		pDst[0] = intensity;
		pDst[1] = intensity;
		pDst[2] = intensity;
		pDst += 3;
	}
	return img;
}


以上这些主要利用了Kinect2.0的SDK驱动进行采集深度信息和RGB,并不能保存ONI格式的视频流。要保存ONI视频格式需要自己利用OPENNI2驱动Kinect2.0。
下一篇博文会写到这种方法。

以下是Kinect的样图





内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: 
相关文章推荐