您的位置:首页 > 其它

自适应三特征融合之Camshift目标跟踪——颜色、纹理、边缘方向

2017-06-06 14:45 281 查看
         Camshift是我接触的第一个跟踪算法,原理简单明了(情怀啊!),Opencv将其实现并封装,拿起就用。

但不得不承认传统Camshift局限性很大,对于背景复杂或者光照变化的环境跟踪效果很不理想。

一些人对此进行了部分改进,除了颜色特征以外,大家融入了纹理、边缘方向等特征,以此提高该算法的鲁棒性,也达到了一定的效果。

值得一提的是,就算是改进后的算法,在实际应用中也是相当的鸡肋,该算法表面上看早已是走上了末路。(个人观点,不喜绕路!)

但基于Camshift算法的改进算法在目标跟踪入门时是很好的参考算法,甚至随便改进下在国内一些低质量的期刊上发表论文很容易,这些文章泛滥了估计有20年了,但来者不拒。

        以下是比较流行的一种自适应三特征融合之Camshift目标跟踪——颜色、纹理、边缘方向的实现,仅供大家参考!作者能力有限,如有疏漏,万分抱歉!

以下分别为特征提取函数:

1-- 纹理特征提取

/*****************************
Function: local binary pattern (LBP) texture extraction.
Param:  inputImg - single-channel 8-bit (CV_8UC1) grayscale image.
Return: texture image Mat (8UC1). Each interior pixel holds the 8-bit
        LBP code of its 3x3 neighborhood (clockwise from top-left,
        top-left neighbor = MSB, strictly-greater comparison).
        The one-pixel border is zero; the original listing left it
        uninitialized because Mat(rows,cols,type) does not zero memory.
******************************/
Mat getTextureImg(Mat& inputImg)   // inputImg is a grayscale image
{
    // zeros() guarantees the untouched 1-px border holds defined values.
    Mat outputImg = Mat::zeros(inputImg.rows, inputImg.cols, CV_8UC1);

    // Neighbor offsets, clockwise from the top-left corner.
    // Index 0 contributes the most significant bit (weight 128),
    // matching the original temp[0]*128 + ... + temp[7]*1 encoding.
    static const int dr[8] = {-1, -1, -1,  0,  1,  1,  1,  0};
    static const int dc[8] = {-1,  0,  1,  1,  1,  0, -1, -1};

    for (int i = 1; i < inputImg.rows - 1; i++)
    {
        uchar* out = outputImg.ptr<uchar>(i);
        for (int j = 1; j < inputImg.cols - 1; j++)
        {
            const int center = inputImg.at<uchar>(i, j);
            int code = 0;
            for (int k = 0; k < 8; k++)
            {
                code <<= 1;
                // Strictly greater-than, as in the original: equal
                // neighbors contribute a 0 bit.
                if (inputImg.at<uchar>(i + dr[k], j + dc[k]) > center)
                    code |= 1;
            }
            out[j] = (uchar)code;
        }
    }
    return outputImg;
}

或者Gabor滤波器提取纹理特征,封装为一个类,需要的转到以下地址Download

2--边缘方向特征提取

/***********************************
Function: extract the edge-direction feature of an image.
Param:  inputImg - single-channel 8-bit grayscale image.
Return: edge-direction image (32FC1). A pixel holds atan(dy/dx) —
        range (-PI/2, PI/2) — when it is a Canny edge pixel with a
        non-zero horizontal gradient, and 0 everywhere else.
************************************/
Mat getEdgeDirectionImage(Mat& inputImg) // inputImg is a grayscale image
{
    Mat edges, dx16s, dy16s;
    Canny(inputImg, edges, 60, 180, 3);
    Sobel(inputImg, dx16s, CV_16S, 1, 0, 3); // first-order x difference: dx
    Sobel(inputImg, dy16s, CV_16S, 0, 1, 3); // first-order y difference: dy

    Mat dxf, dyf;
    dx16s.convertTo(dxf, CV_32F);
    dy16s.convertTo(dyf, CV_32F);

    Mat direction(inputImg.size(), CV_32FC1);
    for (int row = 0; row < direction.rows; ++row)
    {
        const uchar* edge = edges.ptr<uchar>(row);
        const float* px   = dxf.ptr<float>(row);
        const float* py   = dyf.ptr<float>(row);
        float*       dir  = direction.ptr<float>(row);

        for (int col = 0; col < direction.cols; ++col)
        {
            // Only Canny edge pixels with dx != 0 carry a direction;
            // everything else is forced to 0, so the ratio is computed
            // lazily instead of dividing the whole image up front.
            if (edge[col] != 0 && px[col] != 0.0f)
                dir[col] = atanf(py[col] / px[col]);
            else
                dir[col] = 0.0f;
        }
    }
    return direction;
}
颜色特征提取直接绘制颜色分布直方图即可;
以下为三特征融合函数

/**********************************
Function: fuse three back-projection images into one weighted image.
Params: backproja/backprojb/backprojc - 8UC1 back-projections of equal size
        (color, edge-direction, texture).
        afa/beta/gama - fusion weights; the caller normally keeps
        afa + beta + gama == 1, but this is not enforced here.
Return: fused back-projection Mat (same size/depth as backproja).
***********************************/
Mat fuseHist(Mat& backproja,Mat& backprojb,Mat& backprojc,double afa,double beta,double gama)
{
    Mat dst(backproja.size(), backproja.depth());
    for (int i = 0; i < dst.rows; i++)
    {
        uchar* data        = dst.ptr<uchar>(i);
        const uchar* dataa = backproja.ptr<uchar>(i);
        const uchar* datab = backprojb.ptr<uchar>(i);
        const uchar* datac = backprojc.ptr<uchar>(i);
        for (int j = 0; j < dst.cols; j++)
        {
            // saturate_cast clamps to [0,255]. The original assigned the
            // double sum straight to uchar, which truncates with
            // implementation-defined results when the weights do not sum
            // to <= 1 and the value exceeds 255.
            data[j] = saturate_cast<uchar>(afa * dataa[j] + beta * datab[j] + gama * datac[j]);
        }
    }
    return dst;
}
以下为跟踪函数,代码比较乱,仅供参考!由于跟踪只是项目的一部分,所以大家看到莫名其妙的代码,比如背景减除什么的,自动忽略,找到Camshift部分看下就可以了,不便之处,见谅!
// --- Global state shared between the UI (mouse) callback and the
// --- processing thread. NOTE(review): accessed from both threads
// --- without explicit locking apart from the hEvent around frameQueue.
int img_count       = 0;// frame counter for the three-frame difference
int realframe_count = 0;// real-frame counter; controls the frame sampling interval
int ObjectCount = 0;// number of tracked targets

//int frame_count = 0;// for debugging only, no real meaning
//int paoQi_count = 0;// for debugging only
//int store_count = 0;// for debugging only

Mat src,src_YCrCb,frame1,frame2,frame3;
Mat gray1,gray2,gray3;
Mat diff1,diff2;
Mat result;
Mat roi_Object1;
Mat roi_Object2;
Mat roi_Object3;
Mat roi_Object4;

// Frames handed from the capture thread to the processing thread.
list<Mat> frameQueue; 

vector<vector<Point>> contours;// contour data
vector<vector<Point>> contoursPre;// previous-frame contour data
//vector<vector<Point>> contours1;
Mat image;  
bool backprojMode = false; // true = display the back-projection instead of the frame
bool selectObject = false;// true while the user is dragging the initial target rectangle
int trackObject = 0; // tracked-target flag: 0 = none, -1 = selection done (init pending), 1 = tracking
bool showHist = true;// whether to display the histogram
Point origin;// point of the first mouse click of the current selection
Rect selection;// rectangle selected with the mouse
int vmin = 10, vmax = 256, smin = 30;  
Rect trackWindow;  
RotatedRect trackBox;// rotated rectangle returned by CamShift
Point point_last;// target center in the previous frame (for linear extrapolation)
Point point_predict;// extrapolated target center for the next frame
// Histogram bin counts: hue, saturation, edge direction, texture, LBP.
int hsize[] = {51};
int ssize[] = {16};
int edsize[] = {90};
int tsize[] = {30};
int lbpsize[] = {40};
// Value ranges for the feature histograms (edge direction uses
// atan(dy/dx) restricted to [-PI/2, 0]).
float hranges[] = {0,255};
float sranges[] = {0,1};
float edranges[] = {-PI/2,0};
// float edranges1[] = {-PI/2,-PI/4};
// float edranges2[] = {-PI/4,0};
// float edranges3[] = {0,PI/4};
// float edranges4[] = {PI/4,PI/2};
float tranges[] = {0,255};
float lbpranges[] = {0,255};
const float* phranges[] = {hranges};
const float* psranges[] = {sranges};
//const float* pedranges[] = {edranges1,edranges2,edranges3,edranges4};
const float* pedranges[] = {edranges};
const float* ptranges[] = {tranges};
const float* plbpranges[] = {lbpranges};
int ch[] = {0};
int ch1[] = {1};
int size[] = {256,256};
const float* pranges[] = {hranges,sranges};
int ch2[] = {0,1};
vector<Point> trajectory;// past target centers, drawn by displayTrajectory
/****************************
Mouse callback: lets the user drag a rectangle over the frame to pick
the initial tracking target. While the left button is held, `selection`
tracks the drag; on release, trackObject = -1 signals the processing
thread to (re)initialize the feature histograms.
*****************************/
void onMouse( int event, int x, int y, int, void* )
{
    if (selectObject) // button held: keep growing the selection rectangle
    {
        const int left = MIN(x, origin.x); // top-left corner of the drag
        const int top  = MIN(y, origin.y);
        selection = Rect(left, top, std::abs(x - origin.x), std::abs(y - origin.y));
        selection &= Rect(0, 0, src.cols, src.rows); // clip to the frame
    }

    if (event == CV_EVENT_LBUTTONDOWN)
    {
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0); // degenerate rect until the drag moves
        selectObject = true;
    }
    else if (event == CV_EVENT_LBUTTONUP)
    {
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            trackObject = -1; // non-empty selection: ask the tracker to init
    }
}
/****************************

函数功能 显示轨迹

*****************************/
void displayTrajectory(Mat img, vector<Point> traj)
{
if (!traj.empty())
{
for (size_t i = 0;i < traj.size()-1;i++)
{
line(img,traj[i],traj[i+1],Scalar(255,255,0),3,8,0);

}
}

}
/***********************************
Image-processing / target-tracking worker thread.
Pops frames from the shared frameQueue, extracts color (HSV hue),
edge-direction and Gabor-texture features, fuses their back-projections
with adaptive weights (a, b, c), and tracks the selected target with
CamShift plus a linear-extrapolation search-window prediction.

NOTE(review): the published listing was garbled by the blog engine —
"double s / bf86 / igma = sqrt(5.0);" is restored below to
"double sigma = sqrt(5.0);", and the `#endif` matching `#if NOAUTO`
was missing entirely; its placement here is an assumption — TODO
confirm against the author's original project.
***********************************/
DWORD WINAPI dealFun(LPVOID lpParamter)
{
    BackgroundSubtractorMOG2 mog(150, 25, false); // history, varThreshold, no shadows
    Mat foreground;
    Mat background; // only used by the commented-out getBackgroundImage call

    // Adaptive fusion weights (a = color, b = edge, c = texture);
    // start fully on texture, then update from histogram distances.
    double a = 0;
    double b = 0;
    double c = 1.0;

    Mat src_gray;
    Mat hsv, hhist, hhist_pre, hhistImg, hbackproj, mask;
    //Mat shist,shistImg,sbackproj;
    Mat edst, ehist, ehist_pre, ehistImg, ebackproj;
    Mat tdst, thist, thist_pre, thistImg, tbackproj;
    Rect bRect;

    // Gabor filter parameter initialization (declaration restored, see note above).
    double sigma = sqrt(5.0);
    double F = sqrt(2.0);
    GaborFilter gabor(0, 3, sigma, F);

    while (1)
    {
        if (!frameQueue.empty())
        {
            WaitForSingleObject(hEvent, INFINITE); // guard the shared frame queue
            src = (Mat)(*(frameQueue.begin()));
            cvtColor(src, src_YCrCb, CV_BGR2YCrCb);
            frameQueue.pop_front();
            SetEvent(hEvent);
#if NOAUTO
            if (selectObject)
            {
                rectangle(src, selection, Scalar(0, 0, 255), 2); // show the drag
            }
            // MOG2 background subtraction on YCrCb, then open/close the mask.
            mog(src_YCrCb, foreground, 0.005);
            dilate(foreground, foreground, Mat(), Point(-1, -1), 3);
            erode(foreground, foreground, Mat(), Point(-1, -1), 6);
            dilate(foreground, foreground, Mat(), Point(-1, -1), 3);
            bitwise_not(foreground, foreground); // invert: background becomes set

            cvtColor(src, hsv, CV_BGR2HSV_FULL); // _FULL: H spans 0-255, matching hranges
            cvtColor(src, src_gray, CV_BGR2GRAY);
            Mat mat(src.size(), CV_8UC1, Scalar::all(255)); // all-255 image for inversion

            edst = getEdgeDirectionImage(src_gray); // edge-direction feature

            Mat tdst32F(src.size(), CV_32FC1);
            gabor.conv_img(src_gray, tdst32F, 1);   // Gabor texture feature
            tdst32F.convertTo(tdst, CV_8UC1);

            if (trackObject) // 0 until the mouse selection is released (-1)
            {
                if (trackObject < 0) // fresh selection: build the model histograms
                {
                    inRange(hsv, Scalar(0, 30, 10), Scalar(256, 256, 256), mask);
                    Mat roi(hsv, selection); Mat maskroi(mask, selection);
                    Mat eroi(edst, selection);
                    Mat troi(tdst, selection);

                    calcHist(&roi, 1, ch, maskroi, hhist, 1, hsize, phranges, true, false);
                    calcHist(&eroi, 1, 0, Mat(), ehist, 1, edsize, pedranges, true, false);
                    calcHist(&troi, 1, 0, Mat(), thist, 1, tsize, ptranges, true, false);

                    trackWindow = selection;
                    bRect = selection;
                    point_last.x = trackWindow.x + trackWindow.width / 2;
                    point_last.y = trackWindow.y + trackWindow.height / 2;
                    trajectory.push_back(point_last);
                    trackObject = 1;
                }
                calcBackProject(&hsv, 1, ch, hhist, hbackproj, phranges, 1, true);
                hbackproj &= mask;
                calcBackProject(&edst, 1, 0, ehist, ebackproj, pedranges, 1, true);
                calcBackProject(&tdst, 1, 0, thist, tbackproj, ptranges, 1, true);

                // Invert the texture and color back-projections (255 - value).
                absdiff(mat, tbackproj, tbackproj);
                absdiff(mat, hbackproj, hbackproj);

                Mat backproj = fuseHist(hbackproj, ebackproj, tbackproj, a, b, c); // 3-feature fusion
                normalize(backproj, backproj, 0, 255, NORM_MINMAX);
                // subtract(x, x, x, mask) zeroes backproj wherever the
                // (inverted) foreground mask is set, suppressing background.
                subtract(backproj, backproj, backproj, foreground);

                imshow("result", backproj);
                // Shadows the global trackBox intentionally left unused here.
                RotatedRect trackBox = CamShift(backproj, trackWindow,
                    TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 5, 1));

                if (trackWindow.area() <= 1)
                {
                    // Re-seed a small window near the last position; this is
                    // the same recovery scheme (and the same odd use of the
                    // Rect(x, y, width, height) ctor) as OpenCV's camshiftdemo.
                    int cols = hbackproj.cols, rows = hbackproj.rows, r = (MIN(cols, rows) + 5) / 6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       trackWindow.x + r, trackWindow.y + r) &
                                  Rect(0, 0, cols, rows);
                }

                /*-------------------------------------
                  Position prediction -- linear extrapolation.
                  The predicted center seeds the next search window.
                --------------------------------------*/
                Point point_current;
                bRect = trackBox.boundingRect();
                point_current = trackBox.center;
                point_predict.x = 2 * point_current.x - point_last.x;
                point_predict.y = 2 * point_current.y - point_last.y;

                trackWindow = Rect(point_predict.x - bRect.width / 2,
                                   point_predict.y - bRect.height / 2,
                                   bRect.width, bRect.height);
                Point pu = trackWindow.tl();
                Point pd = trackWindow.br();
                if ((pu.x < 0) || (pu.y < 0) || (pd.x > src.cols) || (pd.y > src.rows))
                {
                    trackWindow = bRect; // prediction left the frame: fall back
                }
                point_last.x = point_current.x;
                point_last.y = point_current.y;

                trajectory.push_back(point_current);
                displayTrajectory(src, trajectory);                // yellow trajectory line
                rectangle(src, trackWindow, Scalar(0, 255, 0), 2); // green predicted window
                rectangle(src, bRect, Scalar(0, 0, 255), 2);       // red current target box
                putText(src, "ID:1", point_current, 3, 1, Scalar(255, 0, 0), 3, 8, false);

                /* ------------------------------------------------------
                  Adaptive weight update: build the three feature
                  histograms of the current target region and compare each
                  with last frame's histogram via the Bhattacharyya
                  distance. Initialization came from the mouse selection.
                ----------------------------------------------------------*/
                hhist.copyTo(hhist_pre);
                ehist.copyTo(ehist_pre);
                thist.copyTo(thist_pre);

                inRange(hsv, Scalar(0, 0, 10), Scalar(256, 256, 256), mask);
                Mat roi(hsv, bRect); Mat maskroi(mask, bRect);
                Mat eroi(edst, bRect);
                Mat troi(tdst, bRect);

                calcHist(&roi, 1, ch, maskroi, hhist, 1, hsize, phranges, true, false);
                calcHist(&eroi, 1, 0, Mat(), ehist, 1, edsize, pedranges, true, false);
                calcHist(&troi, 1, 0, Mat(), thist, 1, tsize, ptranges, true, false);

                double dc = compareHist(hhist, hhist_pre, CV_COMP_BHATTACHARYYA);
                double de = compareHist(ehist, ehist_pre, CV_COMP_BHATTACHARYYA);
                double dt = compareHist(thist, thist_pre, CV_COMP_BHATTACHARYYA);

                // A feature that changed less between frames gets more
                // weight; by construction a + b + c == 1.
                double d = dc + de + dt;
                a = (dt + de) / (2 * d);
                b = (dc + dt) / (2 * d);
                c = (dc + de) / (2 * d);

                TRACE("a = %lf\n", a);
                TRACE("b = %lf\n", b);
                TRACE("c = %lf\n", c);
            }
#endif // NOAUTO -- missing in the published listing; placement assumed, TODO confirm

            imshow("frame", src);
            waitKey(1);
        }
    }
    return 0;
}
       基本上自适应三特征融合之Camshift目标跟踪实现就弄完了,代码自己写的比较随意,没有整理,大神绕路吧!
       PS:哎,多次强调就是怕能力有限,被无脑喷,但是还是想贴出来给需要的人参考一下。想想自己初学时也是想在网上找到点东西给自己参考,也希望大神多写点好东西和大家分享,共同学习!
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息