您的位置:首页 > 编程语言

目标检测:SSD目标检测中PriorBox代码解读

2018-11-30 16:16 513 查看

这篇博客主要写prior_box_layer 
这一层完成的是给定一系列feature map后如何在上面生成prior box。SSD的做法很有意思,对于输入大小是W×H的feature map,生成的prior box中心就是W×H个,均匀分布在整张图上,像下图中演示的一样。在每个中心上,可以生成多个不同长宽比的prior box,如[1/3, 1/2, 1, 2, 3]。所以在一个feature map上可以生成的prior box总数是W×H×length_of_aspect_ratio,对于比较大的feature map,如VGG的conv4_3,生成的prior box可以达到数千个。当然对于边界上的box,还要做一些处理保证其不超出图片范围,这都是细节了。

这里需要注意的是,虽然prior box的位置是在W×H的格子上,但prior box的大小并不是跟格子一样大,而是人工指定的,原论文中随着feature map从底层到高层,prior box的大小在0.2到0.9之间均匀变化。

一开始看SSD的时候很困扰我的一点就是形状的匹配问题:SSD用卷积层做bbox的拟合,输出的不应该是feature map吗,怎么能正好输出4个坐标呢?这里的做法有点暴力,比如需要输出W×H×length_of_aspect_ratio×4个坐标,就直接用length_of_aspect_ratio×4个channel的卷积层做拟合,这样就得到length_of_aspect_ratio×4个大小为W×H的feature map,然后把feature map拉成一个长度为W×H×length_of_aspect_ratio×4的向量,用SmoothL1之类的loss去拟合,效果还意外地不错…… 

代码解读:
  1. #include <algorithm>
  2. #include <functional>
  3. #include <utility>
  4. #include <vector>
  5. #include "caffe/layers/prior_box_layer.hpp"
  6. namespace caffe {
template <typename Dtype>
void PriorBoxLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Parse the PriorBoxParameter from the prototxt and cache every derived
  // quantity (min/max sizes, aspect ratios, variances, image/step geometry)
  // that Reshape() and Forward_cpu() need later.
  const PriorBoxParameter& prior_box_param =
      this->layer_param_.prior_box_param();
  CHECK_GT(prior_box_param.min_size_size(), 0) << "must provide min_size.";
  for (int i = 0; i < prior_box_param.min_size_size(); ++i) {  // typically min_size_size() == 1
    min_sizes_.push_back(prior_box_param.min_size(i));
    CHECK_GT(min_sizes_.back(), 0) << "min_size must be positive.";
  }
  aspect_ratios_.clear();
  // Ratio 1 is always present; the prototxt usually lists only 2 (and 3).
  aspect_ratios_.push_back(1.);
  flip_ = prior_box_param.flip();  // defaults to true
  for (int i = 0; i < prior_box_param.aspect_ratio_size(); ++i) {
    float ar = prior_box_param.aspect_ratio(i);
    bool already_exist = false;
    // Deduplicate: skip a ratio already stored (within 1e-6) so each ratio
    // and its reciprocal appear at most once in aspect_ratios_.
    for (int j = 0; j < aspect_ratios_.size(); ++j) {
      if (fabs(ar - aspect_ratios_[j]) < 1e-6) {
        already_exist = true;
        break;  // found a duplicate, stop scanning
      }
    }
    if (!already_exist) {
      aspect_ratios_.push_back(ar);
      if (flip_) {  // also add the reciprocal, i.e. the flipped box shape
        aspect_ratios_.push_back(1./ar);
      }
    }  // e.g. ratios {2, 3} with flip yield {1, 2, 1/2, 3, 1/3} (5 ratios)
  }
  num_priors_ = aspect_ratios_.size() * min_sizes_.size();  // e.g. 5 * 1
  if (prior_box_param.max_size_size() > 0) {
    // One max_size is required per min_size (the element counts must match).
    CHECK_EQ(prior_box_param.min_size_size(), prior_box_param.max_size_size());
    for (int i = 0; i < prior_box_param.max_size_size(); ++i) {  // typically max_size_size() == 1
      max_sizes_.push_back(prior_box_param.max_size(i));
      CHECK_GT(max_sizes_[i], min_sizes_[i])
          << "max_size must be greater than min_size.";
      // Each (min,max) pair adds one extra prior of size sqrt(min*max),
      // giving the 6 priors per location described in the SSD paper.
      num_priors_ += 1;
    }
  }
  clip_ = prior_box_param.clip();  // defaults to false
  if (prior_box_param.variance_size() > 1) {  // e.g. variance: 0.1 0.1 0.2 0.2
    // Must and only provide 4 variance.
    CHECK_EQ(prior_box_param.variance_size(), 4);
    for (int i = 0; i < prior_box_param.variance_size(); ++i) {
      CHECK_GT(prior_box_param.variance(i), 0);
      variance_.push_back(prior_box_param.variance(i));
    }
  } else if (prior_box_param.variance_size() == 1) {  // a single shared variance, e.g. 0.1
    CHECK_GT(prior_box_param.variance(0), 0);
    variance_.push_back(prior_box_param.variance(0));
  } else {
    // Set default to 0.1.
    variance_.push_back(0.1);
  }
  // Input image size: explicit img_h/img_w, a square img_size, or 0 meaning
  // "read the size from bottom[1] at forward time".
  if (prior_box_param.has_img_h() || prior_box_param.has_img_w()) {
    CHECK(!prior_box_param.has_img_size())
        << "Either img_size or img_h/img_w should be specified; not both.";
    img_h_ = prior_box_param.img_h();
    CHECK_GT(img_h_, 0) << "img_h should be larger than 0.";
    img_w_ = prior_box_param.img_w();
    CHECK_GT(img_w_, 0) << "img_w should be larger than 0.";
  } else if (prior_box_param.has_img_size()) {
    const int img_size = prior_box_param.img_size();
    CHECK_GT(img_size, 0) << "img_size should be larger than 0.";
    img_h_ = img_size;
    img_w_ = img_size;
  } else {
    img_h_ = 0;
    img_w_ = 0;
  }
  // Step (stride) between prior centers — analogous to feat_stride in
  // Faster R-CNN; 0 means "derive from image/feature-map ratio at forward".
  if (prior_box_param.has_step_h() || prior_box_param.has_step_w()) {
    CHECK(!prior_box_param.has_step())
        << "Either step or step_h/step_w should be specified; not both.";
    step_h_ = prior_box_param.step_h();
    CHECK_GT(step_h_, 0.) << "step_h should be larger than 0.";
    step_w_ = prior_box_param.step_w();
    CHECK_GT(step_w_, 0.) << "step_w should be larger than 0.";
  } else if (prior_box_param.has_step()) {
    const float step = prior_box_param.step();
    CHECK_GT(step, 0) << "step should be larger than 0.";
    step_h_ = step;
    step_w_ = step;
  } else {
    step_h_ = 0;
    step_w_ = 0;
  }
  offset_ = prior_box_param.offset();  // center offset within a cell, default 0.5
}  // end of LayerSetUp
  95. template <typename Dtype>
  96. void PriorBoxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
  97. const vector<Blob<Dtype>*>& top) {
  98. const int layer_width = bottom[0]->width(); // 输入feature map的大小
  99. const int layer_height = bottom[0]->height();
  100. vector<int> top_shape(3, 1);
  101. // Since all images in a batch has same height and width, we only need to
  102. // generate one set of priors which can be shared across all images.
  103. top_shape[0] = 1;
  104. // 2 channels. First channel stores the mean of each prior coordinate.
  105. // Second channel stores the variance of each prior coordinate.
  106. top_shape[1] = 2;
  107. top_shape[2] = layer_width * layer_height * num_priors_ * 4;
  108. // 输出坐标,就是需要这么多个map,类似faster rcnn,注意:这里,如果没有在ptototxt中没有设置max_size,num_priors_的值就要减1
  109. CHECK_GT(top_shape[2], 0);
  110. top[0]->Reshape(top_shape);
  111. // 在mbox_priorbox层中,concat是选的axis: 2,就是说是concat的map数。
  112. }
template <typename Dtype>
void PriorBoxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Generate all prior boxes for this feature map (bottom[0]) and write them,
  // followed by the per-coordinate variances, into the two channels of top[0].
  const int layer_width = bottom[0]->width();  // incoming feature-map size
  const int layer_height = bottom[0]->height();
  int img_width, img_height;
  if (img_h_ == 0 || img_w_ == 0) {
    img_width = bottom[1]->width();  // fall back to the data layer's image size
    img_height = bottom[1]->height();
  } else {
    img_width = img_w_;  // explicit (possibly rescaled) size from the prototxt
    img_height = img_h_;
  }
  float step_w, step_h;
  if (step_w_ == 0 || step_h_ == 0) {
    // Derive the stride from the image/feature-map ratio — the analogue of
    // feat_stride in Faster R-CNN, but kept in float and per-axis.
    step_w = static_cast<float>(img_width) / layer_width;
    step_h = static_cast<float>(img_height) / layer_height;
  } else {
    step_w = step_w_;
    step_h = step_h_;
  }
  Dtype* top_data = top[0]->mutable_cpu_data();
  // Number of values in one channel: usually w*h*6*4 (fewer for conv4_3).
  int dim = layer_height * layer_width * num_priors_ * 4;
  int idx = 0;
  for (int h = 0; h < layer_height; ++h) {  // map each feature-map cell back to the image
    for (int w = 0; w < layer_width; ++w) {
      // offset_ (default 0.5) centers priors inside each cell, similar to the
      // half-pixel rounding in the Faster R-CNN python code.
      float center_x = (w + offset_) * step_w;
      float center_y = (h + offset_) * step_h;
      float box_width, box_height;
      for (int s = 0; s < min_sizes_.size(); ++s) {  // usually a single min_size
        int min_size_ = min_sizes_[s];
        // min_size grows from layer to layer (e.g. 60 at fc7 up to 276 at the
        // last map), implementing the s_k scale schedule from the SSD paper.
        // first prior: aspect_ratio = 1, size = min_size
        box_width = box_height = min_size_;
        // All coordinates are normalized to [0, 1] by the image size.
        // xmin
        top_data[idx++] = (center_x - box_width / 2.) / img_width;
        // ymin
        top_data[idx++] = (center_y - box_height / 2.) / img_height;
        // xmax
        top_data[idx++] = (center_x + box_width / 2.) / img_width;
        // ymax
        top_data[idx++] = (center_y + box_height / 2.) / img_height;
        if (max_sizes_.size() > 0) {
          CHECK_EQ(min_sizes_.size(), max_sizes_.size());
          int max_size_ = max_sizes_[s];
          // second prior: aspect_ratio = 1, size = sqrt(min_size * max_size),
          // i.e. the geometric-mean scale s'_k — matches the paper.
          box_width = box_height = sqrt(min_size_ * max_size_);
          // xmin
          top_data[idx++] = (center_x - box_width / 2.) / img_width;
          // ymin
          top_data[idx++] = (center_y - box_height / 2.) / img_height;
          // xmax
          top_data[idx++] = (center_x + box_width / 2.) / img_width;
          // ymax
          top_data[idx++] = (center_y + box_height / 2.) / img_height;
        }
        // rest of priors: the non-unit aspect ratios
        for (int r = 0; r < aspect_ratios_.size(); ++r) {
          float ar = aspect_ratios_[r];
          if (fabs(ar - 1.) < 1e-6) {
            continue;  // ratio 1 was already emitted above
          }
          box_width = min_size_ * sqrt(ar);
          box_height = min_size_ / sqrt(ar);
          // xmin
          top_data[idx++] = (center_x - box_width / 2.) / img_width;
          // ymin
          top_data[idx++] = (center_y - box_height / 2.) / img_height;
          // xmax
          top_data[idx++] = (center_x + box_width / 2.) / img_width;
          // ymax
          top_data[idx++] = (center_y + box_height / 2.) / img_height;
        }
      }  // end for min_sizes_
    }  // end for w
  }  // end for h
  // All priors are written now — 6 shapes per location when max_size is set,
  // matching the paper; each layer contributes one s_k via its min_size.
  // clip the prior's coordinate such that it is within [0, 1]
  if (clip_) {
    for (int d = 0; d < dim; ++d) {
      top_data[d] = std::min<Dtype>(std::max<Dtype>(top_data[d], 0.), 1.);
    }
  }
  // set the variance.
  // See https://github.com/weiliu89/caffe/issues/75 — dividing the regression
  // targets by the variance magnifies the localization error, enlarging the
  // loss/gradients and speeding up convergence.
  // offset(0, 1) advances the pointer to the start of the second channel
  // (offset takes up to (n, c, h, w)), so the variances written below do not
  // overwrite the coordinates written above.
  top_data += top[0]->offset(0, 1);
  if (variance_.size() == 1) {
    // Fill the whole channel with the single shared variance value.
    caffe_set<Dtype>(dim, Dtype(variance_[0]), top_data);
  } else {
    int count = 0;
    for (int h = 0; h < layer_height; ++h) {
      for (int w = 0; w < layer_width; ++w) {
        for (int i = 0; i < num_priors_; ++i) {
          for (int j = 0; j < 4; ++j) {
            // Repeat the 4 variances for every prior at every location.
            top_data[count] = variance_[j];
            ++count;
          }
        }
      }
    }
  }
}
// Instantiate the float/double template versions and register the layer
// under the type name "PriorBox" so net prototxts can reference it.
INSTANTIATE_CLASS(PriorBoxLayer);
REGISTER_LAYER_CLASS(PriorBox);
}  // namespace caffe

内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签: