
Caffe Layer Source Code Walkthrough

2017-03-20

include/caffe/layer.hpp

template <typename Dtype>
class Layer {
 public:
  /**
   * You should not implement your own constructor. Any set up code should go
   * to SetUp(), where the dimensions of the bottom blobs are provided to the
   * layer.
   */
  explicit Layer(const LayerParameter& param)
      // Explicit constructor: loads the configuration from the LayerParameter
      // object and assigns it to the members layer_param_ and is_shared_.
      : layer_param_(param), is_shared_(false) {
    // Set phase and copy blobs (if there are any).
    phase_ = param.phase();
    if (layer_param_.blobs_size() > 0) {
      // blobs_ is a vector member that stores the layer's internal weights
      // and bias terms, organized as Blobs.
      blobs_.resize(layer_param_.blobs_size());
      // Allocate the layer's own Blobs and restore their number and contents
      // from the parameters carried in layer_param_.
      for (int i = 0; i < layer_param_.blobs_size(); ++i) {
        blobs_[i].reset(new Blob<Dtype>());
        blobs_[i]->FromProto(layer_param_.blobs(i));
      }
    }
  }
  virtual ~Layer() {}  // Virtual destructor.
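As a hedged illustration of the comment above ("you should not implement your own constructor"), a derived layer normally just forwards the LayerParameter to this base constructor and defers real work to LayerSetUp(). MyScaleLayer is a hypothetical layer used only for illustration in these notes, not a class from Caffe:

// Illustrative skeleton of a hypothetical derived layer.
template <typename Dtype>
class MyScaleLayer : public Layer<Dtype> {
 public:
  // Forward the LayerParameter to the base class; it fills phase_ and blobs_.
  explicit MyScaleLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
  // Real initialization belongs in LayerSetUp(), not in the constructor.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual inline const char* type() const { return "MyScale"; }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);
};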


Next comes layer setup, which implements the interface common to all layers.

// Every layer goes through the same setup sequence.
void SetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  InitMutex();
  // Checks that the number of bottom and top blobs is correct.
  CheckBlobCounts(bottom, top);
  // Layer-specific setup is done here.
  LayerSetUp(bottom, top);
  // Set up the sizes of the top blobs and internal buffers.
  Reshape(bottom, top);
  SetLossWeights(top);  // Set the loss weight multiplier blobs.
}
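For context, here is a hedged sketch of how a caller (normally the Net) drives this interface, assuming the hypothetical MyScaleLayer above is fully implemented; the bottom/top vectors are placeholders that would have to be filled with properly shaped blobs first:

// Illustrative driver code, not taken from Caffe's Net implementation.
LayerParameter param;
shared_ptr<Layer<float> > layer(new MyScaleLayer<float>(param));
vector<Blob<float>*> bottom_vec;  // would hold the layer's input blobs
vector<Blob<float>*> top_vec;     // would hold the layer's output blobs
layer->SetUp(bottom_vec, top_vec);                 // CheckBlobCounts + LayerSetUp + Reshape
float loss = layer->Forward(bottom_vec, top_vec);  // forward pass, returns this layer's loss
vector<bool> propagate_down(bottom_vec.size(), true);
layer->Backward(top_vec, propagate_down, bottom_vec);  // backward pass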


Layer-specific setup, to be overridden by each concrete layer:

virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {}

/**
 * @brief Whether a layer should be shared by multiple nets during data
 *        parallelism. By default, all layers except for data layers should
 *        not be shared. data layers should be shared to ensure each worker
 *        solver access data sequentially during data parallelism.
 */
// Whether this layer may be shared by multiple nets; only data layers return true.
virtual inline bool ShareInParallel() const { return false; }

/** @brief Return whether this layer is actually shared by other nets.
 *         If ShareInParallel() is true and using more than one GPU and the
 *         net has TRAIN phase, then this function is expected return true.
 */
// Returns whether this layer is actually shared by other nets.
inline bool IsShared() const { return is_shared_; }

/** @brief Set whether this layer is actually shared by other nets
 *         If ShareInParallel() is true and using more than one GPU and the
 *         net has TRAIN phase, then is_shared should be set true.
 */
// Sets whether this layer is actually shared by other nets.
inline void SetShared(bool is_shared) {
  CHECK(ShareInParallel() || !is_shared)
      << type() << "Layer does not support sharing.";
  is_shared_ = is_shared;
}
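As a hedged example of the override described above (a data layer opting into sharing), the class declaration of a hypothetical data layer might contain nothing more than:

// Illustrative fragment inside a hypothetical data layer class declaration.
// Data layers are shared so that each worker solver reads data sequentially.
virtual inline bool ShareInParallel() const { return true; }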


// Reshape the top blobs as needed. This is a pure virtual function: it is only
// a declaration here, and each subclass must implement it before it can be used.
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) = 0;
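A hedged sketch of a typical Reshape() override for an element-wise layer, continuing the hypothetical MyScaleLayer example: the output simply mirrors the input's shape.

// Illustrative element-wise Reshape: the top blob takes the bottom blob's shape.
template <typename Dtype>
void MyScaleLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  top[0]->ReshapeLike(*bottom[0]);
}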


// "inline" marks the function as an inline function: during compilation, calls to it are replaced directly by its body, which improves execution efficiency. If a member function is called many times or sits inside a loop, declaring it inline is recommended to improve runtime performance.

// Forward pass: given the bottom blobs, compute the top blobs and the loss;
// the return value is this layer's total loss.
inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top);

// Backward pass: given the top blobs' error gradients, compute the bottom
// blobs' error gradients.
/**
 * @brief Given the top blob error gradients, compute the bottom blob error
 *        gradients.
 *
 * @param top
 *     the output blobs, whose diff fields store the gradient of the error
 *     with respect to themselves
 * @param propagate_down
 *     a vector with equal length to bottom, with each index indicating
 *     whether to propagate the error gradients down to the bottom blob at
 *     the corresponding index
 * @param bottom
 *     the input blobs, whose diff fields will store the gradient of the error
 *     with respect to themselves after Backward is run
 *
 * The Backward wrapper calls the relevant device wrapper function
 * (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
 * top blob diffs.
 *
 * Your layer should implement Backward_cpu and (optionally) Backward_gpu.
 */
inline void Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom);


Returns the stored blobs (the layer's learnable parameters):

vector<shared_ptr<Blob<Dtype> > >& blobs() {
return blobs_;
}
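A hedged usage sketch of blobs(): iterating over a layer's parameter blobs to inspect their shapes (for a convolution layer, blobs_[0] holds the weights and blobs_[1] the bias):

// Illustrative: print the shape of every parameter blob owned by a layer.
for (int i = 0; i < layer->blobs().size(); ++i) {
  LOG(INFO) << "param blob " << i << ": " << layer->blobs()[i]->shape_string();
}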

/**
 * @brief Returns the layer parameter.
 */
const LayerParameter& layer_param() const { return layer_param_; }

/**
 * @brief Writes the layer parameter to a protocol buffer
 */
virtual void ToProto(LayerParameter* param, bool write_diff = false);

/**
 * @brief Returns the scalar loss associated with a top blob at a given index.
 */
inline Dtype loss(const int top_index) const {
  return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
}

/**
 * @brief Sets the loss associated with a top blob at a given index.
 */
inline void set_loss(const int top_index, const Dtype value) {
  if (loss_.size() <= top_index) {
    loss_.resize(top_index + 1, Dtype(0));
  }
  loss_[top_index] = value;
}

/**
 * @brief Returns the layer type.
 */
// Returns the layer's type string; implemented in each derived class.
virtual inline const char* type() const { return ""; }


The English comments in the parts I have not annotated are simple and explain things clearly. Some unimportant code in between is omitted; only the important parts are quoted.

// The following four functions appear in every derived class; they are the most
// important part of a layer's functionality.
/** @brief Using the CPU device, compute the layer output. */
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) = 0;
/**
 * @brief Using the GPU device, compute the layer output.
 *        Fall back to Forward_cpu() if unavailable.
 */
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // LOG(WARNING) << "Using CPU code as backup.";
  return Forward_cpu(bottom, top);
}

/**
 * @brief Using the CPU device, compute the gradients for any parameters and
 *        for the bottom blobs if propagate_down is true.
 */
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) = 0;
/**
 * @brief Using the GPU device, compute the gradients for any parameters and
 *        for the bottom blobs if propagate_down is true.
 *        Fall back to Backward_cpu() if unavailable.
 */
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // LOG(WARNING) << "Using CPU code as backup.";
  Backward_cpu(top, propagate_down, bottom);
}
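To make the four hooks concrete, here is a hedged sketch that completes the hypothetical MyScaleLayer from above: a trivial y = 2 * x layer that only implements the CPU hooks, so the GPU versions fall back to them automatically. The fixed factor 2 is illustrative; this is not Caffe's own ScaleLayer.

// Illustrative Forward_cpu / Backward_cpu for the hypothetical MyScaleLayer.
template <typename Dtype>
void MyScaleLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* top_data = top[0]->mutable_cpu_data();
  for (int i = 0; i < bottom[0]->count(); ++i) {
    top_data[i] = Dtype(2) * bottom_data[i];  // y = 2 * x
  }
}

template <typename Dtype>
void MyScaleLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (!propagate_down[0]) { return; }
  const Dtype* top_diff = top[0]->cpu_diff();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  for (int i = 0; i < bottom[0]->count(); ++i) {
    bottom_diff[i] = Dtype(2) * top_diff[i];  // dL/dx = 2 * dL/dy
  }
}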


Check that the numbers of bottom and top blobs meet the layer's requirements, and set the loss weights for the top blobs.

virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  if (ExactNumBottomBlobs() >= 0) {
    CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
        << type() << " Layer takes " << ExactNumBottomBlobs()
        << " bottom blob(s) as input.";
  }
  if (MinBottomBlobs() >= 0) {
    CHECK_LE(MinBottomBlobs(), bottom.size())
        << type() << " Layer takes at least " << MinBottomBlobs()
        << " bottom blob(s) as input.";
  }
  if (MaxBottomBlobs() >= 0) {
    CHECK_GE(MaxBottomBlobs(), bottom.size())
        << type() << " Layer takes at most " << MaxBottomBlobs()
        << " bottom blob(s) as input.";
  }
  if (ExactNumTopBlobs() >= 0) {
    CHECK_EQ(ExactNumTopBlobs(), top.size())
        << type() << " Layer produces " << ExactNumTopBlobs()
        << " top blob(s) as output.";
  }
  if (MinTopBlobs() >= 0) {
    CHECK_LE(MinTopBlobs(), top.size())
        << type() << " Layer produces at least " << MinTopBlobs()
        << " top blob(s) as output.";
  }
  if (MaxTopBlobs() >= 0) {
    CHECK_GE(MaxTopBlobs(), top.size())
        << type() << " Layer produces at most " << MaxTopBlobs()
        << " top blob(s) as output.";
  }
  if (EqualNumBottomTopBlobs()) {
    CHECK_EQ(bottom.size(), top.size())
        << type() << " Layer produces one top blob as output for each "
        << "bottom blob input.";
  }
}
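CheckBlobCounts() reads its limits from virtual hints such as ExactNumBottomBlobs(); a hedged sketch of how a derived layer (for example the hypothetical MyScaleLayer) declares "exactly one bottom, exactly one top":

// Illustrative fragment: declare the expected blob counts inside the derived class.
virtual inline int ExactNumBottomBlobs() const { return 1; }
virtual inline int ExactNumTopBlobs() const { return 1; }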
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
  const int num_loss_weights = layer_param_.loss_weight_size();
  if (num_loss_weights) {
    CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
        "unspecified or specified once per top blob.";
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      const Dtype loss_weight = layer_param_.loss_weight(top_id);
      if (loss_weight == Dtype(0)) { continue; }
      this->set_loss(top_id, loss_weight);
      const int count = top[top_id]->count();
      Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
      caffe_set(count, loss_weight, loss_multiplier);
    }
  }
}
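For intuition on what SetLossWeights() achieves: for a loss layer whose single top blob holds one scalar and whose loss_weight is, say, 2.5, the top blob's diff is filled with 2.5, so the dot product in the Forward() wrapper below returns 2.5 times the raw loss. A hedged, standalone sketch of that arithmetic (the numbers are made up, and this is not Caffe code):

// Standalone illustration of loss = dot(top_data, top_diff) for a scalar loss top.
const float raw_loss = 0.7f;      // what Forward_cpu wrote into top[0]->cpu_data()
const float loss_weight = 2.5f;   // what SetLossWeights wrote into top[0]->cpu_diff()
const float weighted_loss = raw_loss * loss_weight;  // caffe_cpu_dot over count == 1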


Forward and Backward are wrapper functions. When implementing a concrete layer you never change these two; you only implement Forward_cpu, Forward_gpu, Backward_cpu, and Backward_gpu.

template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Lock during forward to ensure sequential forward
  Lock();
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    // Derived classes implement this part: the CPU forward pass.
    Forward_cpu(bottom, top);
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    // Derived classes implement this part: the GPU forward pass.
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  Unlock();
  return loss;
}

template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    // Derived classes implement this part: the CPU backward pass.
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    // Derived classes implement this part: the GPU backward pass.
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

// Serialize LayerParameter to protocol buffer
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->Clear();
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}

}  // namespace caffe
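A hedged usage sketch of ToProto(): dumping one layer's parameters, including its learned blobs, to a binary protobuf file. WriteProtoToBinaryFile comes from caffe/util/io.hpp; the output file name is illustrative.

// Illustrative: snapshot a single layer's parameters to disk.
LayerParameter snapshot;
layer->ToProto(&snapshot, false);  // false: store the data only, not the diffs
WriteProtoToBinaryFile(snapshot, "my_layer.binaryproto");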