#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/device_alternate.hpp"

namespace caffe {
/**
 * @brief An interface for the units of computation which can be composed into a
 *        Net.
 *
 * Layer%s must implement a Forward function, in which they take their input
 * (bottom) Blob%s (if any) and compute their output Blob%s (if any).
 * They may also implement a Backward function, in which they compute the error
 * gradients with respect to their input Blob%s, given the error gradients
 * with respect to their output Blob%s.
 */
template <typename Dtype>
class Layer {
 public:
  /**
   * You should not implement your own constructor. Any set up code should go
   * to SetUp(), where the dimensions of the bottom blobs are provided to the
   * layer.
   */
  explicit Layer(const LayerParameter& param)
    : layer_param_(param) {
      // The only thing we do is to copy blobs if there are any.
      if (layer_param_.blobs_size() > 0) {
        blobs_.resize(layer_param_.blobs_size());
        for (int i = 0; i < layer_param_.blobs_size(); ++i) {
          blobs_[i].reset(new Blob<Dtype>());
          blobs_[i]->FromProto(layer_param_.blobs(i));
        }
      }
    }
  virtual ~Layer() {}
  /**
   * @brief Implements common layer setup functionality.
   *
   * @param bottom the preshaped input blobs
   * @param top
   *     the allocated but unshaped output blobs, to be shaped by Reshape
   *
   * Checks that the number of bottom and top blobs is correct.
   * Calls LayerSetUp to do special layer setup for individual layer types,
   * followed by Reshape to set up sizes of top blobs and internal buffers.
   * Sets up the loss weight multiplier blobs for any non-zero loss weights.
   * This method may not be overridden.
   */
  void SetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    CheckBlobCounts(bottom, top);
    LayerSetUp(bottom, top);
    Reshape(bottom, top);
    SetLossWeights(top);
  }
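  // A usage sketch (illustrative only; assumes preshaped input blobs and an
  // allocated top vector -- the variable names below are hypothetical):
  //
  //   LayerParameter param;
  //   param.set_type(LayerParameter_LayerType_RELU);
  //   shared_ptr<Layer<float> > layer(GetLayer<float>(param));
  //   layer->SetUp(bottom_vec, top_vec);  // checks counts, then calls
  //                                       // LayerSetUp, Reshape, and
  //                                       // SetLossWeights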
  /**
   * @brief Does layer-specific setup: your layer should implement this function
   *        as well as Reshape.
   *
   * @param bottom
   *     the preshaped input blobs, whose data fields store the input data for
   *     this layer
   * @param top
   *     the allocated but unshaped output blobs
   *
   * This method should do one-time layer specific setup. This includes reading
   * and processing relevant parameters from the <code>layer_param_</code>.
   * Setting up the shapes of top blobs and internal buffers should be done in
   * <code>Reshape</code>, which will be called before the forward pass to
   * adjust the top blob sizes.
   */
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
  /**
   * @brief Adjust the shapes of top blobs and internal buffers to accommodate
   *        the shapes of the bottom blobs.
   *
   * @param bottom the input blobs, with the requested input shapes
   * @param top the top blobs, which should be reshaped as needed
   *
   * This method should reshape top blobs as needed according to the shapes
   * of the bottom (input) blobs, as well as reshaping any internal buffers
   * and making any other necessary adjustments so that the layer can
   * accommodate the bottom blobs.
   */
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
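  // A sketch of a typical Reshape override (hypothetical elementwise layer,
  // not part of Caffe): the top blob simply mirrors the bottom blob's shape.
  //
  //   virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
  //       const vector<Blob<Dtype>*>& top) {
  //     top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
  //                     bottom[0]->height(), bottom[0]->width());
  //   }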
  /**
   * @brief Given the bottom blobs, compute the top blobs and the loss.
   *
   * @param bottom
   *     the input blobs, whose data fields store the input data for this layer
   * @param top
   *     the preshaped output blobs, whose data fields will store this layer's
   *     outputs
   * \return The total loss from the layer.
   *
   * The Forward wrapper calls the relevant device wrapper function
   * (Forward_cpu or Forward_gpu) to compute the top blob values given the
   * bottom blobs. If the layer has any non-zero loss_weights, the wrapper
   * then computes and returns the loss.
   *
   * Your layer should implement Forward_cpu and (optionally) Forward_gpu.
   */
  inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  /**
   * @brief Given the top blob error gradients, compute the bottom blob error
   *        gradients.
   *
   * @param top
   *     the output blobs, whose diff fields store the gradient of the error
   *     with respect to themselves
   * @param propagate_down
   *     a vector with equal length to bottom, with each index indicating
   *     whether to propagate the error gradients down to the bottom blob at
   *     the corresponding index
   * @param bottom
   *     the input blobs, whose diff fields will store the gradient of the error
   *     with respect to themselves after Backward is run
   *
   * The Backward wrapper calls the relevant device wrapper function
   * (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
   * top blob diffs.
   *
   * Your layer should implement Backward_cpu and (optionally) Backward_gpu.
   */
  inline void Backward(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);
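  // Together, one training step over a single layer looks roughly like this
  // (illustrative; assumes SetUp has already been called on the same blobs):
  //
  //   Dtype loss = layer->Forward(bottom_vec, top_vec);
  //   vector<bool> propagate_down(bottom_vec.size(), true);
  //   layer->Backward(top_vec, propagate_down, bottom_vec);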
  /**
   * @brief Returns the vector of learnable parameter blobs.
   */
  vector<shared_ptr<Blob<Dtype> > >& blobs() {
    return blobs_;
  }
  /**
   * @brief Returns the layer parameter.
   */
  const LayerParameter& layer_param() const { return layer_param_; }
  /**
   * @brief Writes the layer parameter to a protocol buffer.
   */
  virtual void ToProto(LayerParameter* param, bool write_diff = false);
  /**
   * @brief Returns the scalar loss associated with a top blob at a given index.
   */
  inline Dtype loss(const int top_index) const {
    return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
  }
  /**
   * @brief Sets the loss associated with a top blob at a given index.
   */
  inline void set_loss(const int top_index, const Dtype value) {
    if (loss_.size() <= top_index) {
      loss_.resize(top_index + 1, Dtype(0));
    }
    loss_[top_index] = value;
  }
  /**
   * @brief Returns the layer type as an enum value.
   */
  virtual inline LayerParameter_LayerType type() const {
    return LayerParameter_LayerType_NONE;
  }
  /**
   * @brief Returns the layer type name.
   */
  virtual inline const string& type_name() const {
    return LayerParameter_LayerType_Name(type());
  }
  /**
   * @brief Returns the exact number of bottom blobs required by the layer,
   *        or -1 if no exact number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some exact number of bottom blobs.
   */
  virtual inline int ExactNumBottomBlobs() const { return -1; }
  /**
   * @brief Returns the minimum number of bottom blobs required by the layer,
   *        or -1 if no minimum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some minimum number of bottom blobs.
   */
  virtual inline int MinBottomBlobs() const { return -1; }
  /**
   * @brief Returns the maximum number of bottom blobs required by the layer,
   *        or -1 if no maximum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some maximum number of bottom blobs.
   */
  virtual inline int MaxBottomBlobs() const { return -1; }
  /**
   * @brief Returns the exact number of top blobs required by the layer,
   *        or -1 if no exact number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some exact number of top blobs.
   */
  virtual inline int ExactNumTopBlobs() const { return -1; }
  /**
   * @brief Returns the minimum number of top blobs required by the layer,
   *        or -1 if no minimum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some minimum number of top blobs.
   */
  virtual inline int MinTopBlobs() const { return -1; }
  /**
   * @brief Returns the maximum number of top blobs required by the layer,
   *        or -1 if no maximum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some maximum number of top blobs.
   */
  virtual inline int MaxTopBlobs() const { return -1; }
  /**
   * @brief Returns true if the layer requires an equal number of bottom and
   *        top blobs.
   *
   * This method should be overridden to return true if your layer expects an
   * equal number of bottom and top blobs.
   */
  virtual inline bool EqualNumBottomTopBlobs() const { return false; }
  /**
   * @brief Return whether "anonymous" top blobs are created automatically
   *        by the layer.
   *
   * If this method returns true, Net::Init will create enough "anonymous" top
   * blobs to fulfill the requirement specified by ExactNumTopBlobs() or
   * MinTopBlobs().
   */
  virtual inline bool AutoTopBlobs() const { return false; }
  /**
   * @brief Return whether to allow force_backward for a given bottom blob
   *        index.
   *
   * If AllowForceBackward(i) == false, we will ignore the force_backward
   * setting and backpropagate to blob i only if it needs gradient information
   * (as is done when force_backward == false).
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }
  /**
   * @brief Specifies whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   *
   * You can safely ignore false values and always compute gradients
   * for all parameters, but possibly with wasteful computation.
   */
  inline bool param_propagate_down(const int param_id) {
    return (param_propagate_down_.size() > param_id) ?
        param_propagate_down_[param_id] : false;
  }
  /**
   * @brief Sets whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   */
  inline void set_param_propagate_down(const int param_id, const bool value) {
    if (param_propagate_down_.size() <= param_id) {
      param_propagate_down_.resize(param_id + 1, true);
    }
    param_propagate_down_[param_id] = value;
  }
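  // Sketch of how a Backward_cpu implementation typically consults this flag
  // before accumulating parameter gradients (illustrative only):
  //
  //   if (this->param_propagate_down_[0]) {
  //     Dtype* weight_diff = this->blobs_[0]->mutable_cpu_diff();
  //     // ... accumulate dE/dW into weight_diff ...
  //   }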
 protected:
  /** The protobuf that stores the layer parameters */
  LayerParameter layer_param_;
  /** The vector that stores the learnable parameters as a set of blobs. */
  vector<shared_ptr<Blob<Dtype> > > blobs_;
  /** Vector indicating whether to compute the diff of each param blob. */
  vector<bool> param_propagate_down_;

  /** The vector that indicates whether each top blob has a non-zero weight in
   *  the objective function. */
  vector<Dtype> loss_;
  /** @brief Using the CPU device, compute the layer output. */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
  /**
   * @brief Using the GPU device, compute the layer output.
   *        Fall back to Forward_cpu() if unavailable.
   */
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // LOG(WARNING) << "Using CPU code as backup.";
    return Forward_cpu(bottom, top);
  }
  /**
   * @brief Using the CPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) = 0;
  /**
   * @brief Using the GPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   *        Fall back to Backward_cpu() if unavailable.
   */
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    // LOG(WARNING) << "Using CPU code as backup.";
    Backward_cpu(top, propagate_down, bottom);
  }
  /**
   * Called by the parent Layer's SetUp to check that the number of bottom
   * and top Blobs provided as input match the expected numbers specified by
   * the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
   */
  virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
                               const vector<Blob<Dtype>*>& top) {
    if (ExactNumBottomBlobs() >= 0) {
      CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
          << type_name() << " Layer takes " << ExactNumBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MinBottomBlobs() >= 0) {
      CHECK_LE(MinBottomBlobs(), bottom.size())
          << type_name() << " Layer takes at least " << MinBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MaxBottomBlobs() >= 0) {
      CHECK_GE(MaxBottomBlobs(), bottom.size())
          << type_name() << " Layer takes at most " << MaxBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (ExactNumTopBlobs() >= 0) {
      CHECK_EQ(ExactNumTopBlobs(), top.size())
          << type_name() << " Layer produces " << ExactNumTopBlobs()
          << " top blob(s) as output.";
    }
    if (MinTopBlobs() >= 0) {
      CHECK_LE(MinTopBlobs(), top.size())
          << type_name() << " Layer produces at least " << MinTopBlobs()
          << " top blob(s) as output.";
    }
    if (MaxTopBlobs() >= 0) {
      CHECK_GE(MaxTopBlobs(), top.size())
          << type_name() << " Layer produces at most " << MaxTopBlobs()
          << " top blob(s) as output.";
    }
    if (EqualNumBottomTopBlobs()) {
      CHECK_EQ(bottom.size(), top.size())
          << type_name() << " Layer produces one top blob as output for each "
          << "bottom blob input.";
    }
  }
  /**
   * Called by SetUp to initialize the weights associated with any top blobs in
   * the loss function. Store non-zero loss weights in the diff blob.
   */
  inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
    const int num_loss_weights = layer_param_.loss_weight_size();
    if (num_loss_weights) {
      CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
          "unspecified or specified once per top blob.";
      for (int top_id = 0; top_id < top.size(); ++top_id) {
        const Dtype loss_weight = layer_param_.loss_weight(top_id);
        if (loss_weight == Dtype(0)) { continue; }
        this->set_loss(top_id, loss_weight);
        const int count = top[top_id]->count();
        Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
        caffe_set(count, loss_weight, loss_multiplier);
      }
    }
  }

 private:
  DISABLE_COPY_AND_ASSIGN(Layer);
};  // class Layer
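// ---------------------------------------------------------------------------
// Illustrative sketch only (not part of Caffe): a minimal subclass that
// scales its input by a constant, showing the least a layer must provide --
// Reshape, Forward_cpu, and Backward_cpu. The GPU variants are optional and
// fall back to the CPU implementations by default.
template <typename Dtype>
class ExampleScaleLayer : public Layer<Dtype> {
 public:
  explicit ExampleScaleLayer(const LayerParameter& param)
      : Layer<Dtype>(param), scale_(Dtype(2)) {}
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }

  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // The top blob mirrors the bottom blob's shape.
    top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
                    bottom[0]->height(), bottom[0]->width());
  }

 protected:
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    Dtype* top_data = top[0]->mutable_cpu_data();
    for (int i = 0; i < bottom[0]->count(); ++i) {
      top_data[i] = scale_ * bottom_data[i];
    }
  }
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->cpu_diff();
      Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
      // d(scale * x)/dx = scale, so the bottom diff is the scaled top diff.
      for (int i = 0; i < top[0]->count(); ++i) {
        bottom_diff[i] = scale_ * top_diff[i];
      }
    }
  }

 private:
  Dtype scale_;  // hypothetical fixed scale factor, for illustration only
};
// ---------------------------------------------------------------------------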
// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype loss = 0;
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  return loss;
}
template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}
// Serialize LayerParameter to protocol buffer.
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}
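// Usage sketch (illustrative): serialize a layer's learned parameters, e.g.
// when snapshotting a net.
//
//   LayerParameter saved;
//   layer->ToProto(&saved, false);  // false: omit diffs from the snapshot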
// The layer factory function.
template <typename Dtype>
Layer<Dtype>* GetLayer(const LayerParameter& param);

}  // namespace caffe

#endif  // CAFFE_LAYER_H_