#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer_factory.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/math_functions.hpp"

/**
 Forward declare boost::thread instead of including boost/thread.hpp
 to avoid boost/NVCC issues (#1009, #1010) on OSX.
 */
namespace boost { class mutex; }

namespace caffe {

/**
 * @brief An interface for the units of computation which can be composed into a
 *        Net.
 *
 * Layer%s must implement a Forward function, in which they take their input
 * (bottom) Blob%s (if any) and compute their output Blob%s (if any).
 * They may also implement a Backward function, in which they compute the error
 * gradients with respect to their input Blob%s, given the error gradients with
 * respect to their output Blob%s. (A sketch of a minimal custom layer follows
 * the class definition below.)
 */
template <typename Dtype>
class Layer {
 public:
  /**
   * You should not implement your own constructor. Any set up code should go
   * to SetUp(), where the dimensions of the bottom blobs are provided to the
   * layer.
   */
  explicit Layer(const LayerParameter& param)
    : layer_param_(param) {
      // Set phase and copy blobs (if there are any).
      phase_ = param.phase();
      if (layer_param_.blobs_size() > 0) {
        blobs_.resize(layer_param_.blobs_size());
        for (int i = 0; i < layer_param_.blobs_size(); ++i) {
          blobs_[i].reset(new Blob<Dtype>());
          blobs_[i]->FromProto(layer_param_.blobs(i));
        }
      }
    }
  virtual ~Layer() {}

  /**
   * @brief Implements common layer setup functionality.
   *
   * @param bottom the preshaped input blobs
   * @param top
   *     the allocated but unshaped output blobs, to be shaped by Reshape
   *
   * Checks that the number of bottom and top blobs is correct.
   * Calls LayerSetUp to do special layer setup for individual layer types,
   * followed by Reshape to set up sizes of top blobs and internal buffers.
   * Sets up the loss weight multiplier blobs for any non-zero loss weights.
   * This method may not be overridden.
   */
  void SetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    CheckBlobCounts(bottom, top);
    LayerSetUp(bottom, top);
    Reshape(bottom, top);
    SetLossWeights(top);
  }

  /**
   * @brief Does layer-specific setup: your layer should implement this function
   *        as well as Reshape.
   *
   * @param bottom
   *     the preshaped input blobs, whose data fields store the input data for
   *     this layer
   * @param top
   *     the allocated but unshaped output blobs
   *
   * This method should do one-time, layer-specific setup. This includes reading
   * and processing relevant parameters from the <code>layer_param_</code>.
   * Setting up the shapes of top blobs and internal buffers should be done in
   * <code>Reshape</code>, which will be called before the forward pass to
   * adjust the top blob sizes.
   */
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {}
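
  // Illustrative sketch (not part of this interface): a custom layer whose
  // proto message defined a hypothetical my_param() field might implement
  // LayerSetUp roughly as follows, leaving all shape handling to Reshape:
  //
  //   template <typename Dtype>
  //   void MyLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
  //       const vector<Blob<Dtype>*>& top) {
  //     // Read settings once from the layer's protobuf parameters.
  //     my_param_ = this->layer_param_.my_param();
  //   }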

  /**
   * @brief Adjust the shapes of top blobs and internal buffers to accommodate
   *        the shapes of the bottom blobs.
   *
   * @param bottom the input blobs, with the requested input shapes
   * @param top the top blobs, which should be reshaped as needed
   *
   * This method should reshape top blobs as needed according to the shapes
   * of the bottom (input) blobs, as well as reshaping any internal buffers
   * and making any other necessary adjustments so that the layer can
   * accommodate the bottom blobs.
   */
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
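
  // Illustrative sketch: an element-wise layer would typically give each top
  // blob the shape of the corresponding bottom blob:
  //
  //   template <typename Dtype>
  //   void MyLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
  //       const vector<Blob<Dtype>*>& top) {
  //     top[0]->ReshapeLike(*bottom[0]);
  //   }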

  /**
   * @brief Given the bottom blobs, compute the top blobs and the loss.
   *
   * @param bottom
   *     the input blobs, whose data fields store the input data for this layer
   * @param top
   *     the preshaped output blobs, whose data fields will store this layer's
   *     outputs
   * \return The total loss from the layer.
   *
   * The Forward wrapper calls the relevant device-specific function
   * (Forward_cpu or Forward_gpu) to compute the top blob values given the
   * bottom blobs. If the layer has any non-zero loss_weights, the wrapper
   * then computes and returns the loss.
   *
   * Your layer should implement Forward_cpu and (optionally) Forward_gpu.
   */
  inline Dtype Forward(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
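
  // Usage sketch: the wrapper first calls Reshape, then the device-specific
  // forward implementation, and returns this layer's weighted loss (zero for
  // non-loss layers):
  //
  //   Dtype loss = layer->Forward(bottom, top);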

  /**
   * @brief Given the top blob error gradients, compute the bottom blob error
   *        gradients.
   *
   * @param top
   *     the output blobs, whose diff fields store the gradient of the error
   *     with respect to themselves
   * @param propagate_down
   *     a vector with equal length to bottom, with each index indicating
   *     whether to propagate the error gradients down to the bottom blob at
   *     the corresponding index
   * @param bottom
   *     the input blobs, whose diff fields will store the gradient of the error
   *     with respect to themselves after Backward is run
   *
   * The Backward wrapper calls the relevant device-specific function
   * (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
   * top blob diffs.
   *
   * Your layer should implement Backward_cpu and (optionally) Backward_gpu.
   */
  inline void Backward(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom);
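
  // Usage sketch: a driver (such as a Net) passes one propagate_down flag per
  // bottom blob, e.g.:
  //
  //   vector<bool> propagate_down(bottom.size(), true);
  //   layer->Backward(top, propagate_down, bottom);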

  /**
   * @brief Returns the vector of learnable parameter blobs.
   */
  vector<shared_ptr<Blob<Dtype> > >& blobs() {
    return blobs_;
  }

  /**
   * @brief Returns the layer parameter.
   */
  const LayerParameter& layer_param() const { return layer_param_; }

  /**
   * @brief Writes the layer parameter to a protocol buffer.
   */
  virtual void ToProto(LayerParameter* param, bool write_diff = false);

  /**
   * @brief Returns the scalar loss associated with a top blob at a given index.
   */
  inline Dtype loss(const int top_index) const {
    return (loss_.size() > top_index) ? loss_[top_index] : Dtype(0);
  }

  /**
   * @brief Sets the loss associated with a top blob at a given index.
   */
  inline void set_loss(const int top_index, const Dtype value) {
    if (loss_.size() <= top_index) {
      loss_.resize(top_index + 1, Dtype(0));
    }
    loss_[top_index] = value;
  }

  /**
   * @brief Returns the layer type.
   */
  virtual inline const char* type() const { return ""; }

  /**
   * @brief Returns the exact number of bottom blobs required by the layer,
   *        or -1 if no exact number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some exact number of bottom blobs.
   */
  virtual inline int ExactNumBottomBlobs() const { return -1; }
  /**
   * @brief Returns the minimum number of bottom blobs required by the layer,
   *        or -1 if no minimum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some minimum number of bottom blobs.
   */
  virtual inline int MinBottomBlobs() const { return -1; }
  /**
   * @brief Returns the maximum number of bottom blobs required by the layer,
   *        or -1 if no maximum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some maximum number of bottom blobs.
   */
  virtual inline int MaxBottomBlobs() const { return -1; }
  /**
   * @brief Returns the exact number of top blobs required by the layer,
   *        or -1 if no exact number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some exact number of top blobs.
   */
  virtual inline int ExactNumTopBlobs() const { return -1; }
  /**
   * @brief Returns the minimum number of top blobs required by the layer,
   *        or -1 if no minimum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some minimum number of top blobs.
   */
  virtual inline int MinTopBlobs() const { return -1; }
  /**
   * @brief Returns the maximum number of top blobs required by the layer,
   *        or -1 if no maximum number is required.
   *
   * This method should be overridden to return a non-negative value if your
   * layer expects some maximum number of top blobs.
   */
  virtual inline int MaxTopBlobs() const { return -1; }
  /**
   * @brief Returns true if the layer requires an equal number of bottom and
   *        top blobs.
   *
   * This method should be overridden to return true if your layer expects an
   * equal number of bottom and top blobs.
   */
  virtual inline bool EqualNumBottomTopBlobs() const { return false; }
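
  // Illustrative sketch: a layer that consumes predictions and labels and
  // produces a single output might declare
  //
  //   virtual inline int ExactNumBottomBlobs() const { return 2; }
  //   virtual inline int ExactNumTopBlobs() const { return 1; }
  //
  // so that CheckBlobCounts (below) enforces those counts during SetUp.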

  /**
   * @brief Return whether "anonymous" top blobs are created automatically
   *        by the layer.
   *
   * If this method returns true, Net::Init will create enough "anonymous" top
   * blobs to fulfill the requirement specified by ExactNumTopBlobs() or
   * MinTopBlobs().
   */
  virtual inline bool AutoTopBlobs() const { return false; }

  /**
   * @brief Return whether to allow force_backward for a given bottom blob
   *        index.
   *
   * If AllowForceBackward(i) == false, we will ignore the force_backward
   * setting and backpropagate to blob i only if it needs gradient information
   * (as is done when force_backward == false).
   */
  virtual inline bool AllowForceBackward(const int bottom_index) const {
    return true;
  }

  /**
   * @brief Specifies whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   *
   * You can safely ignore false values and always compute gradients
   * for all parameters, but possibly with wasteful computation.
   */
  inline bool param_propagate_down(const int param_id) {
    return (param_propagate_down_.size() > param_id) ?
        param_propagate_down_[param_id] : false;
  }
  /**
   * @brief Sets whether the layer should compute gradients w.r.t. a
   *        parameter at a particular index given by param_id.
   */
  inline void set_param_propagate_down(const int param_id, const bool value) {
    if (param_propagate_down_.size() <= param_id) {
      param_propagate_down_.resize(param_id + 1, true);
    }
    param_propagate_down_[param_id] = value;
  }

 protected:
  /** The protobuf that stores the layer parameters */
  LayerParameter layer_param_;
  /** The phase: TRAIN or TEST */
  Phase phase_;
  /** The vector that stores the learnable parameters as a set of blobs. */
  vector<shared_ptr<Blob<Dtype> > > blobs_;
  /** Vector indicating whether to compute the diff of each param blob. */
  vector<bool> param_propagate_down_;

  /** The vector that indicates whether each top blob has a non-zero weight in
   *  the objective function. */
  vector<Dtype> loss_;

  /** @brief Using the CPU device, compute the layer output. */
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) = 0;
  /**
   * @brief Using the GPU device, compute the layer output.
   *        Fall back to Forward_cpu() if unavailable.
   */
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    // LOG(WARNING) << "Using CPU code as backup.";
    return Forward_cpu(bottom, top);
  }

  /**
   * @brief Using the CPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   */
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) = 0;
  /**
   * @brief Using the GPU device, compute the gradients for any parameters and
   *        for the bottom blobs if propagate_down is true.
   *        Fall back to Backward_cpu() if unavailable.
   */
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    // LOG(WARNING) << "Using CPU code as backup.";
    Backward_cpu(top, propagate_down, bottom);
  }

  /**
   * Called by the parent Layer's SetUp to check that the number of bottom
   * and top Blobs provided as input match the expected numbers specified by
   * the {ExactNum,Min,Max}{Bottom,Top}Blobs() functions.
   */
  virtual void CheckBlobCounts(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    if (ExactNumBottomBlobs() >= 0) {
      CHECK_EQ(ExactNumBottomBlobs(), bottom.size())
          << type() << " Layer takes " << ExactNumBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MinBottomBlobs() >= 0) {
      CHECK_LE(MinBottomBlobs(), bottom.size())
          << type() << " Layer takes at least " << MinBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (MaxBottomBlobs() >= 0) {
      CHECK_GE(MaxBottomBlobs(), bottom.size())
          << type() << " Layer takes at most " << MaxBottomBlobs()
          << " bottom blob(s) as input.";
    }
    if (ExactNumTopBlobs() >= 0) {
      CHECK_EQ(ExactNumTopBlobs(), top.size())
          << type() << " Layer produces " << ExactNumTopBlobs()
          << " top blob(s) as output.";
    }
    if (MinTopBlobs() >= 0) {
      CHECK_LE(MinTopBlobs(), top.size())
          << type() << " Layer produces at least " << MinTopBlobs()
          << " top blob(s) as output.";
    }
    if (MaxTopBlobs() >= 0) {
      CHECK_GE(MaxTopBlobs(), top.size())
          << type() << " Layer produces at most " << MaxTopBlobs()
          << " top blob(s) as output.";
    }
    if (EqualNumBottomTopBlobs()) {
      CHECK_EQ(bottom.size(), top.size())
          << type() << " Layer produces one top blob as output for each "
          << "bottom blob input.";
    }
  }

  /**
   * Called by SetUp to initialize the weights associated with any top blobs in
   * the loss function. Store non-zero loss weights in the diff blob.
   */
  inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
    const int num_loss_weights = layer_param_.loss_weight_size();
    if (num_loss_weights) {
      CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
          "unspecified or specified once per top blob.";
      for (int top_id = 0; top_id < top.size(); ++top_id) {
        const Dtype loss_weight = layer_param_.loss_weight(top_id);
        if (loss_weight == Dtype(0)) { continue; }
        this->set_loss(top_id, loss_weight);
        const int count = top[top_id]->count();
        Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
        caffe_set(count, loss_weight, loss_multiplier);
      }
    }
  }
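
  // Note on the convention used here: storing the loss weight in a top blob's
  // diff lets the Forward wrapper below compute this layer's weighted loss as
  // a dot product of the top data with the top diff, and provides the initial
  // top diff read by loss layers during Backward.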

  DISABLE_COPY_AND_ASSIGN(Layer);
};  // class Layer
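
// Illustrative sketch (not part of Caffe): a minimal custom layer that scales
// its single input by a constant factor of 2. The name MyScaleLayer is
// hypothetical; a real layer would live in its own header/source files and be
// registered with the layer factory (e.g. REGISTER_LAYER_CLASS) there.
//
//   template <typename Dtype>
//   class MyScaleLayer : public Layer<Dtype> {
//    public:
//     explicit MyScaleLayer(const LayerParameter& param)
//         : Layer<Dtype>(param) {}
//     virtual inline const char* type() const { return "MyScale"; }
//     virtual inline int ExactNumBottomBlobs() const { return 1; }
//     virtual inline int ExactNumTopBlobs() const { return 1; }
//     virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
//         const vector<Blob<Dtype>*>& top) {
//       top[0]->ReshapeLike(*bottom[0]);
//     }
//
//    protected:
//     virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
//         const vector<Blob<Dtype>*>& top) {
//       // y = 2 * x, element-wise.
//       caffe_cpu_scale(bottom[0]->count(), Dtype(2),
//                       bottom[0]->cpu_data(), top[0]->mutable_cpu_data());
//     }
//     virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
//         const vector<bool>& propagate_down,
//         const vector<Blob<Dtype>*>& bottom) {
//       if (propagate_down[0]) {
//         // dL/dx = 2 * dL/dy, element-wise.
//         caffe_cpu_scale(top[0]->count(), Dtype(2),
//                         top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff());
//       }
//     }
//   };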

// Forward and backward wrappers. You should implement the CPU- and
// GPU-specific implementations instead, and should not change these
// functions.
template <typename Dtype>
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  Dtype loss = 0;
  Reshape(bottom, top);
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    // Accumulate the weighted loss for any top blob with a non-zero loss
    // weight (the weight is stored in the top blob's diff by SetLossWeights).
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->cpu_data();
      const Dtype* loss_weights = top[top_id]->cpu_diff();
      loss += caffe_cpu_dot(count, data, loss_weights);
    }
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
#ifndef CPU_ONLY
    for (int top_id = 0; top_id < top.size(); ++top_id) {
      if (!this->loss(top_id)) { continue; }
      const int count = top[top_id]->count();
      const Dtype* data = top[top_id]->gpu_data();
      const Dtype* loss_weights = top[top_id]->gpu_diff();
      Dtype blob_loss = 0;
      caffe_gpu_dot(count, data, loss_weights, &blob_loss);
      loss += blob_loss;
    }
#endif
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  return loss;
}

template <typename Dtype>
inline void Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Backward_cpu(top, propagate_down, bottom);
    break;
  case Caffe::GPU:
    Backward_gpu(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

// Serialize LayerParameter to protocol buffer
template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}
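
// Usage sketch: snapshot code can serialize a layer's parameters into the
// LayerParameter protobuf message like so:
//
//   LayerParameter layer_proto;
//   layer->ToProto(&layer_proto, /*write_diff=*/false);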

}  // namespace caffe

#endif  // CAFFE_LAYER_H_