// Copyright 2013 Yangqing Jia

#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_

#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"

using std::vector;

namespace caffe {

template <typename Dtype>
class Layer {
 public:
  // You should not implement your own constructor. Any set up code should go
  // to SetUp(), where the dimensions of the bottom blobs are provided to the
  // layer.
  explicit Layer(const LayerParameter& param)
      : layer_param_(param) {
    // The only thing we do is to copy blobs if there are any.
    if (layer_param_.blobs_size() > 0) {
      blobs_.resize(layer_param_.blobs_size());
      for (int i = 0; i < layer_param_.blobs_size(); ++i) {
        blobs_[i].reset(new Blob<Dtype>());
        blobs_[i]->FromProto(layer_param_.blobs(i));
      }
    }
  }
  virtual ~Layer() {}

  // SetUp: your function should implement this.
  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) = 0;

  // Forward and backward wrappers. You should implement the cpu and
  // gpu specific implementations instead, and should not change these
  // functions.
  inline void Forward(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  inline Dtype Backward(const vector<Blob<Dtype>*>& top,
      const bool propagate_down,
      vector<Blob<Dtype>*>* bottom);

  // Returns the vector of blobs.
  vector<shared_ptr<Blob<Dtype> > >& blobs() {
    return blobs_;
  }

  // Returns the layer parameter.
  const LayerParameter& layer_param() { return layer_param_; }
  // Writes the layer parameter to a protocol buffer.
  virtual void ToProto(LayerParameter* param, bool write_diff = false);

 protected:
  // The protobuf that stores the layer parameters.
  LayerParameter layer_param_;
  // The vector that stores the parameters as a set of blobs.
  vector<shared_ptr<Blob<Dtype> > > blobs_;

  // Forward functions.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) = 0;
  // If no gpu code is provided, we will simply use cpu code.
  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top) {
    // LOG(WARNING) << "Using CPU code as backup.";
    Forward_cpu(bottom, top);
  }

  // Backward functions: the backward function will compute the gradients for
  // any parameters and also for the bottom blobs if propagate_down is true.
  // It will return the loss produced from this layer.
  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down,
      vector<Blob<Dtype>*>* bottom) = 0;
  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down,
      vector<Blob<Dtype>*>* bottom) {
    // LOG(WARNING) << "Using CPU code as backup.";
    return Backward_cpu(top, propagate_down, bottom);
  }

  DISABLE_COPY_AND_ASSIGN(Layer);
};  // class Layer
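
// A minimal sketch of what a concrete layer looks like (illustrative only;
// "IdentityLayer" is a hypothetical example, not part of Caffe). A subclass
// checks and reshapes its top blobs in SetUp() and implements the CPU
// forward/backward passes; the GPU versions fall back to the CPU code above.
//
//   template <typename Dtype>
//   class IdentityLayer : public Layer<Dtype> {
//    public:
//     explicit IdentityLayer(const LayerParameter& param)
//         : Layer<Dtype>(param) {}
//     virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
//         vector<Blob<Dtype>*>* top) {
//       CHECK_EQ(bottom.size(), 1) << "IdentityLayer takes one input blob.";
//       CHECK_EQ(top->size(), 1) << "IdentityLayer makes one output blob.";
//       (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
//           bottom[0]->height(), bottom[0]->width());
//     }
//    protected:
//     virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
//         vector<Blob<Dtype>*>* top) {
//       const Dtype* bottom_data = bottom[0]->cpu_data();
//       Dtype* top_data = (*top)[0]->mutable_cpu_data();
//       for (int i = 0; i < bottom[0]->count(); ++i) {
//         top_data[i] = bottom_data[i];
//       }
//     }
//     virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
//         const bool propagate_down,
//         vector<Blob<Dtype>*>* bottom) {
//       if (propagate_down) {
//         const Dtype* top_diff = top[0]->cpu_diff();
//         Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
//         for (int i = 0; i < top[0]->count(); ++i) {
//           bottom_diff[i] = top_diff[i];
//         }
//       }
//       return Dtype(0);  // the identity op contributes no loss
//     }
//   };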

// Forward and backward wrappers. You should implement the cpu and
// gpu specific implementations instead, and should not change these
// functions.
template <typename Dtype>
inline void Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    Forward_cpu(bottom, top);
    break;
  case Caffe::GPU:
    Forward_gpu(bottom, top);
    break;
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
}

template <typename Dtype>
inline Dtype Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
    const bool propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  switch (Caffe::mode()) {
  case Caffe::CPU:
    return Backward_cpu(top, propagate_down, bottom);
  case Caffe::GPU:
    return Backward_gpu(top, propagate_down, bottom);
  default:
    LOG(FATAL) << "Unknown caffe mode.";
  }
  // Not reached: LOG(FATAL) aborts. This return only silences compiler
  // warnings about a missing return value.
  return Dtype(0);
}
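
// Usage sketch (illustrative; assumes "my_layer" is an instance of some
// concrete Layer<float> subclass and the bottom blobs already hold data).
// The wrappers dispatch on the process-wide Caffe mode, so the same calls
// run on either device:
//
//   Caffe::set_mode(Caffe::CPU);
//   my_layer.SetUp(bottom_vec, &top_vec);
//   my_layer.Forward(bottom_vec, &top_vec);
//   float loss = my_layer.Backward(top_vec, true, &bottom_vec);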

template <typename Dtype>
void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
  param->CopyFrom(layer_param_);
  param->clear_blobs();
  for (int i = 0; i < blobs_.size(); ++i) {
    blobs_[i]->ToProto(param->add_blobs(), write_diff);
  }
}
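
// Serialization sketch (illustrative): snapshotting a layer copies its
// LayerParameter and replaces the stored blobs with the current weights
// (plus the current gradients, if write_diff is true):
//
//   LayerParameter snapshot;
//   my_layer.ToProto(&snapshot, false);  // weights only, no diffs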

// The layer factory function.
template <typename Dtype>
Layer<Dtype>* GetLayer(const LayerParameter& param);
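
// Factory sketch (illustrative; assumes param.type() carries a layer type
// string that the GetLayer implementation in layer_factory recognizes,
// e.g. "relu"):
//
//   LayerParameter param;
//   param.set_type("relu");
//   shared_ptr<Layer<float> > layer(GetLayer<float>(param));
//   layer->SetUp(bottom_vec, &top_vec);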

}  // namespace caffe

#endif  // CAFFE_LAYER_H_