int N_;
};
+// ConcatLayer: joins multiple bottom blobs into a single top blob along
+// one dimension, chosen by the concat_dim field of LayerParameter (see
+// caffe.proto; default 1 = channels). All other dimensions must agree
+// across the bottom blobs.
+template <typename Dtype>
+class ConcatLayer : public Layer<Dtype> {
+ public:
+  explicit ConcatLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  // Validates bottom-blob shape compatibility and shapes the top blob.
+  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+
+ protected:
+  // CPU/GPU forward passes: concatenate bottom data into the top blob.
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  // CPU/GPU backward passes: route the top diff back to each bottom blob.
+  virtual Dtype Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual Dtype Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+
+  // Cached dimensions of the concatenated top blob — presumably filled
+  // in by SetUp (definition not visible here).
+  int COUNT_;
+  int NUM_;
+  int CHANNELS_;
+  int HEIGHT_;
+  int WIDTH_;
+  // Dimension along which to concatenate (per caffe.proto: default 1,
+  // the channels dimension).
+  int concat_dim_;
+};
// This function is used to create a pthread that prefetches the data.
template <typename Dtype>
return new BNLLLayer<Dtype>(param);
} else if (type == "conv") {
return new ConvolutionLayer<Dtype>(param);
+ } else if (type == "concat") {
+ return new ConcatLayer<Dtype>(param);
} else if (type == "data") {
return new DataLayer<Dtype>(param);
} else if (type == "hdf5_data") {
// point would be set as rand_skip * rand(0,1). Note that rand_skip should not
// be larger than the number of keys in the leveldb.
optional uint32 rand_skip = 53 [ default = 0 ];
+
+  // The Concat layer specifies the dimension along which concatenation
+  // happens; all other dimensions must be the same across the bottom
+  // blobs. By default, blobs are concatenated along the channels dimension.
+ optional uint32 concat_dim = 65 [ default = 1 ];
}
message LayerConnection {