vector<Blob<Dtype>*>* top);
};
-
template <typename Dtype>
-class ReLULayer : public NeuronLayer<Dtype> {
+class BNLLLayer : public NeuronLayer<Dtype> {
public:
- explicit ReLULayer(const LayerParameter& param)
+ explicit BNLLLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
-
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};
template <typename Dtype>
-class TanHLayer : public NeuronLayer<Dtype> {
+class DropoutLayer : public NeuronLayer<Dtype> {
public:
- explicit TanHLayer(const LayerParameter& param)
+ explicit DropoutLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
+ virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+
+ shared_ptr<SyncedMemory> rand_vec_;
+ float threshold_;
+ float scale_;
+ unsigned int uint_thres_;
};
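// A minimal sketch (not part of this header) of how the members above are
// typically used for inverted dropout: rand_vec_ holds one random word per
// element, uint_thres_ encodes threshold_ as an integer cutoff, and kept
// activations are rescaled by scale_ = 1 / (1 - threshold_). The helper name
// and arguments below are illustrative only.
template <typename Dtype>
void dropout_forward_sketch(const Dtype* in, const unsigned int* rand_vec,
    unsigned int uint_thres, Dtype scale, int count, Dtype* out) {
  for (int i = 0; i < count; ++i) {
    // Keep an element with probability (1 - threshold) and rescale it so the
    // expected value of the output matches the input.
    out[i] = (rand_vec[i] > uint_thres) ? in[i] * scale : Dtype(0);
  }
}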
template <typename Dtype>
-class SigmoidLayer : public NeuronLayer<Dtype> {
+class ReLULayer : public NeuronLayer<Dtype> {
public:
- explicit SigmoidLayer(const LayerParameter& param)
+ explicit ReLULayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
+
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};
-
template <typename Dtype>
-class BNLLLayer : public NeuronLayer<Dtype> {
+class SigmoidLayer : public NeuronLayer<Dtype> {
public:
- explicit BNLLLayer(const LayerParameter& param)
+ explicit SigmoidLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
};
-
template <typename Dtype>
-class DropoutLayer : public NeuronLayer<Dtype> {
+class TanHLayer : public NeuronLayer<Dtype> {
public:
- explicit DropoutLayer(const LayerParameter& param)
+ explicit TanHLayer(const LayerParameter& param)
: NeuronLayer<Dtype>(param) {}
- virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-
- shared_ptr<SyncedMemory> rand_vec_;
- float threshold_;
- float scale_;
- unsigned int uint_thres_;
};
template <typename Dtype>
-class SplitLayer : public Layer<Dtype> {
+class AccuracyLayer : public Layer<Dtype> {
public:
- explicit SplitLayer(const LayerParameter& param)
+ explicit AccuracyLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
protected:
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
- virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ // The accuracy layer should not be used to compute backward operations.
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-
- int count_;
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+ NOT_IMPLEMENTED;
+ }
};
-
template <typename Dtype>
-class FlattenLayer : public Layer<Dtype> {
+class ConcatLayer : public Layer<Dtype> {
public:
- explicit FlattenLayer(const LayerParameter& param)
+ explicit ConcatLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ Blob<Dtype> col_bob_;
int count_;
+ int num_;
+ int channels_;
+ int height_;
+ int width_;
+ int concat_dim_;
};
-
template <typename Dtype>
-class InnerProductLayer : public Layer<Dtype> {
+class ConvolutionLayer : public Layer<Dtype> {
public:
- explicit InnerProductLayer(const LayerParameter& param)
+ explicit ConvolutionLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
-
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ int kernel_size_;
+ int stride_;
+ int num_;
+ int channels_;
+ int pad_;
+ int height_;
+ int width_;
+ int num_output_;
+ int group_;
+ Blob<Dtype> col_buffer_;
+ shared_ptr<SyncedMemory> bias_multiplier_;
+ bool bias_term_;
int M_;
int K_;
int N_;
- bool bias_term_;
- shared_ptr<SyncedMemory> bias_multiplier_;
};
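// Illustrative only: the GEMM shapes that members such as M_, K_, N_ and
// col_buffer_ describe once the input has been unrolled by im2col. The
// height_out/width_out locals and the helper name are assumptions of this
// sketch, not declarations from this header.
inline void conv_gemm_shapes(int num_output, int channels, int kernel_size,
    int group, int height, int width, int pad, int stride,
    int* M, int* K, int* N) {
  int height_out = (height + 2 * pad - kernel_size) / stride + 1;
  int width_out = (width + 2 * pad - kernel_size) / stride + 1;
  *M = num_output / group;                            // filters per group
  *K = channels * kernel_size * kernel_size / group;  // rows of col_buffer_
  *N = height_out * width_out;                        // output spatial positions
}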
+// This function is used to create a pthread that prefetches the data.
+template <typename Dtype>
+void* DataLayerPrefetch(void* layer_pointer);
template <typename Dtype>
-class LRNLayer : public Layer<Dtype> {
+class DataLayer : public Layer<Dtype> {
+ // The function used to perform prefetching.
+ friend void* DataLayerPrefetch<Dtype>(void* layer_pointer);
+
public:
- explicit LRNLayer(const LayerParameter& param)
+ explicit DataLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
+ virtual ~DataLayer();
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
- // scale_ stores the intermediate summing results
- Blob<Dtype> scale_;
- int size_;
- int pre_pad_;
- Dtype alpha_;
- Dtype beta_;
- int num_;
- int channels_;
- int height_;
- int width_;
+ shared_ptr<leveldb::DB> db_;
+ shared_ptr<leveldb::Iterator> iter_;
+ int datum_channels_;
+ int datum_height_;
+ int datum_width_;
+ int datum_size_;
+ pthread_t thread_;
+ shared_ptr<Blob<Dtype> > prefetch_data_;
+ shared_ptr<Blob<Dtype> > prefetch_label_;
+ Blob<Dtype> data_mean_;
};
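// A minimal sketch, assuming POSIX threads, of how a prefetch routine such as
// DataLayerPrefetch is driven: the layer starts the thread after preparing
// prefetch_data_, and joins it before handing the batch to the top blobs.
// The helper name and arguments are illustrative, not part of this header.
#include <pthread.h>
inline void run_prefetch_once(void* layer, void* (*prefetch)(void*)) {
  pthread_t thread;
  pthread_create(&thread, NULL, prefetch, layer);  // fill the next batch in the background
  pthread_join(thread, NULL);                      // wait until the batch is ready
}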
-
template <typename Dtype>
-class Im2colLayer : public Layer<Dtype> {
+class EuclideanLossLayer : public Layer<Dtype> {
public:
- explicit Im2colLayer(const LayerParameter& param)
- : Layer<Dtype>(param) {}
+ explicit EuclideanLossLayer(const LayerParameter& param)
+ : Layer<Dtype>(param), difference_() {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
protected:
+  // The loss layer will do nothing during forward - all computation is
+ // carried out in the backward pass.
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
- virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ // vector<Blob<Dtype>*>* top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- int kernel_size_;
- int stride_;
- int channels_;
- int height_;
- int width_;
- int pad_;
+ Blob<Dtype> difference_;
};
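// Since the comment above defers all computation to the backward pass, a
// worked sketch of what that pass produces may help: difference_ holds
// prediction minus target, the loss is the sum of squared differences scaled
// by 1/(2*num), and the gradient w.r.t. the prediction is difference_ / num.
// The helper below is illustrative only.
template <typename Dtype>
Dtype euclidean_loss_sketch(const Dtype* pred, const Dtype* target,
    Dtype* bottom_diff, int count, int num) {
  Dtype loss = 0;
  for (int i = 0; i < count; ++i) {
    Dtype d = pred[i] - target[i];  // what difference_ stores
    bottom_diff[i] = d / num;       // gradient of the loss w.r.t. pred[i]
    loss += d * d;
  }
  return loss / Dtype(2) / num;     // (1 / 2N) * sum of squared differences
}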
template <typename Dtype>
-class PoolingLayer : public Layer<Dtype> {
+class FlattenLayer : public Layer<Dtype> {
public:
- explicit PoolingLayer(const LayerParameter& param)
+ explicit FlattenLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- int kernel_size_;
- int stride_;
- int channels_;
- int height_;
- int width_;
- int pooled_height_;
- int pooled_width_;
- Blob<float> rand_idx_;
+ int count_;
};
-
template <typename Dtype>
-class ConvolutionLayer : public Layer<Dtype> {
+class HDF5DataLayer : public Layer<Dtype> {
public:
- explicit ConvolutionLayer(const LayerParameter& param)
+ explicit HDF5DataLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
+ virtual ~HDF5DataLayer();
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ virtual void load_hdf5_file_data(const char* filename);
- int kernel_size_;
- int stride_;
- int num_;
- int channels_;
- int pad_;
- int height_;
- int width_;
- int num_output_;
- int group_;
- Blob<Dtype> col_buffer_;
- shared_ptr<SyncedMemory> bias_multiplier_;
- bool bias_term_;
- int M_;
- int K_;
- int N_;
+ std::vector<std::string> hdf_filenames_;
+ unsigned int num_files_;
+ unsigned int current_file_;
+ hsize_t current_row_;
+ Blob<Dtype> data_blob_;
+ Blob<Dtype> label_blob_;
};
template <typename Dtype>
-class ConcatLayer : public Layer<Dtype> {
+class Im2colLayer : public Layer<Dtype> {
public:
- explicit ConcatLayer(const LayerParameter& param)
+ explicit Im2colLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- Blob<Dtype> col_bob_;
- int count_;
- int num_;
+ int kernel_size_;
+ int stride_;
int channels_;
int height_;
int width_;
- int concat_dim_;
+ int pad_;
};
// This function is used to create a pthread that prefetches the data.
template <typename Dtype>
-void* DataLayerPrefetch(void* layer_pointer);
+void* ImagesLayerPrefetch(void* layer_pointer);
template <typename Dtype>
-class DataLayer : public Layer<Dtype> {
+class ImagesLayer : public Layer<Dtype> {
// The function used to perform prefetching.
- friend void* DataLayerPrefetch<Dtype>(void* layer_pointer);
+ friend void* ImagesLayerPrefetch<Dtype>(void* layer_pointer);
public:
- explicit DataLayer(const LayerParameter& param)
+ explicit ImagesLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
- virtual ~DataLayer();
+ virtual ~ImagesLayer();
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
- shared_ptr<leveldb::DB> db_;
- shared_ptr<leveldb::Iterator> iter_;
+ vector<std::pair<std::string, int> > lines_;
+ int lines_id_;
int datum_channels_;
int datum_height_;
int datum_width_;
Blob<Dtype> data_mean_;
};
-// This function is used to create a pthread that prefetches the data.
-template <typename Dtype>
-void* ImagesLayerPrefetch(void* layer_pointer);
-
template <typename Dtype>
-class ImagesLayer : public Layer<Dtype> {
- // The function used to perform prefetching.
- friend void* ImagesLayerPrefetch<Dtype>(void* layer_pointer);
-
+class InfogainLossLayer : public Layer<Dtype> {
public:
- explicit ImagesLayer(const LayerParameter& param)
- : Layer<Dtype>(param) {}
- virtual ~ImagesLayer();
+ explicit InfogainLossLayer(const LayerParameter& param)
+ : Layer<Dtype>(param), infogain_() {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
protected:
+  // The loss layer will do nothing during forward - all computation is
+ // carried out in the backward pass.
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
- virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ // vector<Blob<Dtype>*>* top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
- virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- vector<std::pair<std::string, int> > lines_;
- int lines_id_;
- int datum_channels_;
- int datum_height_;
- int datum_width_;
- int datum_size_;
- pthread_t thread_;
- shared_ptr<Blob<Dtype> > prefetch_data_;
- shared_ptr<Blob<Dtype> > prefetch_label_;
- Blob<Dtype> data_mean_;
+ Blob<Dtype> infogain_;
};
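// Illustrative only: the loss that the infogain_ matrix H above parameterizes.
// Each row of H weights the log-probabilities for one ground-truth label;
// with H equal to the identity this reduces to the multinomial logistic loss.
#include <algorithm>
#include <cmath>
template <typename Dtype>
Dtype infogain_loss_sketch(const Dtype* prob, const Dtype* label,
    const Dtype* H, int num, int dim) {
  Dtype loss = 0;
  for (int i = 0; i < num; ++i) {
    int gt = static_cast<int>(label[i]);
    for (int j = 0; j < dim; ++j) {
      Dtype p = std::max(prob[i * dim + j], Dtype(1e-20));  // avoid log(0)
      loss -= H[gt * dim + j] * std::log(p);
    }
  }
  return loss / num;
}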
-
template <typename Dtype>
-class HDF5DataLayer : public Layer<Dtype> {
+class InnerProductLayer : public Layer<Dtype> {
public:
- explicit HDF5DataLayer(const LayerParameter& param)
+ explicit InnerProductLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
- virtual ~HDF5DataLayer();
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- virtual void load_hdf5_file_data(const char* filename);
- std::vector<std::string> hdf_filenames_;
- unsigned int num_files_;
- unsigned int current_file_;
- hsize_t current_row_;
- Blob<Dtype> data_blob_;
- Blob<Dtype> label_blob_;
+ int M_;
+ int K_;
+ int N_;
+ bool bias_term_;
+ shared_ptr<SyncedMemory> bias_multiplier_;
};
-
template <typename Dtype>
class HDF5OutputLayer : public Layer<Dtype> {
public:
Blob<Dtype> label_blob_;
};
-
template <typename Dtype>
-class SoftmaxLayer : public Layer<Dtype> {
+class LRNLayer : public Layer<Dtype> {
public:
- explicit SoftmaxLayer(const LayerParameter& param)
+ explicit LRNLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
 protected:
  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
      vector<Blob<Dtype>*>* top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- // sum_multiplier is just used to carry out sum using blas
- Blob<Dtype> sum_multiplier_;
- // scale is an intermediate blob to hold temporary results.
+ // scale_ stores the intermediate summing results
Blob<Dtype> scale_;
+ int size_;
+ int pre_pad_;
+ Dtype alpha_;
+ Dtype beta_;
+ int num_;
+ int channels_;
+ int height_;
+ int width_;
};
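// A minimal sketch of the local response normalization the members above
// configure: each activation is divided by (1 + alpha/size * sum of squares)
// raised to beta, taken over a window of `size` neighboring channels. The
// helper name is illustrative only.
#include <algorithm>
#include <cmath>
template <typename Dtype>
Dtype lrn_across_channels_sketch(const Dtype* a, int channels, int c,
    int size, Dtype alpha, Dtype beta) {
  int lo = std::max(0, c - size / 2);
  int hi = std::min(channels - 1, c + size / 2);
  Dtype sum = 0;
  for (int j = lo; j <= hi; ++j) {
    sum += a[j] * a[j];  // squared activations in the local channel window
  }
  return a[c] * std::pow(Dtype(1) + alpha / size * sum, -beta);
}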
-
template <typename Dtype>
class MultinomialLogisticLossLayer : public Layer<Dtype> {
public:
};
template <typename Dtype>
-class InfogainLossLayer : public Layer<Dtype> {
+class PoolingLayer : public Layer<Dtype> {
public:
- explicit InfogainLossLayer(const LayerParameter& param)
- : Layer<Dtype>(param), infogain_() {}
+ explicit PoolingLayer(const LayerParameter& param)
+ : Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
protected:
- // The loss layer will do nothing during forward - all computation are
- // carried out in the backward pass.
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
- // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- // vector<Blob<Dtype>*>* top);
+ virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- Blob<Dtype> infogain_;
+ int kernel_size_;
+ int stride_;
+ int channels_;
+ int height_;
+ int width_;
+ int pooled_height_;
+ int pooled_width_;
+ Blob<float> rand_idx_;
};
+template <typename Dtype>
+class SoftmaxLayer : public Layer<Dtype> {
+ public:
+ explicit SoftmaxLayer(const LayerParameter& param)
+ : Layer<Dtype>(param) {}
+ virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
+
+ protected:
+ virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
+ virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
+ virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+
+ // sum_multiplier is just used to carry out sum using blas
+ Blob<Dtype> sum_multiplier_;
+ // scale is an intermediate blob to hold temporary results.
+ Blob<Dtype> scale_;
+};
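// The sum_multiplier_ blob above is the usual BLAS trick for summing: a
// vector of ones, so that a matrix-vector product returns per-row sums.
// A plain-loop sketch of that reduction (names are illustrative only):
template <typename Dtype>
void sum_rows_with_multiplier(const Dtype* data, const Dtype* ones,
    int num, int dim, Dtype* out) {
  for (int i = 0; i < num; ++i) {
    Dtype s = 0;
    for (int j = 0; j < dim; ++j) {
      s += data[i * dim + j] * ones[j];  // ones[j] == 1 plays the role of sum_multiplier_
    }
    out[i] = s;  // row sum, as a GEMV against the ones vector would compute
  }
}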
// SoftmaxWithLossLayer is a layer that implements softmax and then computes
// the loss - it is preferred over softmax + multinomiallogisticloss in the
// sense that during training, this provides better numerical stability.
vector<Blob<Dtype>*> softmax_top_vec_;
};
-
template <typename Dtype>
-class EuclideanLossLayer : public Layer<Dtype> {
+class SplitLayer : public Layer<Dtype> {
public:
- explicit EuclideanLossLayer(const LayerParameter& param)
- : Layer<Dtype>(param), difference_() {}
+ explicit SplitLayer(const LayerParameter& param)
+ : Layer<Dtype>(param) {}
virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
protected:
- // The loss layer will do nothing during forward - all computation are
- // carried out in the backward pass.
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top);
- // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
- // vector<Blob<Dtype>*>* top);
+ virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+ vector<Blob<Dtype>*>* top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
- // const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-
- Blob<Dtype> difference_;
-};
-
-
-template <typename Dtype>
-class AccuracyLayer : public Layer<Dtype> {
- public:
- explicit AccuracyLayer(const LayerParameter& param)
- : Layer<Dtype>(param) {}
- virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
+ virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+ const bool propagate_down, vector<Blob<Dtype>*>* bottom);
- protected:
- virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
- vector<Blob<Dtype>*>* top);
- // The accuracy layer should not be used to compute backward operations.
- virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
- const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
- NOT_IMPLEMENTED;
- }
+ int count_;
};
// This function is used to create a pthread that prefetches the window data.