From a68395c2ea2b6092b71c2c98a84920e730044fd0 Mon Sep 17 00:00:00 2001
From: Jeff Donahue
Date: Fri, 21 Mar 2014 15:01:55 -0700
Subject: [PATCH] alphabetize classes in vision_layers.hpp

---
 include/caffe/vision_layers.hpp | 372 +++++++++++++++++++---------------------
 1 file changed, 178 insertions(+), 194 deletions(-)

diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp
index d80809b..079537e 100644
--- a/include/caffe/vision_layers.hpp
+++ b/include/caffe/vision_layers.hpp
@@ -34,11 +34,10 @@ class NeuronLayer : public Layer<Dtype> {
       vector<Blob<Dtype>*>* top);
 };
 
-
 template <typename Dtype>
-class ReLULayer : public NeuronLayer<Dtype> {
+class BNLLLayer : public NeuronLayer<Dtype> {
  public:
-  explicit ReLULayer(const LayerParameter& param)
+  explicit BNLLLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}
 
  protected:
@@ -46,7 +45,6 @@ class ReLULayer : public NeuronLayer<Dtype> {
       vector<Blob<Dtype>*>* top);
   virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
@@ -54,10 +52,12 @@ class ReLULayer : public NeuronLayer<Dtype> {
 };
 
 template <typename Dtype>
-class TanHLayer : public NeuronLayer<Dtype> {
+class DropoutLayer : public NeuronLayer<Dtype> {
  public:
-  explicit TanHLayer(const LayerParameter& param)
+  explicit DropoutLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}
+  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
 
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
@@ -68,12 +68,17 @@ class TanHLayer : public NeuronLayer<Dtype> {
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+
+  shared_ptr<SyncedMemory> rand_vec_;
+  float threshold_;
+  float scale_;
+  unsigned int uint_thres_;
 };
 
 template <typename Dtype>
-class SigmoidLayer : public NeuronLayer<Dtype> {
+class ReLULayer : public NeuronLayer<Dtype> {
  public:
-  explicit SigmoidLayer(const LayerParameter& param)
+  explicit ReLULayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}
 
  protected:
@@ -81,17 +86,17 @@ class SigmoidLayer : public NeuronLayer<Dtype> {
       vector<Blob<Dtype>*>* top);
   virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 };
 
-
 template <typename Dtype>
-class BNLLLayer : public NeuronLayer<Dtype> {
+class SigmoidLayer : public NeuronLayer<Dtype> {
  public:
-  explicit BNLLLayer(const LayerParameter& param)
+  explicit SigmoidLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}
 
  protected:
@@ -105,14 +110,11 @@ class BNLLLayer : public NeuronLayer<Dtype> {
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 };
 
-
 template <typename Dtype>
-class DropoutLayer : public NeuronLayer<Dtype> {
+class TanHLayer : public NeuronLayer<Dtype> {
  public:
-  explicit DropoutLayer(const LayerParameter& param)
+  explicit TanHLayer(const LayerParameter& param)
       : NeuronLayer<Dtype>(param) {}
-  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
 
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
@@ -123,18 +125,13 @@ class DropoutLayer : public NeuronLayer<Dtype> {
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-
-  shared_ptr<SyncedMemory> rand_vec_;
-  float threshold_;
-  float scale_;
-  unsigned int uint_thres_;
 };
 
 template <typename Dtype>
-class SplitLayer : public Layer<Dtype> {
+class AccuracyLayer : public Layer<Dtype> {
  public:
-  explicit SplitLayer(const LayerParameter& param)
+  explicit AccuracyLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -142,21 +139,17 @@ class SplitLayer : public Layer<Dtype> {
  protected:
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
+  // The accuracy layer should not be used to compute backward operations.
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-
-  int count_;
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+    NOT_IMPLEMENTED;
+  }
 };
 
-
 template <typename Dtype>
-class FlattenLayer : public Layer<Dtype> {
+class ConcatLayer : public Layer<Dtype> {
  public:
-  explicit FlattenLayer(const LayerParameter& param)
+  explicit ConcatLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -171,14 +164,19 @@ class FlattenLayer : public Layer<Dtype> {
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
+  Blob<Dtype> col_bob_;
   int count_;
+  int num_;
+  int channels_;
+  int height_;
+  int width_;
+  int concat_dim_;
 };
 
-
 template <typename Dtype>
-class InnerProductLayer : public Layer<Dtype> {
+class ConvolutionLayer : public Layer<Dtype> {
  public:
-  explicit InnerProductLayer(const LayerParameter& param)
+  explicit ConvolutionLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -188,25 +186,41 @@ class InnerProductLayer : public Layer<Dtype> {
       vector<Blob<Dtype>*>* top);
   virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
+  int kernel_size_;
+  int stride_;
+  int num_;
+  int channels_;
+  int pad_;
+  int height_;
+  int width_;
+  int num_output_;
+  int group_;
+  Blob<Dtype> col_buffer_;
+  shared_ptr<SyncedMemory> bias_multiplier_;
+  bool bias_term_;
   int M_;
   int K_;
   int N_;
-  bool bias_term_;
-  shared_ptr<SyncedMemory> bias_multiplier_;
 };
 
+// This function is used to create a pthread that prefetches the data.
+template <typename Dtype>
+void* DataLayerPrefetch(void* layer_pointer);
 
 template <typename Dtype>
-class LRNLayer : public Layer<Dtype> {
+class DataLayer : public Layer<Dtype> {
+  // The function used to perform prefetching.
+  friend void* DataLayerPrefetch<Dtype>(void* layer_pointer);
+
  public:
-  explicit LRNLayer(const LayerParameter& param)
+  explicit DataLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
+  virtual ~DataLayer();
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
@@ -216,53 +230,49 @@ class LRNLayer : public Layer<Dtype> {
   virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
 
-  // scale_ stores the intermediate summing results
-  Blob<Dtype> scale_;
-  int size_;
-  int pre_pad_;
-  Dtype alpha_;
-  Dtype beta_;
-  int num_;
-  int channels_;
-  int height_;
-  int width_;
+  shared_ptr<leveldb::DB> db_;
+  shared_ptr<leveldb::Iterator> iter_;
+  int datum_channels_;
+  int datum_height_;
+  int datum_width_;
+  int datum_size_;
+  pthread_t thread_;
+  shared_ptr<Blob<Dtype> > prefetch_data_;
+  shared_ptr<Blob<Dtype> > prefetch_label_;
+  Blob<Dtype> data_mean_;
 };
 
-
 template <typename Dtype>
-class Im2colLayer : public Layer<Dtype> {
+class EuclideanLossLayer : public Layer<Dtype> {
  public:
-  explicit Im2colLayer(const LayerParameter& param)
-      : Layer<Dtype>(param) {}
+  explicit EuclideanLossLayer(const LayerParameter& param)
+      : Layer<Dtype>(param), difference_() {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
  protected:
+  // The loss layer will do nothing during forward - all computation are
+  // carried out in the backward pass.
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
+  // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+  //     vector<Blob<Dtype>*>* top);
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+  //     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
-  int kernel_size_;
-  int stride_;
-  int channels_;
-  int height_;
-  int width_;
-  int pad_;
+  Blob<Dtype> difference_;
 };
 
 template <typename Dtype>
-class PoolingLayer : public Layer<Dtype> {
+class FlattenLayer : public Layer<Dtype> {
  public:
-  explicit PoolingLayer(const LayerParameter& param)
+  explicit FlattenLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -277,22 +287,15 @@ class PoolingLayer : public Layer<Dtype> {
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
-  int kernel_size_;
-  int stride_;
-  int channels_;
-  int height_;
-  int width_;
-  int pooled_height_;
-  int pooled_width_;
-  Blob<float> rand_idx_;
+  int count_;
 };
 
-
 template <typename Dtype>
-class ConvolutionLayer : public Layer<Dtype> {
+class HDF5DataLayer : public Layer<Dtype> {
  public:
-  explicit ConvolutionLayer(const LayerParameter& param)
+  explicit HDF5DataLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
+  virtual ~HDF5DataLayer();
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
@@ -305,28 +308,20 @@ class ConvolutionLayer : public Layer<Dtype> {
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual void load_hdf5_file_data(const char* filename);
 
-  int kernel_size_;
-  int stride_;
-  int num_;
-  int channels_;
-  int pad_;
-  int height_;
-  int width_;
-  int num_output_;
-  int group_;
-  Blob<Dtype> col_buffer_;
-  shared_ptr<SyncedMemory> bias_multiplier_;
-  bool bias_term_;
-  int M_;
-  int K_;
-  int N_;
+  std::vector<std::string> hdf_filenames_;
+  unsigned int num_files_;
+  unsigned int current_file_;
+  hsize_t current_row_;
+  Blob<Dtype> data_blob_;
+  Blob<Dtype> label_blob_;
 };
 
 template <typename Dtype>
-class ConcatLayer : public Layer<Dtype> {
+class Im2colLayer : public Layer<Dtype> {
  public:
-  explicit ConcatLayer(const LayerParameter& param)
+  explicit Im2colLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -341,28 +336,27 @@ class ConcatLayer : public Layer<Dtype> {
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
-  Blob<Dtype> col_bob_;
-  int count_;
-  int num_;
+  int kernel_size_;
+  int stride_;
   int channels_;
   int height_;
   int width_;
-  int concat_dim_;
+  int pad_;
 };
 
 // This function is used to create a pthread that prefetches the data.
 template <typename Dtype>
-void* DataLayerPrefetch(void* layer_pointer);
+void* ImagesLayerPrefetch(void* layer_pointer);
 
 template <typename Dtype>
-class DataLayer : public Layer<Dtype> {
+class ImagesLayer : public Layer<Dtype> {
   // The function used to perform prefetching.
-  friend void* DataLayerPrefetch<Dtype>(void* layer_pointer);
+  friend void* ImagesLayerPrefetch<Dtype>(void* layer_pointer);
 
  public:
-  explicit DataLayer(const LayerParameter& param)
+  explicit ImagesLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
-  virtual ~DataLayer();
+  virtual ~ImagesLayer();
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
@@ -376,8 +370,8 @@ class DataLayer : public Layer<Dtype> {
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
 
-  shared_ptr<leveldb::DB> db_;
-  shared_ptr<leveldb::Iterator> iter_;
+  vector<std::pair<std::string, int> > lines_;
+  int lines_id_;
   int datum_channels_;
   int datum_height_;
   int datum_width_;
@@ -388,51 +382,34 @@ class DataLayer : public Layer<Dtype> {
   Blob<Dtype> data_mean_;
 };
 
-// This function is used to create a pthread that prefetches the data.
-template <typename Dtype>
-void* ImagesLayerPrefetch(void* layer_pointer);
-
 template <typename Dtype>
-class ImagesLayer : public Layer<Dtype> {
-  // The function used to perform prefetching.
-  friend void* ImagesLayerPrefetch<Dtype>(void* layer_pointer);
-
+class InfogainLossLayer : public Layer<Dtype> {
  public:
-  explicit ImagesLayer(const LayerParameter& param)
-      : Layer<Dtype>(param) {}
-  virtual ~ImagesLayer();
+  explicit InfogainLossLayer(const LayerParameter& param)
+      : Layer<Dtype>(param), infogain_() {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
  protected:
+  // The loss layer will do nothing during forward - all computation are
+  // carried out in the backward pass.
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
+  // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+  //     vector<Blob<Dtype>*>* top);
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
-  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom) { return; }
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+  //     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
-  vector<std::pair<std::string, int> > lines_;
-  int lines_id_;
-  int datum_channels_;
-  int datum_height_;
-  int datum_width_;
-  int datum_size_;
-  pthread_t thread_;
-  shared_ptr<Blob<Dtype> > prefetch_data_;
-  shared_ptr<Blob<Dtype> > prefetch_label_;
-  Blob<Dtype> data_mean_;
+  Blob<Dtype> infogain_;
 };
 
-
 template <typename Dtype>
-class HDF5DataLayer : public Layer<Dtype> {
+class InnerProductLayer : public Layer<Dtype> {
  public:
-  explicit HDF5DataLayer(const LayerParameter& param)
+  explicit InnerProductLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
-  virtual ~HDF5DataLayer();
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
@@ -445,17 +422,14 @@ class HDF5DataLayer : public Layer<Dtype> {
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  virtual void load_hdf5_file_data(const char* filename);
 
-  std::vector<std::string> hdf_filenames_;
-  unsigned int num_files_;
-  unsigned int current_file_;
-  hsize_t current_row_;
-  Blob<Dtype> data_blob_;
-  Blob<Dtype> label_blob_;
+  int M_;
+  int K_;
+  int N_;
+  bool bias_term_;
+  shared_ptr<SyncedMemory> bias_multiplier_;
 };
 
-
 template <typename Dtype>
 class HDF5OutputLayer : public Layer<Dtype> {
  public:
@@ -482,11 +456,10 @@ class HDF5OutputLayer : public Layer<Dtype> {
   Blob<Dtype> label_blob_;
 };
 
-
 template <typename Dtype>
-class SoftmaxLayer : public Layer<Dtype> {
+class LRNLayer : public Layer<Dtype> {
  public:
-  explicit SoftmaxLayer(const LayerParameter& param)
+  explicit LRNLayer(const LayerParameter& param)
       : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
@@ -499,15 +472,20 @@ class SoftmaxLayer : public Layer<Dtype> {
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
   virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
-  // sum_multiplier is just used to carry out sum using blas
-  Blob<Dtype> sum_multiplier_;
-  // scale is an intermediate blob to hold temporary results.
+  // scale_ stores the intermediate summing results
   Blob<Dtype> scale_;
+  int size_;
+  int pre_pad_;
+  Dtype alpha_;
+  Dtype beta_;
+  int num_;
+  int channels_;
+  int height_;
+  int width_;
 };
 
-
 template <typename Dtype>
 class MultinomialLogisticLossLayer : public Layer<Dtype> {
  public:
@@ -530,28 +508,56 @@ class MultinomialLogisticLossLayer : public Layer<Dtype> {
 };
 
 template <typename Dtype>
-class InfogainLossLayer : public Layer<Dtype> {
+class PoolingLayer : public Layer<Dtype> {
  public:
-  explicit InfogainLossLayer(const LayerParameter& param)
-      : Layer<Dtype>(param), infogain_() {}
+  explicit PoolingLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
 protected:
-  // The loss layer will do nothing during forward - all computation are
-  // carried out in the backward pass.
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-  //     vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-  //     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
-  Blob<Dtype> infogain_;
+  int kernel_size_;
+  int stride_;
+  int channels_;
+  int height_;
+  int width_;
+  int pooled_height_;
+  int pooled_width_;
+  Blob<float> rand_idx_;
 };
 
+template <typename Dtype>
+class SoftmaxLayer : public Layer<Dtype> {
+ public:
+  explicit SoftmaxLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+
+ protected:
+  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+
+  // sum_multiplier is just used to carry out sum using blas
+  Blob<Dtype> sum_multiplier_;
+  // scale is an intermediate blob to hold temporary results.
+  Blob<Dtype> scale_;
+};
 
 // SoftmaxWithLossLayer is a layer that implements softmax and then computes
 // the loss - it is preferred over softmax + multinomiallogisticloss in the
@@ -584,47 +590,25 @@ class SoftmaxWithLossLayer : public Layer<Dtype> {
   vector<Blob<Dtype>*> softmax_top_vec_;
 };
 
-
 template <typename Dtype>
-class EuclideanLossLayer : public Layer<Dtype> {
+class SplitLayer : public Layer<Dtype> {
  public:
-  explicit EuclideanLossLayer(const LayerParameter& param)
-      : Layer<Dtype>(param), difference_() {}
+  explicit SplitLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
   virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
 protected:
-  // The loss layer will do nothing during forward - all computation are
-  // carried out in the backward pass.
   virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
-  // virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-  //     vector<Blob<Dtype>*>* top);
+  virtual Dtype Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
   virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-  // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
-  //     const bool propagate_down, vector<Blob<Dtype>*>* bottom);
-
-  Blob<Dtype> difference_;
-};
-
-
-template <typename Dtype>
-class AccuracyLayer : public Layer<Dtype> {
- public:
-  explicit AccuracyLayer(const LayerParameter& param)
-      : Layer<Dtype>(param) {}
-  virtual void SetUp(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
+  virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom);
 
- protected:
-  virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top);
-  // The accuracy layer should not be used to compute backward operations.
-  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-    NOT_IMPLEMENTED;
-  }
+  int count_;
 };
 
 // This function is used to create a pthread that prefetches the window data.
-- 
2.7.4