From 31326a111bcb5ac5a9adeec13cfe6330e78ef31e Mon Sep 17 00:00:00 2001
From: Jonathan L Long
Date: Fri, 19 Sep 2014 16:04:37 -0700
Subject: [PATCH] fix types of (Layer)SetUp, Reshape, Forward, and Backward calls

Using the type vector<Blob<Dtype>*>* for outputs allows modification of the
vector itself, while it is only okay to modify the blobs pointed to by the
elements of the vector. Switching the types to const vector<Blob<Dtype>*>&
makes them more correct.
---
 include/caffe/common_layers.hpp                    | 114 ++++++++++-----------
 include/caffe/data_layers.hpp                      |  60 +++++------
 include/caffe/layer.hpp                            |  51 ++++-----
 include/caffe/loss_layers.hpp                      |  74 ++++++-------
 include/caffe/neuron_layers.hpp                    |  98 +++++++++---------
 include/caffe/test/test_gradient_check_util.hpp    |  85 +++++++--------
 include/caffe/util/device_alternate.hpp            |   8 +-
 include/caffe/vision_layers.hpp                    |  76 +++++++-------
 src/caffe/layers/absval_layer.cpp                  |  16 +--
 src/caffe/layers/absval_layer.cu                   |  12 +--
 src/caffe/layers/accuracy_layer.cpp                |  10 +-
 src/caffe/layers/argmax_layer.cpp                  |  16 +--
 src/caffe/layers/base_data_layer.cpp               |  12 +--
 src/caffe/layers/base_data_layer.cu                |   6 +-
 src/caffe/layers/bnll_layer.cpp                    |  12 +--
 src/caffe/layers/bnll_layer.cu                     |  12 +--
 src/caffe/layers/concat_layer.cpp                  |  26 ++---
 src/caffe/layers/concat_layer.cu                   |  18 ++--
 src/caffe/layers/contrastive_loss_layer.cpp        |  18 ++--
 src/caffe/layers/contrastive_loss_layer.cu         |  16 +--
 src/caffe/layers/conv_layer.cpp                    |  30 +++---
 src/caffe/layers/conv_layer.cu                     |  18 ++--
 src/caffe/layers/cudnn_conv_layer.cpp              |   4 +-
 src/caffe/layers/cudnn_conv_layer.cu               |  10 +-
 src/caffe/layers/cudnn_pooling_layer.cpp           |   4 +-
 src/caffe/layers/cudnn_pooling_layer.cu            |  12 +--
 src/caffe/layers/cudnn_relu_layer.cpp              |   4 +-
 src/caffe/layers/cudnn_relu_layer.cu               |  10 +-
 src/caffe/layers/cudnn_sigmoid_layer.cpp           |   4 +-
 src/caffe/layers/cudnn_sigmoid_layer.cu            |  10 +-
 src/caffe/layers/cudnn_softmax_layer.cpp           |   4 +-
 src/caffe/layers/cudnn_softmax_layer.cu            |  10 +-
 src/caffe/layers/cudnn_tanh_layer.cpp              |   4 +-
 src/caffe/layers/cudnn_tanh_layer.cu               |  10 +-
 src/caffe/layers/data_layer.cpp                    |  14 +--
 src/caffe/layers/dropout_layer.cpp                 |  14 +--
 src/caffe/layers/dropout_layer.cu                  |  10 +-
 src/caffe/layers/dummy_data_layer.cpp              |  12 +--
 src/caffe/layers/eltwise_layer.cpp                 |  28 ++---
 src/caffe/layers/eltwise_layer.cu                  |  20 ++--
 src/caffe/layers/euclidean_loss_layer.cpp          |  14 +--
 src/caffe/layers/euclidean_loss_layer.cu           |  12 +--
 src/caffe/layers/flatten_layer.cpp                 |  14 +--
 src/caffe/layers/flatten_layer.cu                  |   8 +-
 src/caffe/layers/hdf5_data_layer.cpp               |  22 ++--
 src/caffe/layers/hdf5_data_layer.cu                |  10 +-
 src/caffe/layers/hdf5_output_layer.cpp             |   4 +-
 src/caffe/layers/hdf5_output_layer.cu              |   4 +-
 src/caffe/layers/hinge_loss_layer.cpp              |  14 +--
 src/caffe/layers/im2col_layer.cpp                  |  18 ++--
 src/caffe/layers/im2col_layer.cu                   |  12 +--
 src/caffe/layers/image_data_layer.cpp              |  14 +--
 src/caffe/layers/infogain_loss_layer.cpp           |  24 ++---
 src/caffe/layers/inner_product_layer.cpp           |  16 +--
 src/caffe/layers/inner_product_layer.cu            |  10 +-
 src/caffe/layers/loss_layer.cpp                    |   6 +-
 src/caffe/layers/lrn_layer.cpp                     |  56 +++++-----
 src/caffe/layers/lrn_layer.cu                      |  14 +--
 src/caffe/layers/memory_data_layer.cpp             |  12 +--
 .../layers/multinomial_logistic_loss_layer.cpp     |  20 ++--
 src/caffe/layers/mvn_layer.cpp                     |  20 ++--
 src/caffe/layers/mvn_layer.cu                      |  16 +--
 src/caffe/layers/neuron_layer.cpp                  |   4 +-
 src/caffe/layers/pooling_layer.cpp                 |  40 ++++----
 src/caffe/layers/pooling_layer.cu                  |  16 +--
 src/caffe/layers/power_layer.cpp                   |  14 +--
 src/caffe/layers/power_layer.cu                    |  12 +--
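Note (editorial, not part of the patch): the sketch below illustrates the
signature change in isolation. It assumes the usual Caffe templates
Layer<Dtype> and Blob<Dtype>, the caffe_copy() helper from
caffe/util/math_functions.hpp, and a hypothetical DummyLayer introduced purely
for illustration. Before this change, top blobs were passed as
vector<Blob<Dtype>*>* and dereferenced as (*top)[i]; afterwards they are passed
as const vector<Blob<Dtype>*>& and indexed as top[i], so the vector itself can
no longer be resized or reassigned while the blobs it points to stay mutable.

    // Hypothetical layer showing the post-patch signatures (sketch only).
    template <typename Dtype>
    class DummyLayer : public Layer<Dtype> {
     public:
      explicit DummyLayer(const LayerParameter& param) : Layer<Dtype>(param) {}
      // Reshape receives top by const reference; the blobs may be reshaped,
      // the vector may not be grown or shrunk.
      virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top) {
        top[0]->ReshapeLike(*bottom[0]);
      }

     protected:
      virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
          const vector<Blob<Dtype>*>& top) {
        // Callers now index the vector directly: (*top)[0] becomes top[0].
        caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
                   top[0]->mutable_cpu_data());
      }
      virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
          const vector<bool>& propagate_down,
          const vector<Blob<Dtype>*>& bottom) {
        if (propagate_down[0]) {
          caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                     bottom[0]->mutable_cpu_diff());
        }
      }
    };

The const reference documents the contract the code already relied on:
implementations may fill or reshape the blobs the vector points to, but must
not modify the vector itself.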
src/caffe/layers/relu_layer.cpp | 12 +-- src/caffe/layers/relu_layer.cu | 12 +-- .../layers/sigmoid_cross_entropy_loss_layer.cpp | 24 ++--- .../layers/sigmoid_cross_entropy_loss_layer.cu | 16 +-- src/caffe/layers/sigmoid_layer.cpp | 10 +- src/caffe/layers/sigmoid_layer.cu | 10 +- src/caffe/layers/silence_layer.cpp | 8 +- src/caffe/layers/silence_layer.cu | 10 +- src/caffe/layers/slice_layer.cpp | 58 +++++------ src/caffe/layers/slice_layer.cu | 18 ++-- src/caffe/layers/softmax_layer.cpp | 16 +-- src/caffe/layers/softmax_layer.cu | 8 +- src/caffe/layers/softmax_loss_layer.cpp | 28 ++--- src/caffe/layers/softmax_loss_layer.cu | 4 +- src/caffe/layers/split_layer.cpp | 24 ++--- src/caffe/layers/split_layer.cu | 14 +-- src/caffe/layers/tanh_layer.cpp | 10 +- src/caffe/layers/tanh_layer.cu | 10 +- src/caffe/layers/threshold_layer.cpp | 6 +- src/caffe/layers/threshold_layer.cu | 4 +- src/caffe/layers/window_data_layer.cpp | 20 ++-- src/caffe/net.cpp | 10 +- src/caffe/test/test_accuracy_layer.cpp | 12 +-- src/caffe/test/test_argmax_layer.cpp | 20 ++-- src/caffe/test/test_concat_layer.cpp | 12 +-- src/caffe/test/test_contrastive_loss_layer.cpp | 14 +-- src/caffe/test/test_convolution_layer.cpp | 64 ++++++------ src/caffe/test/test_data_layer.cpp | 24 ++--- src/caffe/test/test_dummy_data_layer.cpp | 14 +-- src/caffe/test/test_eltwise_layer.cpp | 38 +++---- src/caffe/test/test_euclidean_loss_layer.cpp | 14 +-- src/caffe/test/test_flatten_layer.cpp | 10 +- src/caffe/test/test_hdf5_output_layer.cpp | 4 +- src/caffe/test/test_hdf5data_layer.cpp | 6 +- src/caffe/test/test_hinge_loss_layer.cpp | 8 +- src/caffe/test/test_im2col_layer.cpp | 18 ++-- src/caffe/test/test_image_data_layer.cpp | 12 +-- src/caffe/test/test_infogain_loss_layer.cpp | 4 +- src/caffe/test/test_inner_product_layer.cpp | 10 +- src/caffe/test/test_lrn_layer.cpp | 30 +++--- src/caffe/test/test_maxpool_dropout_layers.cpp | 26 ++--- src/caffe/test/test_memory_data_layer.cpp | 10 +- .../test/test_multinomial_logistic_loss_layer.cpp | 6 +- src/caffe/test/test_mvn_layer.cpp | 24 ++--- src/caffe/test/test_neuron_layer.cpp | 96 ++++++++--------- src/caffe/test/test_pooling_layer.cpp | 92 ++++++++--------- src/caffe/test/test_power_layer.cpp | 8 +- .../test/test_sigmoid_cross_entropy_loss_layer.cpp | 10 +- src/caffe/test/test_slice_layer.cpp | 20 ++-- src/caffe/test/test_softmax_layer.cpp | 16 +-- src/caffe/test/test_softmax_with_loss_layer.cpp | 4 +- src/caffe/test/test_split_layer.cpp | 10 +- src/caffe/test/test_stochastic_pooling.cpp | 14 +-- src/caffe/test/test_threshold_layer.cpp | 10 +- tools/caffe.cpp | 6 +- 122 files changed, 1199 insertions(+), 1197 deletions(-) diff --git a/include/caffe/common_layers.hpp b/include/caffe/common_layers.hpp index 1f945ca..9718b82 100644 --- a/include/caffe/common_layers.hpp +++ b/include/caffe/common_layers.hpp @@ -39,9 +39,9 @@ class ArgMaxLayer : public Layer { explicit ArgMaxLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_ARGMAX; @@ -62,10 +62,10 @@ class ArgMaxLayer : public Layer { * @f$ (for @f$ K = 1 @f$). 
*/ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /// @brief Not implemented (non-differentiable function) virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { NOT_IMPLEMENTED; } bool out_max_val_; @@ -82,9 +82,9 @@ class ConcatLayer : public Layer { explicit ConcatLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_CONCAT; @@ -110,9 +110,9 @@ class ConcatLayer : public Layer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the concatenate inputs. @@ -137,9 +137,9 @@ class ConcatLayer : public Layer { * @f$ */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); Blob col_bob_; int count_; @@ -162,9 +162,9 @@ class EltwiseLayer : public Layer { explicit EltwiseLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_ELTWISE; @@ -174,13 +174,13 @@ class EltwiseLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); EltwiseParameter_EltwiseOp op_; vector coeffs_; @@ -205,7 +205,7 @@ class FlattenLayer : public Layer { explicit FlattenLayer(const LayerParameter& param) : Layer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_FLATTEN; @@ -223,9 +223,9 @@ class FlattenLayer : public Layer { * the outputs -- i.e., the (virtually) copied, flattened inputs */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the concatenate inputs. 
@@ -237,9 +237,9 @@ class FlattenLayer : public Layer { * gradient is (virtually) copied */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int count_; }; @@ -256,9 +256,9 @@ class InnerProductLayer : public Layer { explicit InnerProductLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_INNER_PRODUCT; @@ -268,13 +268,13 @@ class InnerProductLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int M_; int K_; @@ -294,7 +294,7 @@ class MVNLayer : public Layer { explicit MVNLayer(const LayerParameter& param) : Layer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_MVN; @@ -304,13 +304,13 @@ class MVNLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); Blob mean_, variance_, temp_; @@ -328,7 +328,7 @@ class SilenceLayer : public Layer { explicit SilenceLayer(const LayerParameter& param) : Layer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_SILENCE; @@ -338,15 +338,15 @@ class SilenceLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} // We can't define Forward_gpu here, since STUB_GPU will provide // its own definition for CPU_ONLY mode. 
virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -360,7 +360,7 @@ class SoftmaxLayer : public Layer { explicit SoftmaxLayer(const LayerParameter& param) : Layer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_SOFTMAX; @@ -370,13 +370,13 @@ class SoftmaxLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); /// sum_multiplier is used to carry out sum using BLAS Blob sum_multiplier_; @@ -395,16 +395,16 @@ class CuDNNSoftmaxLayer : public SoftmaxLayer { explicit CuDNNSoftmaxLayer(const LayerParameter& param) : SoftmaxLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual ~CuDNNSoftmaxLayer(); protected: virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; @@ -424,7 +424,7 @@ class SplitLayer : public Layer { explicit SplitLayer(const LayerParameter& param) : Layer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_SPLIT; @@ -434,13 +434,13 @@ class SplitLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int count_; }; @@ -457,9 +457,9 @@ class SliceLayer : public Layer { explicit SliceLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_SLICE; @@ -469,13 +469,13 @@ class SliceLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); 
virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); Blob col_bob_; int count_; diff --git a/include/caffe/data_layers.hpp b/include/caffe/data_layers.hpp index 8e2637b..b3f93ce 100644 --- a/include/caffe/data_layers.hpp +++ b/include/caffe/data_layers.hpp @@ -37,17 +37,17 @@ class BaseDataLayer : public Layer { // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden except by the BasePrefetchingDataLayer. virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void DataLayerSetUp(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) {} + const vector& propagate_down, const vector*>& bottom) {} virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) {} + const vector& propagate_down, const vector*>& bottom) {} int datum_channels() const { return datum_channels_; } int datum_height() const { return datum_height_; } @@ -78,12 +78,12 @@ class BasePrefetchingDataLayer : // DataLayerSetUp to do special data layer setup for individual layer types. // This method may not be overridden. void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void CreatePrefetchThread(); virtual void JoinPrefetchThread(); @@ -102,7 +102,7 @@ class DataLayer : public BasePrefetchingDataLayer { : BasePrefetchingDataLayer(param) {} virtual ~DataLayer(); virtual void DataLayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_DATA; @@ -136,10 +136,10 @@ class DummyDataLayer : public Layer { explicit DummyDataLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_DUMMY_DATA; @@ -149,11 +149,11 @@ class DummyDataLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) {} + const vector& propagate_down, const vector*>& bottom) {} virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) {} + const vector& propagate_down, const vector*>& bottom) {} vector > > fillers_; vector refill_; @@ -171,10 +171,10 @@ class HDF5DataLayer : public Layer { : Layer(param) {} virtual ~HDF5DataLayer(); virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); // Data layers have no bottoms, so reshaping is trivial. 
virtual void Reshape(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_HDF5_DATA; @@ -184,13 +184,13 @@ class HDF5DataLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) {} + const vector& propagate_down, const vector*>& bottom) {} virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) {} + const vector& propagate_down, const vector*>& bottom) {} virtual void LoadHDF5FileData(const char* filename); std::vector hdf_filenames_; @@ -212,10 +212,10 @@ class HDF5OutputLayer : public Layer { explicit HDF5OutputLayer(const LayerParameter& param); virtual ~HDF5OutputLayer(); virtual void LayerSetUp(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} // Data layers have no bottoms, so reshaping is trivial. virtual void Reshape(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_HDF5_OUTPUT; @@ -228,13 +228,13 @@ class HDF5OutputLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void SaveBlobs(); std::string file_name_; @@ -255,7 +255,7 @@ class ImageDataLayer : public BasePrefetchingDataLayer { : BasePrefetchingDataLayer(param) {} virtual ~ImageDataLayer(); virtual void DataLayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_IMAGE_DATA; @@ -283,7 +283,7 @@ class MemoryDataLayer : public BaseDataLayer { explicit MemoryDataLayer(const LayerParameter& param) : BaseDataLayer(param), has_new_data_(false) {} virtual void DataLayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_MEMORY_DATA; @@ -301,7 +301,7 @@ class MemoryDataLayer : public BaseDataLayer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); int batch_size_; Dtype* data_; @@ -326,7 +326,7 @@ class WindowDataLayer : public BasePrefetchingDataLayer { : BasePrefetchingDataLayer(param) {} virtual ~WindowDataLayer(); virtual void DataLayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_WINDOW_DATA; diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index e160075..18ff274 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -56,8 +56,9 @@ class Layer { * Sets up the loss weight multiplier blobs for any non-zero loss weights. * This method may not be overridden. 
*/ - void SetUp(const vector*>& bottom, vector*>* top) { - CheckBlobCounts(bottom, *top); + void SetUp(const vector*>& bottom, + const vector*>& top) { + CheckBlobCounts(bottom, top); LayerSetUp(bottom, top); Reshape(bottom, top); SetLossWeights(top); @@ -80,7 +81,7 @@ class Layer { * adjust the top blob sizes. */ virtual void LayerSetUp(const vector*>& bottom, - vector*>* top) {} + const vector*>& top) {} /** * @brief Adjust the shapes of top blobs and internal buffers to accomodate @@ -95,7 +96,7 @@ class Layer { * accomodate the bottom blobs. */ virtual void Reshape(const vector*>& bottom, - vector*>* top) = 0; + const vector*>& top) = 0; /** * @brief Given the bottom blobs, compute the top blobs and the loss. @@ -115,7 +116,7 @@ class Layer { * Your layer should implement Forward_cpu and (optionally) Forward_gpu. */ inline Dtype Forward(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Given the top blob error gradients, compute the bottom blob error @@ -140,7 +141,7 @@ class Layer { */ inline void Backward(const vector*>& top, const vector& propagate_down, - vector*>* bottom); + const vector*>& bottom); /** * @brief Returns the vector of learnable parameter blobs. @@ -306,13 +307,13 @@ class Layer { /** @brief Using the CPU device, compute the layer output. */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top) = 0; + const vector*>& top) = 0; /** * @brief Using the GPU device, compute the layer output. * Fall back to Forward_cpu() if unavailable. */ virtual void Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // LOG(WARNING) << "Using CPU code as backup."; return Forward_cpu(bottom, top); } @@ -323,7 +324,7 @@ class Layer { */ virtual void Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) = 0; + const vector*>& bottom) = 0; /** * @brief Using the GPU device, compute the gradients for any parameters and * for the bottom blobs if propagate_down is true. @@ -331,7 +332,7 @@ class Layer { */ virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { // LOG(WARNING) << "Using CPU code as backup."; Backward_cpu(top, propagate_down, bottom); } @@ -384,17 +385,17 @@ class Layer { * Called by SetUp to initialize the weights associated with any top blobs in * the loss function. Store non-zero loss weights in the diff blob. */ - inline void SetLossWeights(vector*>* top) { + inline void SetLossWeights(const vector*>& top) { const int num_loss_weights = layer_param_.loss_weight_size(); if (num_loss_weights) { - CHECK_EQ(top->size(), num_loss_weights) << "loss_weight must be " + CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be " "unspecified or specified once per top blob."; - for (int top_id = 0; top_id < top->size(); ++top_id) { + for (int top_id = 0; top_id < top.size(); ++top_id) { const Dtype loss_weight = layer_param_.loss_weight(top_id); if (loss_weight == Dtype(0)) { continue; } this->set_loss(top_id, loss_weight); - const int count = (*top)[top_id]->count(); - Dtype* loss_multiplier = (*top)[top_id]->mutable_cpu_diff(); + const int count = top[top_id]->count(); + Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff(); caffe_set(count, loss_weight, loss_multiplier); } } @@ -408,27 +409,27 @@ class Layer { // functions. 
template inline Dtype Layer::Forward(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { Dtype loss = 0; switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); - for (int top_id = 0; top_id < top->size(); ++top_id) { + for (int top_id = 0; top_id < top.size(); ++top_id) { if (!this->loss(top_id)) { continue; } - const int count = (*top)[top_id]->count(); - const Dtype* data = (*top)[top_id]->cpu_data(); - const Dtype* loss_weights = (*top)[top_id]->cpu_diff(); + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->cpu_data(); + const Dtype* loss_weights = top[top_id]->cpu_diff(); loss += caffe_cpu_dot(count, data, loss_weights); } break; case Caffe::GPU: Forward_gpu(bottom, top); #ifndef CPU_ONLY - for (int top_id = 0; top_id < top->size(); ++top_id) { + for (int top_id = 0; top_id < top.size(); ++top_id) { if (!this->loss(top_id)) { continue; } - const int count = (*top)[top_id]->count(); - const Dtype* data = (*top)[top_id]->gpu_data(); - const Dtype* loss_weights = (*top)[top_id]->gpu_diff(); + const int count = top[top_id]->count(); + const Dtype* data = top[top_id]->gpu_data(); + const Dtype* loss_weights = top[top_id]->gpu_diff(); Dtype blob_loss = 0; caffe_gpu_dot(count, data, loss_weights, &blob_loss); loss += blob_loss; @@ -444,7 +445,7 @@ inline Dtype Layer::Forward(const vector*>& bottom, template inline void Layer::Backward(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { switch (Caffe::mode()) { case Caffe::CPU: Backward_cpu(top, propagate_down, bottom); diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp index 08aa775..9fe58cd 100644 --- a/include/caffe/loss_layers.hpp +++ b/include/caffe/loss_layers.hpp @@ -33,9 +33,9 @@ class AccuracyLayer : public Layer { explicit AccuracyLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_ACCURACY; @@ -70,12 +70,12 @@ class AccuracyLayer : public Layer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /// @brief Not implemented -- AccuracyLayer cannot be used as a loss. 
virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { for (int i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } @@ -98,9 +98,9 @@ class LossLayer : public Layer { explicit LossLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp( - const vector*>& bottom, vector*>* top); + const vector*>& bottom, const vector*>& top); virtual void Reshape( - const vector*>& bottom, vector*>* top); + const vector*>& bottom, const vector*>& top); virtual inline int ExactNumBottomBlobs() const { return 2; } @@ -151,7 +151,7 @@ class ContrastiveLossLayer : public LossLayer { explicit ContrastiveLossLayer(const LayerParameter& param) : LossLayer(param), diff_() {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline int ExactNumBottomBlobs() const { return 3; } virtual inline LayerParameter_LayerType type() const { @@ -168,9 +168,9 @@ class ContrastiveLossLayer : public LossLayer { protected: /// @copydoc ContrastiveLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the Contrastive error gradient w.r.t. the inputs. @@ -198,9 +198,9 @@ class ContrastiveLossLayer : public LossLayer { * propagate_down[1] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); Blob diff_; // cached for backward pass Blob dist_sq_; // cached for backward pass @@ -240,7 +240,7 @@ class EuclideanLossLayer : public LossLayer { explicit EuclideanLossLayer(const LayerParameter& param) : LossLayer(param), diff_() {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_EUCLIDEAN_LOSS; @@ -257,9 +257,9 @@ class EuclideanLossLayer : public LossLayer { protected: /// @copydoc EuclideanLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the Euclidean error gradient w.r.t. the inputs. @@ -295,9 +295,9 @@ class EuclideanLossLayer : public LossLayer { * @f$ if propagate_down[1] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); Blob diff_; }; @@ -358,7 +358,7 @@ class HingeLossLayer : public LossLayer { protected: /// @copydoc HingeLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the hinge loss error gradient w.r.t. the predictions. 
@@ -388,7 +388,7 @@ class HingeLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -429,9 +429,9 @@ class InfogainLossLayer : public LossLayer { explicit InfogainLossLayer(const LayerParameter& param) : LossLayer(param), infogain_() {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); // InfogainLossLayer takes 2-3 bottom Blobs; if there are 3 the third should // be the infogain matrix. (Otherwise the infogain matrix is loaded from a @@ -447,7 +447,7 @@ class InfogainLossLayer : public LossLayer { protected: /// @copydoc InfogainLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the infogain loss error gradient w.r.t. the predictions. @@ -482,7 +482,7 @@ class InfogainLossLayer : public LossLayer { * gradient computation is not implemented. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); Blob infogain_; }; @@ -522,7 +522,7 @@ class MultinomialLogisticLossLayer : public LossLayer { explicit MultinomialLogisticLossLayer(const LayerParameter& param) : LossLayer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_MULTINOMIAL_LOGISTIC_LOSS; @@ -531,7 +531,7 @@ class MultinomialLogisticLossLayer : public LossLayer { protected: /// @copydoc MultinomialLogisticLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the multinomial logistic loss error gradient w.r.t. the @@ -562,7 +562,7 @@ class MultinomialLogisticLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -602,9 +602,9 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { sigmoid_layer_(new SigmoidLayer(param)), sigmoid_output_(new Blob()) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_SIGMOID_CROSS_ENTROPY_LOSS; @@ -613,9 +613,9 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { protected: /// @copydoc SigmoidCrossEntropyLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the sigmoid cross-entropy loss error gradient w.r.t. 
the @@ -648,9 +648,9 @@ class SigmoidCrossEntropyLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); /// The internal SigmoidLayer used to map predictions to probabilities. shared_ptr > sigmoid_layer_; @@ -700,9 +700,9 @@ class SoftmaxWithLossLayer : public LossLayer { : LossLayer(param), softmax_layer_(new SoftmaxLayer(param)) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_SOFTMAX_LOSS; @@ -717,9 +717,9 @@ class SoftmaxWithLossLayer : public LossLayer { protected: /// @copydoc SoftmaxWithLossLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the softmax loss error gradient w.r.t. the predictions. @@ -749,9 +749,9 @@ class SoftmaxWithLossLayer : public LossLayer { * the labels -- ignored as we can't compute their error gradients */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); /// The internal SoftmaxLayer used to map predictions to a distribution. shared_ptr > softmax_layer_; diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp index 0968a20..64a1fd3 100644 --- a/include/caffe/neuron_layers.hpp +++ b/include/caffe/neuron_layers.hpp @@ -27,7 +27,7 @@ class NeuronLayer : public Layer { explicit NeuronLayer(const LayerParameter& param) : Layer(param) {} virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_NONE; @@ -52,7 +52,7 @@ class AbsValLayer : public NeuronLayer { explicit AbsValLayer(const LayerParameter& param) : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_ABSVAL; @@ -63,9 +63,9 @@ class AbsValLayer : public NeuronLayer { protected: /// @copydoc AbsValLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the absolute value inputs. 
@@ -85,9 +85,9 @@ class AbsValLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -120,9 +120,9 @@ class BNLLLayer : public NeuronLayer { protected: /// @copydoc BNLLLayer virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the BNLL inputs. @@ -141,9 +141,9 @@ class BNLLLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; /** @@ -169,9 +169,9 @@ class DropoutLayer : public NeuronLayer { explicit DropoutLayer(const LayerParameter& param) : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_DROPOUT; @@ -195,13 +195,13 @@ class DropoutLayer : public NeuronLayer { * @f$ y_{\mbox{test}} = \mathbb{E}[y_{\mbox{train}}] = x @f$. */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); /// when divided by UINT_MAX, the randomly generated values @f$u\sim U(0,1)@f$ Blob rand_vec_; @@ -230,7 +230,7 @@ class PowerLayer : public NeuronLayer { explicit PowerLayer(const LayerParameter& param) : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_POWER; @@ -248,9 +248,9 @@ class PowerLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the power inputs. @@ -273,9 +273,9 @@ class PowerLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); /// @brief @f$ \gamma @f$ from layer_param_.power_param() Dtype power_; @@ -320,9 +320,9 @@ class ReLULayer : public NeuronLayer { * the computed outputs are @f$ y = \max(0, x) + \nu \min(0, x) @f$. 
*/ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the ReLU inputs. @@ -353,9 +353,9 @@ class ReLULayer : public NeuronLayer { * @f$. */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN @@ -368,16 +368,16 @@ class CuDNNReLULayer : public ReLULayer { explicit CuDNNReLULayer(const LayerParameter& param) : ReLULayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual ~CuDNNReLULayer(); protected: virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; @@ -415,9 +415,9 @@ class SigmoidLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the sigmoid inputs. @@ -437,9 +437,9 @@ class SigmoidLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN @@ -452,16 +452,16 @@ class CuDNNSigmoidLayer : public SigmoidLayer { explicit CuDNNSigmoidLayer(const LayerParameter& param) : SigmoidLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual ~CuDNNSigmoidLayer(); protected: virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; @@ -499,9 +499,9 @@ class TanHLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /** * @brief Computes the error gradient w.r.t. the sigmoid inputs. 
@@ -523,9 +523,9 @@ class TanHLayer : public NeuronLayer { * @f$ if propagate_down[0] */ virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); }; #ifdef USE_CUDNN @@ -538,16 +538,16 @@ class CuDNNTanHLayer : public TanHLayer { explicit CuDNNTanHLayer(const LayerParameter& param) : TanHLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual ~CuDNNTanHLayer(); protected: virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_; @@ -571,7 +571,7 @@ class ThresholdLayer : public NeuronLayer { explicit ThresholdLayer(const LayerParameter& param) : NeuronLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_THRESHOLD; @@ -593,12 +593,12 @@ class ThresholdLayer : public NeuronLayer { * @f$ */ virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); /// @brief Not implemented (non-differentiable function) virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { NOT_IMPLEMENTED; } diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index 5a8d382..2293771 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -30,24 +30,24 @@ class GradientChecker { // layers. // Note that after the gradient check, we do not guarantee that the data // stored in the layer parameters and the blobs are unchanged. - void CheckGradient(Layer* layer, vector*>* bottom, - vector*>* top, int check_bottom = -1) { - layer->SetUp(*bottom, top); + void CheckGradient(Layer* layer, const vector*>& bottom, + const vector*>& top, int check_bottom = -1) { + layer->SetUp(bottom, top); CheckGradientSingle(layer, bottom, top, check_bottom, -1, -1); } void CheckGradientExhaustive(Layer* layer, - vector*>* bottom, vector*>* top, + const vector*>& bottom, const vector*>& top, int check_bottom = -1); // CheckGradientEltwise can be used to test layers that perform element-wise // computation only (e.g., neuron layers) -- where (d y_i) / (d x_j) = 0 when // i != j. void CheckGradientEltwise(Layer* layer, - vector*>* bottom, vector*>* top); + const vector*>& bottom, const vector*>& top); - void CheckGradientSingle(Layer* layer, vector*>* bottom, - vector*>* top, int check_bottom, int top_id, - int top_data_id, bool element_wise = false); + void CheckGradientSingle(Layer* layer, + const vector*>& bottom, const vector*>& top, + int check_bottom, int top_id, int top_data_id, bool element_wise = false); // Checks the gradient of a network. 
This network should not have any data // layers or loss layers, since the function does not explicitly deal with @@ -57,8 +57,8 @@ class GradientChecker { const vector*>& input); protected: - Dtype GetObjAndGradient(const Layer& layer, vector*>* top, - int top_id = -1, int top_data_id = -1); + Dtype GetObjAndGradient(const Layer& layer, + const vector*>& top, int top_id = -1, int top_data_id = -1); Dtype stepsize_; Dtype threshold_; unsigned int seed_; @@ -69,40 +69,40 @@ class GradientChecker { template void GradientChecker::CheckGradientSingle(Layer* layer, - vector*>* bottom, vector*>* top, + const vector*>& bottom, const vector*>& top, int check_bottom, int top_id, int top_data_id, bool element_wise) { if (element_wise) { CHECK_EQ(0, layer->blobs().size()); CHECK_LE(0, top_id); CHECK_LE(0, top_data_id); - const int top_count = (*top)[top_id]->count(); - for (int blob_id = 0; blob_id < bottom->size(); ++blob_id) { - CHECK_EQ(top_count, (*bottom)[blob_id]->count()); + const int top_count = top[top_id]->count(); + for (int blob_id = 0; blob_id < bottom.size(); ++blob_id) { + CHECK_EQ(top_count, bottom[blob_id]->count()); } } // First, figure out what blobs we need to check against. vector*> blobs_to_check; - vector propagate_down(bottom->size(), check_bottom < 0); + vector propagate_down(bottom.size(), check_bottom < 0); for (int i = 0; i < layer->blobs().size(); ++i) { blobs_to_check.push_back(layer->blobs()[i].get()); } if (check_bottom < 0) { - for (int i = 0; i < bottom->size(); ++i) { - blobs_to_check.push_back((*bottom)[i]); + for (int i = 0; i < bottom.size(); ++i) { + blobs_to_check.push_back(bottom[i]); } } else { - CHECK_LT(check_bottom, bottom->size()); - blobs_to_check.push_back((*bottom)[check_bottom]); + CHECK_LT(check_bottom, bottom.size()); + blobs_to_check.push_back(bottom[check_bottom]); propagate_down[check_bottom] = true; } // Compute the gradient analytically using Backward Caffe::set_random_seed(seed_); // Ignore the loss from the layer (it's just the weighted sum of the losses // from the top blobs, whose gradients we may want to test individually). - layer->Forward(*bottom, top); + layer->Forward(bottom, top); // Get additional loss from the objective GetObjAndGradient(*layer, top, top_id, top_data_id); - layer->Backward(*top, propagate_down, bottom); + layer->Backward(top, propagate_down, bottom); // Store computed gradients for all checked blobs vector > > computed_gradient_blobs(blobs_to_check.size()); @@ -127,8 +127,8 @@ void GradientChecker::CheckGradientSingle(Layer* layer, // << current_blob->count() << " parameters."; for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) { // For an element-wise layer, we only need to do finite differencing to - // compute the derivative of (*top)[top_id][top_data_id] w.r.t. - // (*bottom)[blob_id][i] only for i == top_data_id. For any other + // compute the derivative of top[top_id][top_data_id] w.r.t. + // bottom[blob_id][i] only for i == top_data_id. For any other // i != top_data_id, we know the derivative is 0 by definition, and simply // check that that's true. Dtype estimated_gradient = 0; @@ -139,13 +139,13 @@ void GradientChecker::CheckGradientSingle(Layer* layer, // Compute loss with stepsize_ added to input. current_blob->mutable_cpu_data()[feat_id] += stepsize_; Caffe::set_random_seed(seed_); - layer->Forward(*bottom, top); + layer->Forward(bottom, top); positive_objective = GetObjAndGradient(*layer, top, top_id, top_data_id); // Compute loss with stepsize_ subtracted from input. 
current_blob->mutable_cpu_data()[feat_id] -= stepsize_ * 2; Caffe::set_random_seed(seed_); - layer->Forward(*bottom, top); + layer->Forward(bottom, top); negative_objective = GetObjAndGradient(*layer, top, top_id, top_data_id); // Recover original input value. @@ -179,13 +179,14 @@ void GradientChecker::CheckGradientSingle(Layer* layer, template void GradientChecker::CheckGradientExhaustive(Layer* layer, - vector*>* bottom, vector*>* top, int check_bottom) { - layer->SetUp(*bottom, top); - CHECK_GT(top->size(), 0) << "Exhaustive mode requires at least one top blob."; + const vector*>& bottom, const vector*>& top, + int check_bottom) { + layer->SetUp(bottom, top); + CHECK_GT(top.size(), 0) << "Exhaustive mode requires at least one top blob."; // LOG(ERROR) << "Exhaustive Mode."; - for (int i = 0; i < top->size(); ++i) { + for (int i = 0; i < top.size(); ++i) { // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count(); - for (int j = 0; j < (*top)[i]->count(); ++j) { + for (int j = 0; j < top[i]->count(); ++j) { // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j; CheckGradientSingle(layer, bottom, top, check_bottom, i, j); } @@ -194,13 +195,13 @@ void GradientChecker::CheckGradientExhaustive(Layer* layer, template void GradientChecker::CheckGradientEltwise(Layer* layer, - vector*>* bottom, vector*>* top) { - layer->SetUp(*bottom, top); - CHECK_GT(top->size(), 0) << "Eltwise mode requires at least one top blob."; + const vector*>& bottom, const vector*>& top) { + layer->SetUp(bottom, top); + CHECK_GT(top.size(), 0) << "Eltwise mode requires at least one top blob."; const int check_bottom = -1; const bool element_wise = true; - for (int i = 0; i < top->size(); ++i) { - for (int j = 0; j < (*top)[i]->count(); ++j) { + for (int i = 0; i < top.size(); ++i) { + for (int j = 0; j < top[i]->count(); ++j) { CheckGradientSingle(layer, bottom, top, check_bottom, i, j, element_wise); } } @@ -221,12 +222,12 @@ void GradientChecker::CheckGradientNet( template Dtype GradientChecker::GetObjAndGradient(const Layer& layer, - vector*>* top, int top_id, int top_data_id) { + const vector*>& top, int top_id, int top_data_id) { Dtype loss = 0; if (top_id < 0) { // the loss will be half of the sum of squares of all outputs - for (int i = 0; i < top->size(); ++i) { - Blob* top_blob = (*top)[i]; + for (int i = 0; i < top.size(); ++i) { + Blob* top_blob = top[i]; const Dtype* top_blob_data = top_blob->cpu_data(); Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); int count = top_blob->count(); @@ -239,14 +240,14 @@ Dtype GradientChecker::GetObjAndGradient(const Layer& layer, loss /= 2.; } else { // the loss will be the top_data_id-th element in the top_id-th blob. 
- for (int i = 0; i < top->size(); ++i) { - Blob* top_blob = (*top)[i]; + for (int i = 0; i < top.size(); ++i) { + Blob* top_blob = top[i]; Dtype* top_blob_diff = top_blob->mutable_cpu_diff(); caffe_set(top_blob->count(), Dtype(0), top_blob_diff); } const Dtype loss_weight = 2; - loss = (*top)[top_id]->cpu_data()[top_data_id] * loss_weight; - (*top)[top_id]->mutable_cpu_diff()[top_data_id] = loss_weight; + loss = top[top_id]->cpu_data()[top_data_id] * loss_weight; + top[top_id]->mutable_cpu_diff()[top_data_id] = loss_weight; } return loss; } diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index 3df28a4..5a45691 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -12,22 +12,22 @@ #define STUB_GPU(classname) \ template \ void classname::Forward_gpu(const vector*>& bottom, \ - vector*>* top) { NO_GPU; } \ + const vector*>& top) { NO_GPU; } \ template \ void classname::Backward_gpu(const vector*>& top, \ const vector& propagate_down, \ - vector*>* bottom) { NO_GPU; } \ + const vector*>& bottom) { NO_GPU; } \ #define STUB_GPU_FORWARD(classname, funcname) \ template \ void classname::funcname##_##gpu(const vector*>& bottom, \ - vector*>* top) { NO_GPU; } \ + const vector*>& top) { NO_GPU; } \ #define STUB_GPU_BACKWARD(classname, funcname) \ template \ void classname::funcname##_##gpu(const vector*>& top, \ const vector& propagate_down, \ - vector*>* bottom) { NO_GPU; } \ + const vector*>& bottom) { NO_GPU; } \ #else // Normal GPU + CPU Caffe. diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp index 1e7f3fc..0f1b2d9 100644 --- a/include/caffe/vision_layers.hpp +++ b/include/caffe/vision_layers.hpp @@ -66,9 +66,9 @@ class ConvolutionLayer : public Layer { explicit ConvolutionLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_CONVOLUTION; @@ -79,13 +79,13 @@ class ConvolutionLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int kernel_h_, kernel_w_; int stride_h_, stride_w_; @@ -132,16 +132,16 @@ class CuDNNConvolutionLayer : public ConvolutionLayer { explicit CuDNNConvolutionLayer(const LayerParameter& param) : ConvolutionLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual ~CuDNNConvolutionLayer(); protected: virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); cudnnHandle_t* handle_; cudaStream_t* stream_; @@ -166,9 +166,9 @@ class Im2colLayer : public Layer { explicit Im2colLayer(const LayerParameter& param) : 
Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_IM2COL; @@ -178,13 +178,13 @@ class Im2colLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int kernel_h_, kernel_w_; int stride_h_, stride_w_; @@ -208,9 +208,9 @@ class LRNLayer : public Layer { explicit LRNLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_LRN; @@ -220,26 +220,26 @@ class LRNLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void CrossChannelForward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void CrossChannelForward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void WithinChannelForward(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void CrossChannelBackward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void CrossChannelBackward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void WithinChannelBackward(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int size_; int pre_pad_; @@ -285,9 +285,9 @@ class PoolingLayer : public Layer { explicit PoolingLayer(const LayerParameter& param) : Layer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual inline LayerParameter_LayerType type() const { return LayerParameter_LayerType_POOLING; @@ -303,13 +303,13 @@ class PoolingLayer : public Layer { protected: virtual void Forward_cpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, 
vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); int kernel_h_, kernel_w_; int stride_h_, stride_w_; @@ -332,16 +332,16 @@ class CuDNNPoolingLayer : public PoolingLayer { explicit CuDNNPoolingLayer(const LayerParameter& param) : PoolingLayer(param) {} virtual void LayerSetUp(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Reshape(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual ~CuDNNPoolingLayer(); protected: virtual void Forward_gpu(const vector*>& bottom, - vector*>* top); + const vector*>& top); virtual void Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom); + const vector& propagate_down, const vector*>& bottom); cudnnHandle_t handle_; cudnnTensor4dDescriptor_t bottom_desc_, top_desc_; diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index ce9d05c..54e484b 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -8,29 +8,29 @@ namespace caffe { template void AbsValLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { NeuronLayer::LayerSetUp(bottom, top); - CHECK_NE((*top)[0], bottom[0]) << this->type_name() << " Layer does not " + CHECK_NE(top[0], bottom[0]) << this->type_name() << " Layer does not " "allow in-place computation."; } template void AbsValLayer::Forward_cpu( - const vector*>& bottom, vector*>* top) { - const int count = (*top)[0]->count(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const vector*>& bottom, const vector*>& top) { + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_cpu_data(); caffe_abs(count, bottom[0]->cpu_data(), top_data); } template void AbsValLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const int count = top[0]->count(); const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); caffe_div(count, top_data, bottom_data, bottom_diff); caffe_mul(count, bottom_diff, top_diff, bottom_diff); } diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu index 46778aa..48c7664 100644 --- a/src/caffe/layers/absval_layer.cu +++ b/src/caffe/layers/absval_layer.cu @@ -8,21 +8,21 @@ namespace caffe { template void AbsValLayer::Forward_gpu( - const vector*>& bottom, vector*>* top) { - const int count = (*top)[0]->count(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const vector*>& bottom, const vector*>& top) { + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_abs(count, bottom[0]->gpu_data(), top_data); } template void AbsValLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_div(count, top_data, bottom_data, 
bottom_diff); caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); } diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index 3e69bc8..399cf2a 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -12,13 +12,13 @@ namespace caffe { template void AccuracyLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { top_k_ = this->layer_param_.accuracy_param().top_k(); } template void AccuracyLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; CHECK_LE(top_k_, bottom[0]->count() / bottom[0]->num()) @@ -26,12 +26,12 @@ void AccuracyLayer::Reshape( CHECK_EQ(bottom[1]->channels(), 1); CHECK_EQ(bottom[1]->height(), 1); CHECK_EQ(bottom[1]->width(), 1); - (*top)[0]->Reshape(1, 1, 1, 1); + top[0]->Reshape(1, 1, 1, 1); } template void AccuracyLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { Dtype accuracy = 0; const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); @@ -59,7 +59,7 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, } // LOG(INFO) << "Accuracy: " << accuracy; - (*top)[0]->mutable_cpu_data()[0] = accuracy / num; + top[0]->mutable_cpu_data()[0] = accuracy / num; // Accuracy layer should not be used as a loss function. } diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index 0d1a107..dbe0a83 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -10,7 +10,7 @@ namespace caffe { template void ArgMaxLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { out_max_val_ = this->layer_param_.argmax_param().out_max_val(); top_k_ = this->layer_param_.argmax_param().top_k(); CHECK_GE(top_k_, 1) << " top k must not be less than 1."; @@ -20,21 +20,21 @@ void ArgMaxLayer::LayerSetUp(const vector*>& bottom, template void ArgMaxLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { if (out_max_val_) { // Produces max_ind and max_val - (*top)[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); + top[0]->Reshape(bottom[0]->num(), 2, top_k_, 1); } else { // Produces only max_ind - (*top)[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); + top[0]->Reshape(bottom[0]->num(), 1, top_k_, 1); } } template void ArgMaxLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); int num = bottom[0]->num(); int dim = bottom[0]->count() / bottom[0]->num(); for (int i = 0; i < num; ++i) { @@ -47,11 +47,11 @@ void ArgMaxLayer::Forward_cpu(const vector*>& bottom, bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_, bottom_data_vector.end(), std::greater >()); for (int j = 0; j < top_k_; ++j) { - top_data[(*top)[0]->offset(i, 0, j)] = bottom_data_vector[j].second; + top_data[top[0]->offset(i, 0, j)] = bottom_data_vector[j].second; } if (out_max_val_) { for (int j = 0; j < top_k_; ++j) { - top_data[(*top)[0]->offset(i, 1, j)] = bottom_data_vector[j].first; + top_data[top[0]->offset(i, 1, j)] = bottom_data_vector[j].first; } } } diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 9b1d558..86abbae 100644 --- 
a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -15,8 +15,8 @@ BaseDataLayer::BaseDataLayer(const LayerParameter& param) template void BaseDataLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { - if (top->size() == 1) { + const vector*>& top) { + if (top.size() == 1) { output_labels_ = false; } else { output_labels_ = true; @@ -51,7 +51,7 @@ void BaseDataLayer::LayerSetUp(const vector*>& bottom, template void BasePrefetchingDataLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { BaseDataLayer::LayerSetUp(bottom, top); // Now, start the prefetch thread. Before calling prefetch, we make two // cpu_data calls so that the prefetch thread does not accidentally make @@ -80,15 +80,15 @@ void BasePrefetchingDataLayer::JoinPrefetchThread() { template void BasePrefetchingDataLayer::Forward_cpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // First, join the thread JoinPrefetchThread(); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), - (*top)[0]->mutable_cpu_data()); + top[0]->mutable_cpu_data()); if (this->output_labels_) { caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), - (*top)[1]->mutable_cpu_data()); + top[1]->mutable_cpu_data()); } // Start a new prefetch thread CreatePrefetchThread(); diff --git a/src/caffe/layers/base_data_layer.cu b/src/caffe/layers/base_data_layer.cu index 8189c79..ff15103 100644 --- a/src/caffe/layers/base_data_layer.cu +++ b/src/caffe/layers/base_data_layer.cu @@ -6,15 +6,15 @@ namespace caffe { template void BasePrefetchingDataLayer::Forward_gpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // First, join the thread JoinPrefetchThread(); // Copy the data caffe_copy(prefetch_data_.count(), prefetch_data_.cpu_data(), - (*top)[0]->mutable_gpu_data()); + top[0]->mutable_gpu_data()); if (this->output_labels_) { caffe_copy(prefetch_label_.count(), prefetch_label_.cpu_data(), - (*top)[1]->mutable_gpu_data()); + top[1]->mutable_gpu_data()); } // Start a new prefetch thread CreatePrefetchThread(); diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index ef98326..bae6925 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -10,9 +10,9 @@ const float kBNLL_THRESHOLD = 50.; template void BNLLLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { top_data[i] = bottom_data[i] > 0 ? 
@@ -24,12 +24,12 @@ void BNLLLayer::Forward_cpu(const vector*>& bottom, template void BNLLLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); Dtype expval; for (int i = 0; i < count; ++i) { expval = exp(std::min(bottom_data[i], Dtype(kBNLL_THRESHOLD))); diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu index b940133..3993bcc 100644 --- a/src/caffe/layers/bnll_layer.cu +++ b/src/caffe/layers/bnll_layer.cu @@ -19,9 +19,9 @@ __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { template void BNLLLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLForward<<>>( @@ -41,12 +41,12 @@ __global__ void BNLLBackward(const int n, const Dtype* in_diff, template void BNLLLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLBackward<<>>( count, top_diff, bottom_data, bottom_diff); diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 10a11f1..f8f7356 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { template void ConcatLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { concat_dim_ = this->layer_param_.concat_param().concat_dim(); CHECK_GE(concat_dim_, 0) << "concat_dim should be >= 0"; @@ -18,7 +18,7 @@ void ConcatLayer::LayerSetUp(const vector*>& bottom, template void ConcatLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Initialize with the first blob. 
count_ = bottom[0]->count(); num_ = bottom[0]->num(); @@ -37,20 +37,20 @@ void ConcatLayer::Reshape(const vector*>& bottom, width_ += bottom[i]->width(); } } - (*top)[0]->Reshape(num_, channels_, height_, width_); - CHECK_EQ(count_, (*top)[0]->count()); + top[0]->Reshape(num_, channels_, height_, width_); + CHECK_EQ(count_, top[0]->count()); } template void ConcatLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const vector*>& top) { + Dtype* top_data = top[0]->mutable_cpu_data(); if (concat_dim_== 0) { int offset_num = 0; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); int num_elem = bottom[i]->count(); - caffe_copy(num_elem, bottom_data, top_data+(*top)[0]->offset(offset_num)); + caffe_copy(num_elem, bottom_data, top_data+top[0]->offset(offset_num)); offset_num += bottom[i]->num(); } } else if (concat_dim_ == 1) { @@ -61,7 +61,7 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, bottom[i]->channels()*bottom[i]->height()*bottom[i]->width(); for (int n = 0; n < num_; ++n) { caffe_copy(num_elem, bottom_data+bottom[i]->offset(n), - top_data+(*top)[0]->offset(n, offset_channel)); + top_data+top[0]->offset(n, offset_channel)); } offset_channel += bottom[i]->channels(); } // concat_dim_ is guaranteed to be 0 or 1 by LayerSetUp. @@ -70,12 +70,12 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, template void ConcatLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); if (concat_dim_ == 0) { int offset_num = 0; - for (int i = 0; i < bottom->size(); ++i) { - Blob* blob = (*bottom)[i]; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; if (propagate_down[i]) { Dtype* bottom_diff = blob->mutable_cpu_diff(); caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num), @@ -85,8 +85,8 @@ void ConcatLayer::Backward_cpu(const vector*>& top, } } else if (concat_dim_ == 1) { int offset_channel = 0; - for (int i = 0; i < bottom->size(); ++i) { - Blob* blob = (*bottom)[i]; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; if (propagate_down[i]) { Dtype* bottom_diff = blob->mutable_cpu_diff(); int num_elem = blob->channels()*blob->height()*blob->width(); diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu index 99c55da..1b4e40a 100644 --- a/src/caffe/layers/concat_layer.cu +++ b/src/caffe/layers/concat_layer.cu @@ -8,14 +8,14 @@ namespace caffe { template void ConcatLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const vector*>& top) { + Dtype* top_data = top[0]->mutable_gpu_data(); if (concat_dim_ == 0) { int offset_num = 0; for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); caffe_copy(bottom[i]->count(), bottom_data, - top_data + (*top)[0]->offset(offset_num)); + top_data + top[0]->offset(offset_num)); offset_num += bottom[i]->num(); } } else if (concat_dim_ == 1) { @@ -26,7 +26,7 @@ void ConcatLayer::Forward_gpu(const vector*>& bottom, bottom[i]->channels() * bottom[i]->height() * bottom[i]->width(); for (int n = 0; n < num_; ++n) { caffe_copy(num_elem, bottom_data+bottom[i]->offset(n), - top_data + (*top)[0]->offset(n, offset_channel)); + top_data + top[0]->offset(n, offset_channel)); } offset_channel += bottom[i]->channels(); } @@ -38,12 +38,12 @@ void 
ConcatLayer::Forward_gpu(const vector*>& bottom, template void ConcatLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); if (concat_dim_ == 0) { int offset_num = 0; - for (int i = 0; i < bottom->size(); ++i) { - Blob* blob = (*bottom)[i]; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; if (propagate_down[i]) { Dtype* bottom_diff = blob->mutable_gpu_diff(); caffe_copy(blob->count(), top_diff + top[0]->offset(offset_num), @@ -53,8 +53,8 @@ void ConcatLayer::Backward_gpu(const vector*>& top, } } else if (concat_dim_ == 1) { int offset_channel = 0; - for (int i = 0; i < bottom->size(); ++i) { - Blob* blob = (*bottom)[i]; + for (int i = 0; i < bottom.size(); ++i) { + Blob* blob = bottom[i]; if (propagate_down[i]) { Dtype* bottom_diff = blob->mutable_gpu_diff(); int num_elem = blob->channels()*blob->height()*blob->width(); diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 072a5a5..d716d14 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -10,7 +10,7 @@ namespace caffe { template void ContrastiveLossLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); CHECK_EQ(bottom[0]->height(), 1); @@ -32,7 +32,7 @@ void ContrastiveLossLayer::LayerSetUp( template void ContrastiveLossLayer::Forward_cpu( const vector*>& bottom, - vector*>* top) { + const vector*>& top) { int count = bottom[0]->count(); caffe_sub( count, @@ -52,23 +52,23 @@ void ContrastiveLossLayer::Forward_cpu( } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - (*top)[0]->mutable_cpu_data()[0] = loss; + top[0]->mutable_cpu_data()[0] = loss; } template void ContrastiveLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { Dtype margin = this->layer_param_.contrastive_loss_param().margin(); for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast((*bottom)[i]->num()); - int num = (*bottom)[i]->num(); - int channels = (*bottom)[i]->channels(); + static_cast(bottom[i]->num()); + int num = bottom[i]->num(); + int channels = bottom[i]->channels(); for (int j = 0; j < num; ++j) { - Dtype* bout = (*bottom)[i]->mutable_cpu_diff(); - if (static_cast((*bottom)[2]->cpu_data()[j])) { // similar pairs + Dtype* bout = bottom[i]->mutable_cpu_diff(); + if (static_cast(bottom[2]->cpu_data()[j])) { // similar pairs caffe_cpu_axpby( channels, alpha, diff --git a/src/caffe/layers/contrastive_loss_layer.cu b/src/caffe/layers/contrastive_loss_layer.cu index 672ad5b..14b55b3 100644 --- a/src/caffe/layers/contrastive_loss_layer.cu +++ b/src/caffe/layers/contrastive_loss_layer.cu @@ -10,7 +10,7 @@ namespace caffe { template void ContrastiveLossLayer::Forward_gpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { const int count = bottom[0]->count(); caffe_gpu_sub( count, @@ -41,7 +41,7 @@ void ContrastiveLossLayer::Forward_gpu( } } loss = loss / static_cast(bottom[0]->num()) / Dtype(2); - (*top)[0]->mutable_cpu_data()[0] = loss; + top[0]->mutable_cpu_data()[0] = loss; } template @@ -65,22 +65,22 @@ __global__ void CLLForward(const int count, const int channels, template void ContrastiveLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { - const int count = (*bottom)[0]->count(); - const int channels = (*bottom)[0]->channels(); + const int count = bottom[0]->count(); + const int channels = bottom[0]->channels(); Dtype margin = this->layer_param_.contrastive_loss_param().margin(); const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / - static_cast((*bottom)[0]->num()); + static_cast(bottom[0]->num()); // NOLINT_NEXT_LINE(whitespace/operators) CLLForward<<>>( count, channels, margin, alpha, - (*bottom)[2]->gpu_data(), // pair similarity 0 or 1 + bottom[2]->gpu_data(), // pair similarity 0 or 1 diff_.gpu_data(), // the cached eltwise difference between a and b dist_sq_.gpu_data(), // the cached square distance between a and b - (*bottom)[i]->mutable_gpu_diff()); + bottom[i]->mutable_gpu_diff()); CUDA_POST_KERNEL_CHECK; } } diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 58918fd..8c08c10 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -10,7 +10,7 @@ namespace caffe { template void ConvolutionLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Configure the kernel size, padding, stride, and inputs. 
ConvolutionParameter conv_param = this->layer_param_.convolution_param(); CHECK(!conv_param.has_kernel_size() != @@ -89,7 +89,7 @@ void ConvolutionLayer::LayerSetUp(const vector*>& bottom, template void ConvolutionLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { num_ = bottom[0]->num(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); @@ -109,8 +109,8 @@ void ConvolutionLayer::Reshape(const vector*>& bottom, height_out_ = (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1; width_out_ = (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1; - for (int top_id = 0; top_id < top->size(); ++top_id) { - (*top)[top_id]->Reshape(num_, num_output_, height_out_, width_out_); + for (int top_id = 0; top_id < top.size(); ++top_id) { + top[top_id]->Reshape(num_, num_output_, height_out_, width_out_); } // Prepare the matrix multiplication computation. // Each input will be convolved as a single GEMM. @@ -121,8 +121,8 @@ void ConvolutionLayer::Reshape(const vector*>& bottom, // overly large memory usage. col_buffer_.Reshape( 1, channels_ * kernel_h_ * kernel_w_, height_out_, width_out_); - for (int top_id = 0; top_id < top->size(); ++top_id) { - (*top)[top_id]->Reshape(num_, num_output_, height_out_, width_out_); + for (int top_id = 0; top_id < top.size(); ++top_id) { + top[top_id]->Reshape(num_, num_output_, height_out_, width_out_); } // Set up the all ones "bias multiplier" for adding biases by BLAS if (bias_term_) { @@ -133,10 +133,10 @@ void ConvolutionLayer::Reshape(const vector*>& bottom, template void ConvolutionLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); - Dtype* top_data = (*top)[i]->mutable_cpu_data(); + Dtype* top_data = top[i]->mutable_cpu_data(); Dtype* col_data = col_buffer_.mutable_cpu_data(); const Dtype* weight = this->blobs_[0]->cpu_data(); int weight_offset = M_ * K_; // number of filter parameters in a group @@ -152,14 +152,14 @@ void ConvolutionLayer::Forward_cpu(const vector*>& bottom, for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., weight + weight_offset * g, col_data + col_offset * g, - (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g); + (Dtype)0., top_data + top[i]->offset(n) + top_offset * g); } // Add bias. if (bias_term_) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[1]->cpu_data(), bias_multiplier_.cpu_data(), - (Dtype)1., top_data + (*top)[i]->offset(n)); + (Dtype)1., top_data + top[i]->offset(n)); } } } @@ -167,7 +167,7 @@ void ConvolutionLayer::Forward_cpu(const vector*>& bottom, template void ConvolutionLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { @@ -201,12 +201,12 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, } Dtype* col_data = col_buffer_.mutable_cpu_data(); Dtype* col_diff = col_buffer_.mutable_cpu_diff(); - const Dtype* bottom_data = (*bottom)[i]->cpu_data(); - Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff(); + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. 
- im2col_cpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_, + im2col_cpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. @@ -232,7 +232,7 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, // col2im back to the data col2im_cpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, bottom_diff + (*bottom)[i]->offset(n)); + stride_h_, stride_w_, bottom_diff + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/conv_layer.cu b/src/caffe/layers/conv_layer.cu index 43f76a2..908f5a6 100644 --- a/src/caffe/layers/conv_layer.cu +++ b/src/caffe/layers/conv_layer.cu @@ -11,10 +11,10 @@ namespace caffe { /// @brief refer to CPU forward -- the BLAS implementation is the same. template void ConvolutionLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* top_data = (*top)[i]->mutable_gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; @@ -30,14 +30,14 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, for (int g = 0; g < group_; ++g) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., weight + weight_offset * g, col_data + col_offset * g, - (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g); + (Dtype)0., top_data + top[i]->offset(n) + top_offset * g); } // Add bias. if (bias_term_) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[1]->gpu_data(), bias_multiplier_.gpu_data(), - (Dtype)1., top_data + (*top)[i]->offset(n)); + (Dtype)1., top_data + top[i]->offset(n)); } } } @@ -46,7 +46,7 @@ void ConvolutionLayer::Forward_gpu(const vector*>& bottom, /// @brief refer to CPU backward -- the BLAS implementation is the same. template void ConvolutionLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { @@ -80,12 +80,12 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, } Dtype* col_data = col_buffer_.mutable_gpu_data(); Dtype* col_diff = col_buffer_.mutable_gpu_diff(); - const Dtype* bottom_data = (*bottom)[i]->gpu_data(); - Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. - im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_, + im2col_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. 
@@ -111,7 +111,7 @@ void ConvolutionLayer::Backward_gpu(const vector*>& top, // col2im back to the data col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, - bottom_diff + (*bottom)[i]->offset(n)); + bottom_diff + bottom[i]->offset(n)); } } } diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 137bbab..f74a3db 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -19,7 +19,7 @@ namespace caffe { */ template void CuDNNConvolutionLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { ConvolutionLayer::LayerSetUp(bottom, top); // Initialize CUDA streams and cuDNN. stream_ = new cudaStream_t[this->group_ * CUDNN_STREAMS_PER_GROUP]; @@ -62,7 +62,7 @@ void CuDNNConvolutionLayer::LayerSetUp( template void CuDNNConvolutionLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { ConvolutionLayer::Reshape(bottom, top); bottom_offset_ = (this->channels_ / this->group_) * this->height_ * this->width_; diff --git a/src/caffe/layers/cudnn_conv_layer.cu b/src/caffe/layers/cudnn_conv_layer.cu index 2af1330..6961bbe 100644 --- a/src/caffe/layers/cudnn_conv_layer.cu +++ b/src/caffe/layers/cudnn_conv_layer.cu @@ -13,10 +13,10 @@ __global__ void sync_conv_groups() { } template void CuDNNConvolutionLayer::Forward_gpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); - Dtype* top_data = (*top)[i]->mutable_gpu_data(); + Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); // Forward through cuDNN in parallel over groups. @@ -48,7 +48,7 @@ void CuDNNConvolutionLayer::Forward_gpu( template void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { @@ -75,7 +75,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { - const Dtype* bottom_data = (*bottom)[i]->gpu_data(); + const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g], bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, @@ -86,7 +86,7 @@ void CuDNNConvolutionLayer::Backward_gpu(const vector*>& top, // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { - Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g], filter_desc_, weight + weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index 5aea0dc..2af9d21 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -11,7 +11,7 @@ namespace caffe { template void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { PoolingLayer::LayerSetUp(bottom, top); CUDNN_CHECK(cudnnCreate(&handle_)); @@ -24,7 +24,7 @@ void CuDNNPoolingLayer::LayerSetUp(const vector*>& bottom, template void CuDNNPoolingLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { PoolingLayer::Reshape(bottom, top); cudnn::setTensor4dDesc(&bottom_desc_, bottom[0]->num(), this->channels_, this->height_, this->width_); diff --git a/src/caffe/layers/cudnn_pooling_layer.cu b/src/caffe/layers/cudnn_pooling_layer.cu index 99c409d..9d270ed 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cu +++ b/src/caffe/layers/cudnn_pooling_layer.cu @@ -11,22 +11,22 @@ namespace caffe { template void CuDNNPoolingLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Fallback to Caffe for padded pooling, max top mask. - if ((this->pad_h_ > 0 || this->pad_w_ > 0) || (*top).size() > 1) { + if ((this->pad_h_ > 0 || this->pad_w_ > 0) || top.size() > 1) { LOG(WARNING) << "Falling back to standard Caffe for padded pooling."; return PoolingLayer::Forward_gpu(bottom, top); } const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnPoolingForward(handle_, pooling_desc_, bottom_desc_, bottom_data, top_desc_, top_data)); } template void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } @@ -39,8 +39,8 @@ void CuDNNPoolingLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnPoolingBackward(handle_, pooling_desc_, top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_data, bottom_desc_, bottom_diff)); diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 083868f..20f486f 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { ReLULayer::LayerSetUp(bottom, top); // initialize cuDNN CUDNN_CHECK(cudnnCreate(&handle_)); @@ -19,7 +19,7 @@ void CuDNNReLULayer::LayerSetUp(const vector*>& bottom, template void CuDNNReLULayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { ReLULayer::Reshape(bottom, top); const int N = bottom[0]->num(); const int K = bottom[0]->channels(); diff --git a/src/caffe/layers/cudnn_relu_layer.cu 
b/src/caffe/layers/cudnn_relu_layer.cu index a8519f3..e3900f3 100644 --- a/src/caffe/layers/cudnn_relu_layer.cu +++ b/src/caffe/layers/cudnn_relu_layer.cu @@ -9,14 +9,14 @@ namespace caffe { template void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Fallback to standard Caffe for leaky ReLU. if (ReLULayer::layer_param_.relu_param().negative_slope() != 0) { return ReLULayer::Forward_gpu(bottom, top); } const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, CUDNN_ACTIVATION_RELU, this->bottom_desc_, bottom_data, this->top_desc_, top_data)); @@ -25,7 +25,7 @@ void CuDNNReLULayer::Forward_gpu(const vector*>& bottom, template void CuDNNReLULayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (!propagate_down[0]) { return; } @@ -37,8 +37,8 @@ void CuDNNReLULayer::Backward_gpu(const vector*>& top, const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, CUDNN_ACTIVATION_RELU, this->top_desc_, top_data, this->top_desc_, top_diff, diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index 3fe800d..a94c004 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { SigmoidLayer::LayerSetUp(bottom, top); // initialize cuDNN CUDNN_CHECK(cudnnCreate(&handle_)); @@ -19,7 +19,7 @@ void CuDNNSigmoidLayer::LayerSetUp(const vector*>& bottom, template void CuDNNSigmoidLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { SigmoidLayer::Reshape(bottom, top); const int N = bottom[0]->num(); const int K = bottom[0]->channels(); diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cu b/src/caffe/layers/cudnn_sigmoid_layer.cu index 43019bd..50bf5fc 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cu +++ b/src/caffe/layers/cudnn_sigmoid_layer.cu @@ -9,9 +9,9 @@ namespace caffe { template void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, CUDNN_ACTIVATION_SIGMOID, this->bottom_desc_, bottom_data, this->top_desc_, top_data)); @@ -20,15 +20,15 @@ void CuDNNSigmoidLayer::Forward_gpu(const vector*>& bottom, template void CuDNNSigmoidLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, CUDNN_ACTIVATION_SIGMOID, 
this->top_desc_, top_data, this->top_desc_, top_diff, diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp index 79ba523..1a0f406 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cpp +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -13,7 +13,7 @@ namespace caffe { template void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { SoftmaxLayer::LayerSetUp(bottom, top); // Initialize CUDNN. CUDNN_CHECK(cudnnCreate(&handle_)); @@ -23,7 +23,7 @@ void CuDNNSoftmaxLayer::LayerSetUp(const vector*>& bottom, template void CuDNNSoftmaxLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { SoftmaxLayer::Reshape(bottom, top); int N = bottom[0]->num(); int K = bottom[0]->channels(); diff --git a/src/caffe/layers/cudnn_softmax_layer.cu b/src/caffe/layers/cudnn_softmax_layer.cu index 300bdc4..aef6673 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cu +++ b/src/caffe/layers/cudnn_softmax_layer.cu @@ -13,9 +13,9 @@ namespace caffe { template void CuDNNSoftmaxLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnSoftmaxForward(handle_, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, bottom_desc_, bottom_data, top_desc_, top_data)); @@ -23,12 +23,12 @@ void CuDNNSoftmaxLayer::Forward_gpu(const vector*>& bottom, template void CuDNNSoftmaxLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnSoftmaxBackward(handle_, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL, top_desc_, top_data, top_desc_, top_diff, bottom_desc_, bottom_diff)); diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index 7a5c06f..39a3e14 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { TanHLayer::LayerSetUp(bottom, top); // initialize cuDNN CUDNN_CHECK(cudnnCreate(&handle_)); @@ -19,7 +19,7 @@ void CuDNNTanHLayer::LayerSetUp(const vector*>& bottom, template void CuDNNTanHLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { TanHLayer::Reshape(bottom, top); const int N = bottom[0]->num(); const int K = bottom[0]->channels(); diff --git a/src/caffe/layers/cudnn_tanh_layer.cu b/src/caffe/layers/cudnn_tanh_layer.cu index c475b08..20f887d 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cu +++ b/src/caffe/layers/cudnn_tanh_layer.cu @@ -9,9 +9,9 @@ namespace caffe { template void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); CUDNN_CHECK(cudnnActivationForward(this->handle_, CUDNN_ACTIVATION_TANH, this->bottom_desc_, bottom_data, 
this->top_desc_, top_data)); @@ -20,15 +20,15 @@ void CuDNNTanHLayer::Forward_gpu(const vector*>& bottom, template void CuDNNTanHLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); CUDNN_CHECK(cudnnActivationBackward(this->handle_, CUDNN_ACTIVATION_TANH, this->top_desc_, top_data, this->top_desc_, top_diff, diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index d2071e2..b3b6bf3 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -34,7 +34,7 @@ DataLayer::~DataLayer() { template void DataLayer::DataLayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Initialize DB switch (this->layer_param_.data_param().backend()) { case DataParameter_DB_LEVELDB: @@ -114,23 +114,23 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, // image int crop_size = this->layer_param_.transform_param().crop_size(); if (crop_size > 0) { - (*top)[0]->Reshape(this->layer_param_.data_param().batch_size(), + top[0]->Reshape(this->layer_param_.data_param().batch_size(), datum.channels(), crop_size, crop_size); this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), datum.channels(), crop_size, crop_size); } else { - (*top)[0]->Reshape( + top[0]->Reshape( this->layer_param_.data_param().batch_size(), datum.channels(), datum.height(), datum.width()); this->prefetch_data_.Reshape(this->layer_param_.data_param().batch_size(), datum.channels(), datum.height(), datum.width()); } - LOG(INFO) << "output data size: " << (*top)[0]->num() << "," - << (*top)[0]->channels() << "," << (*top)[0]->height() << "," - << (*top)[0]->width(); + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // label if (this->output_labels_) { - (*top)[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1); + top[1]->Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1); this->prefetch_label_.Reshape(this->layer_param_.data_param().batch_size(), 1, 1, 1); } diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 47feb1d..f88027c 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -12,7 +12,7 @@ namespace caffe { template void DropoutLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { NeuronLayer::LayerSetUp(bottom, top); threshold_ = this->layer_param_.dropout_param().dropout_ratio(); DCHECK(threshold_ > 0.); @@ -23,7 +23,7 @@ void DropoutLayer::LayerSetUp(const vector*>& bottom, template void DropoutLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { NeuronLayer::Reshape(bottom, top); // Set up the cache for random number generation rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(), @@ -32,9 +32,9 @@ void DropoutLayer::Reshape(const vector*>& bottom, template void DropoutLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = 
top[0]->mutable_cpu_data(); unsigned int* mask = rand_vec_.mutable_cpu_data(); const int count = bottom[0]->count(); if (Caffe::phase() == Caffe::TRAIN) { @@ -51,13 +51,13 @@ void DropoutLayer::Forward_cpu(const vector*>& bottom, template void DropoutLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); if (Caffe::phase() == Caffe::TRAIN) { const unsigned int* mask = rand_vec_.cpu_data(); - const int count = (*bottom)[0]->count(); + const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * mask[i] * scale_; } diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu index 9756c86..fa737b8 100644 --- a/src/caffe/layers/dropout_layer.cu +++ b/src/caffe/layers/dropout_layer.cu @@ -22,9 +22,9 @@ __global__ void DropoutForward(const int n, const Dtype* in, template void DropoutLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (Caffe::phase() == Caffe::TRAIN) { unsigned int* mask = @@ -52,14 +52,14 @@ __global__ void DropoutBackward(const int n, const Dtype* in_diff, template void DropoutLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (Caffe::phase() == Caffe::TRAIN) { const unsigned int* mask = static_cast(rand_vec_.gpu_data()); - const int count = (*bottom)[0]->count(); + const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) DropoutBackward<<>>( diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index 883f252..6aeba54 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -8,8 +8,8 @@ namespace caffe { template void DummyDataLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { - const int num_top = top->size(); + const vector*>& top) { + const int num_top = top.size(); const DummyDataParameter& param = this->layer_param_.dummy_data_param(); const int num_data_filler = param.data_filler_size(); CHECK(num_data_filler == 0 || num_data_filler == 1 || @@ -70,7 +70,7 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, (param.height_size() == 1) ? param.height(0) : param.height(i); const int width = (param.width_size() == 1) ? param.width(0) : param.width(i); - (*top)[i]->Reshape(num, channels, height, width); + top[i]->Reshape(num, channels, height, width); } // Run Forward once, with refill_ inverted, to fill the constant Blobs. this->Forward(bottom, top); @@ -83,11 +83,11 @@ void DummyDataLayer::LayerSetUp(const vector*>& bottom, template void DummyDataLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { - for (int i = 0; i < top->size(); ++i) { + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { const int filler_id = (fillers_.size() > 1) ? 
i : 0; if (refill_[filler_id]) { - fillers_[filler_id]->Fill((*top)[i]); + fillers_[filler_id]->Fill(top[i]); } } } diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index 569560f..2830d6d 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void EltwiseLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { CHECK(this->layer_param().eltwise_param().coeff_size() == 0 || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << "Eltwise Layer takes one coefficient per bottom blob."; @@ -30,7 +30,7 @@ void EltwiseLayer::LayerSetUp(const vector*>& bottom, template void EltwiseLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const int num = bottom[0]->num(); const int channels = bottom[0]->channels(); const int height = bottom[0]->height(); @@ -41,22 +41,22 @@ void EltwiseLayer::Reshape(const vector*>& bottom, CHECK_EQ(height, bottom[i]->height()); CHECK_EQ(width, bottom[i]->width()); } - (*top)[0]->Reshape(num, channels, height, width); + top[0]->Reshape(num, channels, height, width); // If max operation, we will initialize the vector index part. if (this->layer_param_.eltwise_param().operation() == - EltwiseParameter_EltwiseOp_MAX && top->size() == 1) { + EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->num(), channels, height, width); } } template void EltwiseLayer::Forward_cpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { int* mask = NULL; const Dtype* bottom_data_a = NULL; const Dtype* bottom_data_b = NULL; - const int count = (*top)[0]->count(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_cpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data); @@ -106,26 +106,26 @@ void EltwiseLayer::Forward_cpu( template void EltwiseLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - for (int i = 0; i < bottom->size(); ++i) { + for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { - const Dtype* bottom_data = (*bottom)[i]->cpu_data(); - Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff(); + const Dtype* bottom_data = bottom[i]->cpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; - for (int j = 0; j < bottom->size(); ++j) { + for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { - caffe_copy(count, (*bottom)[j]->cpu_data(), bottom_diff); + caffe_copy(count, bottom[j]->cpu_data(), bottom_diff); initialized = true; } else { - caffe_mul(count, (*bottom)[j]->cpu_data(), bottom_diff, + caffe_mul(count, bottom[j]->cpu_data(), bottom_diff, bottom_diff); } } diff --git a/src/caffe/layers/eltwise_layer.cu b/src/caffe/layers/eltwise_layer.cu index 16cb6cc..9295cc6 100644 --- a/src/caffe/layers/eltwise_layer.cu +++ b/src/caffe/layers/eltwise_layer.cu @@ -33,10 +33,10 @@ __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, template void 
EltwiseLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { int* mask = NULL; - const int count = (*top)[0]->count(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const int count = top[0]->count(); + Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), @@ -82,26 +82,26 @@ __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, template void EltwiseLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - for (int i = 0; i < bottom->size(); ++i) { + for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { - const Dtype* bottom_data = (*bottom)[i]->gpu_data(); - Dtype* bottom_diff = (*bottom)[i]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[i]->gpu_data(); + Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; - for (int j = 0; j < bottom->size(); ++j) { + for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { - caffe_copy(count, (*bottom)[j]->gpu_data(), bottom_diff); + caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { - caffe_gpu_mul(count, (*bottom)[j]->gpu_data(), bottom_diff, + caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 1b4a13d..abc8610 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void EuclideanLossLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); CHECK_EQ(bottom[0]->height(), bottom[1]->height()); @@ -20,7 +20,7 @@ void EuclideanLossLayer::Reshape( template void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { int count = bottom[0]->count(); caffe_sub( count, @@ -29,22 +29,22 @@ void EuclideanLossLayer::Forward_cpu(const vector*>& bottom, diff_.mutable_cpu_data()); Dtype dot = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data()); Dtype loss = dot / bottom[0]->num() / Dtype(2); - (*top)[0]->mutable_cpu_data()[0] = loss; + top[0]->mutable_cpu_data()[0] = loss; } template void EuclideanLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 
1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->num(); + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_cpu_axpby( - (*bottom)[i]->count(), // count + bottom[i]->count(), // count alpha, // alpha diff_.cpu_data(), // a Dtype(0), // beta - (*bottom)[i]->mutable_cpu_diff()); // b + bottom[i]->mutable_cpu_diff()); // b } } } diff --git a/src/caffe/layers/euclidean_loss_layer.cu b/src/caffe/layers/euclidean_loss_layer.cu index 70b1b9e..0f0c1a8 100644 --- a/src/caffe/layers/euclidean_loss_layer.cu +++ b/src/caffe/layers/euclidean_loss_layer.cu @@ -9,7 +9,7 @@ namespace caffe { template void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { int count = bottom[0]->count(); caffe_gpu_sub( count, @@ -19,22 +19,22 @@ void EuclideanLossLayer::Forward_gpu(const vector*>& bottom, Dtype dot; caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); Dtype loss = dot / bottom[0]->num() / Dtype(2); - (*top)[0]->mutable_cpu_data()[0] = loss; + top[0]->mutable_cpu_data()[0] = loss; } template void EuclideanLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; - const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->num(); + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); caffe_gpu_axpby( - (*bottom)[i]->count(), // count + bottom[i]->count(), // count alpha, // alpha diff_.gpu_data(), // a Dtype(0), // beta - (*bottom)[i]->mutable_gpu_diff()); // b + bottom[i]->mutable_gpu_diff()); // b } } } diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index 65310cd..44df96d 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -8,25 +8,25 @@ namespace caffe { template void FlattenLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { int channels_out = bottom[0]->channels() * bottom[0]->height() * bottom[0]->width(); - (*top)[0]->Reshape(bottom[0]->num(), channels_out, 1, 1); + top[0]->Reshape(bottom[0]->num(), channels_out, 1, 1); count_ = bottom[0]->num() * channels_out; CHECK_EQ(count_, bottom[0]->count()); - CHECK_EQ(count_, (*top)[0]->count()); + CHECK_EQ(count_, top[0]->count()); } template void FlattenLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { - (*top)[0]->ShareData(*bottom[0]); + const vector*>& top) { + top[0]->ShareData(*bottom[0]); } template void FlattenLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { - (*bottom)[0]->ShareDiff(*top[0]); + const vector& propagate_down, const vector*>& bottom) { + bottom[0]->ShareDiff(*top[0]); } #ifdef CPU_ONLY diff --git a/src/caffe/layers/flatten_layer.cu b/src/caffe/layers/flatten_layer.cu index ff23f52..ef89bab 100644 --- a/src/caffe/layers/flatten_layer.cu +++ b/src/caffe/layers/flatten_layer.cu @@ -8,14 +8,14 @@ namespace caffe { template void FlattenLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { - (*top)[0]->ShareData(*bottom[0]); + const vector*>& top) { + top[0]->ShareData(*bottom[0]); } template void FlattenLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { - (*bottom)[0]->ShareDiff(*top[0]); + const vector& propagate_down, const vector*>& bottom) { + bottom[0]->ShareDiff(*top[0]); } INSTANTIATE_CLASS(FlattenLayer); 
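Note (illustration only; the following is not a hunk from this patch): every change above follows the same mechanical pattern, so a minimal before/after sketch of a layer implementation may help when reading the remaining files. MyLayer is a hypothetical layer used purely for illustration; it assumes only the standard Blob interface and caffe_copy call that appear in the hunks themselves.

    // Old signature: void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    //                                 vector<Blob<Dtype>*>* top);
    // New signature:
    template <typename Dtype>
    void MyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
        const vector<Blob<Dtype>*>& top) {
      // top is a const reference to the vector, so the vector itself cannot be
      // resized here; the Blob objects its elements point to remain writable.
      const int count = bottom[0]->count();
      Dtype* top_data = top[0]->mutable_cpu_data();  // was (*top)[0]->mutable_cpu_data()
      CHECK_EQ(count, top[0]->count());              // was (*top)[0]->count()
      caffe_copy(count, bottom[0]->cpu_data(), top_data);
    }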
diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 1f2a835..ff25cbb 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -51,7 +51,7 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { template void HDF5DataLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Read the source to parse the filenames. const string& source = this->layer_param_.hdf5_data_param().source(); LOG(INFO) << "Loading filename from " << source; @@ -74,21 +74,21 @@ void HDF5DataLayer::LayerSetUp(const vector*>& bottom, // Reshape blobs. const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - (*top)[0]->Reshape(batch_size, data_blob_.channels(), + top[0]->Reshape(batch_size, data_blob_.channels(), data_blob_.width(), data_blob_.height()); - (*top)[1]->Reshape(batch_size, label_blob_.channels(), + top[1]->Reshape(batch_size, label_blob_.channels(), label_blob_.width(), label_blob_.height()); - LOG(INFO) << "output data size: " << (*top)[0]->num() << "," - << (*top)[0]->channels() << "," << (*top)[0]->height() << "," - << (*top)[0]->width(); + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); } template void HDF5DataLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - const int data_count = (*top)[0]->count() / (*top)[0]->num(); - const int label_data_count = (*top)[1]->count() / (*top)[1]->num(); + const int data_count = top[0]->count() / top[0]->num(); + const int label_data_count = top[1]->count() / top[1]->num(); for (int i = 0; i < batch_size; ++i, ++current_row_) { if (current_row_ == data_blob_.num()) { @@ -103,10 +103,10 @@ void HDF5DataLayer::Forward_cpu(const vector*>& bottom, current_row_ = 0; } caffe_copy(data_count, &data_blob_.cpu_data()[current_row_ * data_count], - &(*top)[0]->mutable_cpu_data()[i * data_count]); + &top[0]->mutable_cpu_data()[i * data_count]); caffe_copy(label_data_count, &label_blob_.cpu_data()[current_row_ * label_data_count], - &(*top)[1]->mutable_cpu_data()[i * label_data_count]); + &top[1]->mutable_cpu_data()[i * label_data_count]); } } diff --git a/src/caffe/layers/hdf5_data_layer.cu b/src/caffe/layers/hdf5_data_layer.cu index 79cc536..f671ea1 100644 --- a/src/caffe/layers/hdf5_data_layer.cu +++ b/src/caffe/layers/hdf5_data_layer.cu @@ -18,10 +18,10 @@ namespace caffe { template void HDF5DataLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const int batch_size = this->layer_param_.hdf5_data_param().batch_size(); - const int data_count = (*top)[0]->count() / (*top)[0]->num(); - const int label_data_count = (*top)[1]->count() / (*top)[1]->num(); + const int data_count = top[0]->count() / top[0]->num(); + const int label_data_count = top[1]->count() / top[1]->num(); for (int i = 0; i < batch_size; ++i, ++current_row_) { if (current_row_ == data_blob_.num()) { @@ -39,10 +39,10 @@ void HDF5DataLayer::Forward_gpu(const vector*>& bottom, } caffe_copy(data_count, &data_blob_.cpu_data()[current_row_ * data_count], - &(*top)[0]->mutable_gpu_data()[i * data_count]); + &top[0]->mutable_gpu_data()[i * data_count]); caffe_copy(label_data_count, &label_blob_.cpu_data()[current_row_ * label_data_count], - &(*top)[1]->mutable_gpu_data()[i * label_data_count]); + &top[1]->mutable_gpu_data()[i * label_data_count]); } } 
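Call sites change in the same way: wrapper vectors are now passed directly instead of by address. A short sketch in the style of Caffe's layer tests, using FlattenLayer (modified above) with hypothetical bottom_blob/top_blob instances:

    LayerParameter layer_param;
    FlattenLayer<float> layer(layer_param);
    Blob<float> bottom_blob(2, 3, 4, 5), top_blob;
    vector<Blob<float>*> bottom_vec(1, &bottom_blob), top_vec(1, &top_blob);
    vector<bool> propagate_down(1, true);
    layer.SetUp(bottom_vec, top_vec);      // was layer.SetUp(bottom_vec, &top_vec)
    layer.Forward(bottom_vec, top_vec);    // was layer.Forward(bottom_vec, &top_vec)
    layer.Backward(top_vec, propagate_down, bottom_vec);  // was (..., &bottom_vec)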
diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index 3cdbbb3..b41d387 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -40,7 +40,7 @@ void HDF5OutputLayer::SaveBlobs() { template void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { CHECK_GE(bottom.size(), 2); CHECK_EQ(bottom[0]->num(), bottom[1]->num()); data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), @@ -61,7 +61,7 @@ void HDF5OutputLayer::Forward_cpu(const vector*>& bottom, template void HDF5OutputLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { return; } diff --git a/src/caffe/layers/hdf5_output_layer.cu b/src/caffe/layers/hdf5_output_layer.cu index 0813c02..929c1ad 100644 --- a/src/caffe/layers/hdf5_output_layer.cu +++ b/src/caffe/layers/hdf5_output_layer.cu @@ -13,7 +13,7 @@ namespace caffe { template void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { CHECK_GE(bottom.size(), 2); CHECK_EQ(bottom[0]->num(), bottom[1]->num()); data_blob_.Reshape(bottom[0]->num(), bottom[0]->channels(), @@ -34,7 +34,7 @@ void HDF5OutputLayer::Forward_gpu(const vector*>& bottom, template void HDF5OutputLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { return; } diff --git a/src/caffe/layers/hinge_loss_layer.cpp b/src/caffe/layers/hinge_loss_layer.cpp index 8022aae..f09916e 100644 --- a/src/caffe/layers/hinge_loss_layer.cpp +++ b/src/caffe/layers/hinge_loss_layer.cpp @@ -12,7 +12,7 @@ namespace caffe { template void HingeLossLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* label = bottom[1]->cpu_data(); @@ -30,7 +30,7 @@ void HingeLossLayer::Forward_cpu(const vector*>& bottom, Dtype(0), 1 + bottom_diff[i * dim + j]); } } - Dtype* loss = (*top)[0]->mutable_cpu_data(); + Dtype* loss = top[0]->mutable_cpu_data(); switch (this->layer_param_.hinge_loss_param().norm()) { case HingeLossParameter_Norm_L1: loss[0] = caffe_cpu_asum(count, bottom_diff) / num; @@ -45,16 +45,16 @@ void HingeLossLayer::Forward_cpu(const vector*>& bottom, template void HingeLossLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type_name() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const Dtype* label = (*bottom)[1]->cpu_data(); - int num = (*bottom)[0]->num(); - int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* label = bottom[1]->cpu_data(); + int num = bottom[0]->num(); + int count = bottom[0]->count(); int dim = count / num; for (int i = 0; i < num; ++i) { diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 870d5a9..10588ce 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void Im2colLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { ConvolutionParameter conv_param = 
this->layer_param_.convolution_param(); CHECK(!conv_param.has_kernel_size() != !(conv_param.has_kernel_h() && conv_param.has_kernel_w())) @@ -49,11 +49,11 @@ void Im2colLayer::LayerSetUp(const vector*>& bottom, template void Im2colLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); - (*top)[0]->Reshape( + top[0]->Reshape( bottom[0]->num(), channels_ * kernel_h_ * kernel_w_, (height_ + 2 * pad_h_ - kernel_h_) / stride_h_ + 1, (width_ + 2 * pad_w_ - kernel_w_) / stride_w_ + 1); @@ -61,25 +61,25 @@ void Im2colLayer::Reshape(const vector*>& bottom, template void Im2colLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); for (int n = 0; n < bottom[0]->num(); ++n) { im2col_cpu(bottom_data + bottom[0]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, top_data + (*top)[0]->offset(n)); + stride_h_, stride_w_, top_data + top[0]->offset(n)); } } template void Im2colLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); for (int n = 0; n < top[0]->num(); ++n) { col2im_cpu(top_diff + top[0]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, bottom_diff + (*bottom)[0]->offset(n)); + stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); } } diff --git a/src/caffe/layers/im2col_layer.cu b/src/caffe/layers/im2col_layer.cu index 8df061d..bad789c 100644 --- a/src/caffe/layers/im2col_layer.cu +++ b/src/caffe/layers/im2col_layer.cu @@ -9,25 +9,25 @@ namespace caffe { template void Im2colLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); for (int n = 0; n < bottom[0]->num(); ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, top_data + (*top)[0]->offset(n)); + stride_h_, stride_w_, top_data + top[0]->offset(n)); } } template void Im2colLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); for (int n = 0; n < top[0]->num(); ++n) { col2im_gpu(top_diff + top[0]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, - stride_h_, stride_w_, bottom_diff + (*bottom)[0]->offset(n)); + stride_h_, stride_w_, bottom_diff + bottom[0]->offset(n)); } } diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index e4a575b..dc972f1 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -19,7 +19,7 @@ ImageDataLayer::~ImageDataLayer() { template void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const 
int new_height = this->layer_param_.image_data_param().new_height(); const int new_width = this->layer_param_.image_data_param().new_width(); CHECK((new_height == 0 && new_width == 0) || @@ -61,20 +61,20 @@ void ImageDataLayer::DataLayerSetUp(const vector*>& bottom, const int crop_size = this->layer_param_.transform_param().crop_size(); const int batch_size = this->layer_param_.image_data_param().batch_size(); if (crop_size > 0) { - (*top)[0]->Reshape(batch_size, datum.channels(), crop_size, crop_size); + top[0]->Reshape(batch_size, datum.channels(), crop_size, crop_size); this->prefetch_data_.Reshape(batch_size, datum.channels(), crop_size, crop_size); } else { - (*top)[0]->Reshape(batch_size, datum.channels(), datum.height(), + top[0]->Reshape(batch_size, datum.channels(), datum.height(), datum.width()); this->prefetch_data_.Reshape(batch_size, datum.channels(), datum.height(), datum.width()); } - LOG(INFO) << "output data size: " << (*top)[0]->num() << "," - << (*top)[0]->channels() << "," << (*top)[0]->height() << "," - << (*top)[0]->width(); + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // label - (*top)[1]->Reshape(batch_size, 1, 1, 1); + top[1]->Reshape(batch_size, 1, 1, 1); this->prefetch_label_.Reshape(batch_size, 1, 1, 1); // datum size this->datum_channels_ = datum.channels(); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index 894cb69..c4dc1b4 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -12,7 +12,7 @@ namespace caffe { template void InfogainLossLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); if (bottom.size() < 3) { CHECK(this->layer_param_.infogain_loss_param().has_source()) @@ -26,7 +26,7 @@ void InfogainLossLayer::LayerSetUp( template void InfogainLossLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); Blob* infogain = NULL; if (bottom.size() < 3) { @@ -48,7 +48,7 @@ void InfogainLossLayer::Reshape( template void InfogainLossLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); const Dtype* infogain_mat = NULL; @@ -67,13 +67,13 @@ void InfogainLossLayer::Forward_cpu(const vector*>& bottom, loss -= infogain_mat[label * dim + j] * log(prob); } } - (*top)[0]->mutable_cpu_data()[0] = loss / num; + top[0]->mutable_cpu_data()[0] = loss / num; } template void InfogainLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type_name() << " Layer cannot backpropagate to label inputs."; @@ -83,17 +83,17 @@ void InfogainLossLayer::Backward_cpu(const vector*>& top, << " Layer cannot backpropagate to infogain inputs."; } if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); - const Dtype* bottom_label = (*bottom)[1]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); const Dtype* infogain_mat = NULL; - if (bottom->size() < 3) { + if (bottom.size() < 3) { infogain_mat = infogain_.cpu_data(); } else { - infogain_mat = (*bottom)[2]->cpu_data(); + infogain_mat = 
bottom[2]->cpu_data(); } - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - int num = (*bottom)[0]->num(); - int dim = (*bottom)[0]->count() / (*bottom)[0]->num(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); const Dtype scale = - top[0]->cpu_diff()[0] / num; for (int i = 0; i < num; ++i) { const int label = static_cast(bottom_label[i]); diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index ecd05a0..ffd872c 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -11,7 +11,7 @@ namespace caffe { template void InnerProductLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const int num_output = this->layer_param_.inner_product_param().num_output(); bias_term_ = this->layer_param_.inner_product_param().bias_term(); N_ = num_output; @@ -44,12 +44,12 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, template void InnerProductLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Figure out the dimensions M_ = bottom[0]->num(); CHECK_EQ(bottom[0]->count() / bottom[0]->num(), K_) << "Input size " "incompatible with inner product parameters."; - (*top)[0]->Reshape(bottom[0]->num(), N_, 1, 1); + top[0]->Reshape(bottom[0]->num(), N_, 1, 1); // Set up the bias multiplier if (bias_term_) { bias_multiplier_.Reshape(1, 1, 1, M_); @@ -59,9 +59,9 @@ void InnerProductLayer::Reshape(const vector*>& bottom, template void InnerProductLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); const Dtype* weight = this->blobs_[0]->cpu_data(); caffe_cpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, (Dtype)0., top_data); @@ -75,10 +75,10 @@ void InnerProductLayer::Forward_cpu(const vector*>& bottom, template void InnerProductLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->cpu_diff(); - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); // Gradient with respect to weight caffe_cpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_cpu_diff()); @@ -95,7 +95,7 @@ void InnerProductLayer::Backward_cpu(const vector*>& top, // Gradient with respect to bottom data caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->cpu_data(), (Dtype)0., - (*bottom)[0]->mutable_cpu_diff()); + bottom[0]->mutable_cpu_diff()); } } diff --git a/src/caffe/layers/inner_product_layer.cu b/src/caffe/layers/inner_product_layer.cu index 3a0d438..2164b4d 100644 --- a/src/caffe/layers/inner_product_layer.cu +++ b/src/caffe/layers/inner_product_layer.cu @@ -11,9 +11,9 @@ namespace caffe { template void InnerProductLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); caffe_gpu_gemm(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype)1., bottom_data, weight, 
(Dtype)0., top_data); @@ -27,10 +27,10 @@ void InnerProductLayer::Forward_gpu(const vector*>& bottom, template void InnerProductLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (this->param_propagate_down_[0]) { const Dtype* top_diff = top[0]->gpu_diff(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->gpu_data(); // Gradient with respect to weight caffe_gpu_gemm(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1., top_diff, bottom_data, (Dtype)0., this->blobs_[0]->mutable_gpu_diff()); @@ -47,7 +47,7 @@ void InnerProductLayer::Backward_gpu(const vector*>& top, // Gradient with respect to bottom data caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1., top_diff, this->blobs_[0]->gpu_data(), (Dtype)0., - (*bottom)[0]->mutable_gpu_diff()); + bottom[0]->mutable_gpu_diff()); } } diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index 9eb9dbd..a5b6d11 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -12,7 +12,7 @@ namespace caffe { template void LossLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // LossLayers have a non-zero (1) loss by default. if (this->layer_param_.loss_weight_size() == 0) { this->layer_param_.add_loss_weight(Dtype(1)); @@ -21,10 +21,10 @@ void LossLayer::LayerSetUp( template void LossLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { CHECK_EQ(bottom[0]->num(), bottom[1]->num()) << "The data and label should have the same number."; - (*top)[0]->Reshape(1, 1, 1, 1); + top[0]->Reshape(1, 1, 1, 1); } INSTANTIATE_CLASS(LossLayer); diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index d9e41e9..fb74b03 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { template void LRNLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { size_ = this->layer_param_.lrn_param().local_size(); CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size"; pre_pad_ = (size_ - 1) / 2; @@ -22,7 +22,7 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, split_top_vec_.push_back(&square_input_); LayerParameter split_param; split_layer_.reset(new SplitLayer(split_param)); - split_layer_->SetUp(bottom, &split_top_vec_); + split_layer_->SetUp(bottom, split_top_vec_); // Set up square_layer_ to square the inputs. square_bottom_vec_.clear(); square_top_vec_.clear(); @@ -31,7 +31,7 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, LayerParameter square_param; square_param.mutable_power_param()->set_power(Dtype(2)); square_layer_.reset(new PowerLayer(square_param)); - square_layer_->SetUp(square_bottom_vec_, &square_top_vec_); + square_layer_->SetUp(square_bottom_vec_, square_top_vec_); // Set up pool_layer_ to sum over square neighborhoods of the input. 
pool_top_vec_.clear(); pool_top_vec_.push_back(&pool_output_); @@ -41,7 +41,7 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, pool_param.mutable_pooling_param()->set_pad(pre_pad_); pool_param.mutable_pooling_param()->set_kernel_size(size_); pool_layer_.reset(new PoolingLayer(pool_param)); - pool_layer_->SetUp(square_top_vec_, &pool_top_vec_); + pool_layer_->SetUp(square_top_vec_, pool_top_vec_); // Set up power_layer_ to compute (1 + alpha_/N^2 s)^-beta_, where s is // the sum of a squared neighborhood (the output of pool_layer_). power_top_vec_.clear(); @@ -51,7 +51,7 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, power_param.mutable_power_param()->set_scale(alpha_); power_param.mutable_power_param()->set_shift(Dtype(1)); power_layer_.reset(new PowerLayer(power_param)); - power_layer_->SetUp(pool_top_vec_, &power_top_vec_); + power_layer_->SetUp(pool_top_vec_, power_top_vec_); // Set up a product_layer_ to compute outputs by multiplying inputs by the // inverse demoninator computed by the power layer. product_bottom_vec_.clear(); @@ -67,21 +67,21 @@ void LRNLayer::LayerSetUp(const vector*>& bottom, template void LRNLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { num_ = bottom[0]->num(); channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: - (*top)[0]->Reshape(num_, channels_, height_, width_); + top[0]->Reshape(num_, channels_, height_, width_); scale_.Reshape(num_, channels_, height_, width_); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: - split_layer_->Reshape(bottom, &split_top_vec_); - square_layer_->Reshape(square_bottom_vec_, &square_top_vec_); - pool_layer_->Reshape(square_top_vec_, &pool_top_vec_); - power_layer_->Reshape(pool_top_vec_, &power_top_vec_); + split_layer_->Reshape(bottom, split_top_vec_); + square_layer_->Reshape(square_bottom_vec_, square_top_vec_); + pool_layer_->Reshape(square_top_vec_, pool_top_vec_); + power_layer_->Reshape(pool_top_vec_, power_top_vec_); product_layer_->Reshape(product_bottom_vec_, top); break; } @@ -89,7 +89,7 @@ void LRNLayer::Reshape(const vector*>& bottom, template void LRNLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_cpu(bottom, top); @@ -104,9 +104,9 @@ void LRNLayer::Forward_cpu(const vector*>& bottom, template void LRNLayer::CrossChannelForward_cpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); // start with the constant value for (int i = 0; i < scale_.count(); ++i) { @@ -151,17 +151,17 @@ void LRNLayer::CrossChannelForward_cpu( template void LRNLayer::WithinChannelForward( - const vector*>& bottom, vector*>* top) { - split_layer_->Forward(bottom, &split_top_vec_); - square_layer_->Forward(square_bottom_vec_, &square_top_vec_); - pool_layer_->Forward(square_top_vec_, &pool_top_vec_); - power_layer_->Forward(pool_top_vec_, &power_top_vec_); + const vector*>& bottom, const vector*>& top) { + split_layer_->Forward(bottom, split_top_vec_); + square_layer_->Forward(square_bottom_vec_, square_top_vec_); + pool_layer_->Forward(square_top_vec_, 
pool_top_vec_); + power_layer_->Forward(pool_top_vec_, power_top_vec_); product_layer_->Forward(product_bottom_vec_, top); } template void LRNLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_cpu(top, propagate_down, bottom); @@ -177,12 +177,12 @@ void LRNLayer::Backward_cpu(const vector*>& top, template void LRNLayer::CrossChannelBackward_cpu( const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* top_data = top[0]->cpu_data(); - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* scale_data = scale_.cpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); Blob accum_ratio(1, 1, height_, width_); Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); @@ -232,14 +232,14 @@ void LRNLayer::CrossChannelBackward_cpu( template void LRNLayer::WithinChannelBackward( const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { vector product_propagate_down(2, true); - product_layer_->Backward(top, product_propagate_down, &product_bottom_vec_); - power_layer_->Backward(power_top_vec_, propagate_down, &pool_top_vec_); - pool_layer_->Backward(pool_top_vec_, propagate_down, &square_top_vec_); + product_layer_->Backward(top, product_propagate_down, product_bottom_vec_); + power_layer_->Backward(power_top_vec_, propagate_down, pool_top_vec_); + pool_layer_->Backward(pool_top_vec_, propagate_down, square_top_vec_); square_layer_->Backward(square_top_vec_, propagate_down, - &square_bottom_vec_); + square_bottom_vec_); split_layer_->Backward(split_top_vec_, propagate_down, bottom); } } diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index d6cb23b..ee5e359 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -55,7 +55,7 @@ __global__ void LRNFillScale(const int nthreads, const Dtype* in, template void LRNLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); @@ -79,10 +79,10 @@ __global__ void LRNComputeOutput(const int nthreads, const Dtype* in, template void LRNLayer::CrossChannelForward_gpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
@@ -102,7 +102,7 @@ void LRNLayer::CrossChannelForward_gpu( template void LRNLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); @@ -179,14 +179,14 @@ __global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data, template void LRNLayer::CrossChannelBackward_gpu( const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff<<>>( - n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), + n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), - (*bottom)[0]->mutable_gpu_diff()); + bottom[0]->mutable_gpu_diff()); } diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index ab631a8..d254da3 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -8,7 +8,7 @@ namespace caffe { template void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { batch_size_ = this->layer_param_.memory_data_param().batch_size(); this->datum_channels_ = this->layer_param_.memory_data_param().channels(); this->datum_height_ = this->layer_param_.memory_data_param().height(); @@ -18,9 +18,9 @@ void MemoryDataLayer::DataLayerSetUp(const vector*>& bottom, CHECK_GT(batch_size_ * this->datum_size_, 0) << "batch_size, channels, height, and width must be specified and" " positive in memory_data_param"; - (*top)[0]->Reshape(batch_size_, this->datum_channels_, this->datum_height_, + top[0]->Reshape(batch_size_, this->datum_channels_, this->datum_height_, this->datum_width_); - (*top)[1]->Reshape(batch_size_, 1, 1, 1); + top[1]->Reshape(batch_size_, 1, 1, 1); added_data_.Reshape(batch_size_, this->datum_channels_, this->datum_height_, this->datum_width_); added_label_.Reshape(batch_size_, 1, 1, 1); @@ -66,10 +66,10 @@ void MemoryDataLayer::Reset(Dtype* data, Dtype* labels, int n) { template void MemoryDataLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { CHECK(data_) << "MemoryDataLayer needs to be initalized by calling Reset"; - (*top)[0]->set_cpu_data(data_ + pos_ * this->datum_size_); - (*top)[1]->set_cpu_data(labels_ + pos_); + top[0]->set_cpu_data(data_ + pos_ * this->datum_size_); + top[1]->set_cpu_data(labels_ + pos_); pos_ = (pos_ + batch_size_) % n_; has_new_data_ = false; } diff --git a/src/caffe/layers/multinomial_logistic_loss_layer.cpp b/src/caffe/layers/multinomial_logistic_loss_layer.cpp index c0fe196..66f9ba8 100644 --- a/src/caffe/layers/multinomial_logistic_loss_layer.cpp +++ b/src/caffe/layers/multinomial_logistic_loss_layer.cpp @@ -12,7 +12,7 @@ namespace caffe { template void MultinomialLogisticLossLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[1]->channels(), 1); CHECK_EQ(bottom[1]->height(), 1); @@ -21,7 +21,7 @@ void MultinomialLogisticLossLayer::Reshape( template void MultinomialLogisticLossLayer::Forward_cpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { const 
Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* bottom_label = bottom[1]->cpu_data(); int num = bottom[0]->num(); @@ -33,24 +33,24 @@ void MultinomialLogisticLossLayer::Forward_cpu( bottom_data[i * dim + label], Dtype(kLOG_THRESHOLD)); loss -= log(prob); } - (*top)[0]->mutable_cpu_data()[0] = loss / num; + top[0]->mutable_cpu_data()[0] = loss / num; } template void MultinomialLogisticLossLayer::Backward_cpu( const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type_name() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); - const Dtype* bottom_label = (*bottom)[1]->cpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - int num = (*bottom)[0]->num(); - int dim = (*bottom)[0]->count() / (*bottom)[0]->num(); - caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff); + const Dtype* bottom_data = bottom[0]->cpu_data(); + const Dtype* bottom_label = bottom[1]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / bottom[0]->num(); + caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); const Dtype scale = - top[0]->cpu_diff()[0] / num; for (int i = 0; i < num; ++i) { int label = static_cast(bottom_label[i]); diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 6a57b3e..419f170 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -9,8 +9,8 @@ namespace caffe { template void MVNLayer::Reshape(const vector*>& bottom, - vector*>* top) { - (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + const vector*>& top) { + top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width()); mean_.Reshape(bottom[0]->num(), bottom[0]->channels(), 1, 1); @@ -26,9 +26,9 @@ void MVNLayer::Reshape(const vector*>& bottom, template void MVNLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); int num; if (this->layer_param_.mvn_param().across_channels()) num = bottom[0]->num(); @@ -89,19 +89,19 @@ void MVNLayer::Forward_cpu(const vector*>& bottom, template void MVNLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* top_data = top[0]->cpu_data(); - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); int num; if (this->layer_param_.mvn_param().across_channels()) - num = (*bottom)[0]->num(); + num = bottom[0]->num(); else - num = (*bottom)[0]->num() * (*bottom)[0]->channels(); + num = bottom[0]->num() * bottom[0]->channels(); - int dim = (*bottom)[0]->count() / num; + int dim = bottom[0]->count() / num; Dtype eps = 1e-10; if (this->layer_param_.mvn_param().normalize_variance()) { diff --git a/src/caffe/layers/mvn_layer.cu b/src/caffe/layers/mvn_layer.cu index 2c02dfe..c2a241f 100644 --- a/src/caffe/layers/mvn_layer.cu +++ b/src/caffe/layers/mvn_layer.cu @@ -9,9 +9,9 @@ namespace caffe { template void MVNLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) 
{ const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); int num; if (this->layer_param_.mvn_param().across_channels()) num = bottom[0]->num(); @@ -73,19 +73,19 @@ void MVNLayer::Forward_gpu(const vector*>& bottom, template void MVNLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int num; if (this->layer_param_.mvn_param().across_channels()) - num = (*bottom)[0]->num(); + num = bottom[0]->num(); else - num = (*bottom)[0]->num() * (*bottom)[0]->channels(); + num = bottom[0]->num() * bottom[0]->channels(); - int dim = (*bottom)[0]->count() / num; + int dim = bottom[0]->count() / num; Dtype eps = 1e-10; diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp index c28e36e..ba67b43 100644 --- a/src/caffe/layers/neuron_layer.cpp +++ b/src/caffe/layers/neuron_layer.cpp @@ -7,8 +7,8 @@ namespace caffe { template void NeuronLayer::Reshape(const vector*>& bottom, - vector*>* top) { - (*top)[0]->ReshapeLike(*bottom[0]); + const vector*>& top) { + top[0]->ReshapeLike(*bottom[0]); } INSTANTIATE_CLASS(NeuronLayer); diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 8e8ffad..26c92c1 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -15,7 +15,7 @@ using std::max; template void PoolingLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { PoolingParameter pool_param = this->layer_param_.pooling_param(); CHECK(!pool_param.has_kernel_size() != !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) @@ -64,7 +64,7 @@ void PoolingLayer::LayerSetUp(const vector*>& bottom, template void PoolingLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); @@ -84,14 +84,14 @@ void PoolingLayer::Reshape(const vector*>& bottom, CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_); CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_); } - (*top)[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); - if (top->size() > 1) { - (*top)[1]->ReshapeLike(*(*top)[0]); + if (top.size() > 1) { + top[1]->ReshapeLike(*top[0]); } // If max pooling, we will initialize the vector index part. if (this->layer_param_.pooling_param().pool() == - PoolingParameter_PoolMethod_MAX && top->size() == 1) { + PoolingParameter_PoolMethod_MAX && top.size() == 1) { max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, pooled_width_); } @@ -107,12 +107,12 @@ void PoolingLayer::Reshape(const vector*>& bottom, // case? template void PoolingLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); - const int top_count = (*top)[0]->count(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int top_count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
- const bool use_top_mask = top->size() > 1; + const bool use_top_mask = top.size() > 1; int* mask = NULL; // suppress warnings about uninitalized variables Dtype* top_mask = NULL; // Different pooling methods. We explicitly do the switch outside the for @@ -121,7 +121,7 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, case PoolingParameter_PoolMethod_MAX: // Initialize if (use_top_mask) { - top_mask = (*top)[1]->mutable_cpu_data(); + top_mask = top[1]->mutable_cpu_data(); caffe_set(top_count, Dtype(-1), top_mask); } else { mask = max_idx_.mutable_cpu_data(); @@ -157,11 +157,11 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, } // compute offset bottom_data += bottom[0]->offset(0, 1); - top_data += (*top)[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); if (use_top_mask) { - top_mask += (*top)[0]->offset(0, 1); + top_mask += top[0]->offset(0, 1); } else { - mask += (*top)[0]->offset(0, 1); + mask += top[0]->offset(0, 1); } } } @@ -195,7 +195,7 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, } // compute offset bottom_data += bottom[0]->offset(0, 1); - top_data += (*top)[0]->offset(0, 1); + top_data += top[0]->offset(0, 1); } } break; @@ -209,15 +209,15 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, template void PoolingLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); // Different pooling methods. We explicitly do the switch outside the for // loop to save time, although this results in more codes. - caffe_set((*bottom)[0]->count(), Dtype(0), bottom_diff); + caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; // suppress warnings about uninitialized variables @@ -240,7 +240,7 @@ void PoolingLayer::Backward_cpu(const vector*>& top, bottom_diff[bottom_index] += top_diff[index]; } } - bottom_diff += (*bottom)[0]->offset(0, 1); + bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); if (use_top_mask) { top_mask += top[0]->offset(0, 1); @@ -274,7 +274,7 @@ void PoolingLayer::Backward_cpu(const vector*>& top, } } // offset - bottom_diff += (*bottom)[0]->offset(0, 1); + bottom_diff += bottom[0]->offset(0, 1); top_diff += top[0]->offset(0, 1); } } diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index e64128b..aec985a 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -152,18 +152,18 @@ __global__ void StoPoolForwardTest(const int nthreads, template void PoolingLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); - int count = (*top)[0]->count(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
- const bool use_top_mask = top->size() > 1; + const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { - top_mask = (*top)[1]->mutable_gpu_data(); + top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } @@ -325,13 +325,13 @@ __global__ void StoPoolBackward(const int nthreads, template void PoolingLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp index bf61955..04c6122 100644 --- a/src/caffe/layers/power_layer.cpp +++ b/src/caffe/layers/power_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void PowerLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { NeuronLayer::LayerSetUp(bottom, top); power_ = this->layer_param_.power_param().power(); scale_ = this->layer_param_.power_param().scale(); @@ -20,8 +20,8 @@ void PowerLayer::LayerSetUp(const vector*>& bottom, // Compute y = (shift + scale * x)^power template void PowerLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + const vector*>& top) { + Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); // Special case where we can ignore the input: scale or power is 0. if (diff_scale_ == Dtype(0)) { @@ -45,15 +45,15 @@ void PowerLayer::Forward_cpu(const vector*>& bottom, template void PowerLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); const Dtype* top_diff = top[0]->cpu_diff(); if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { caffe_set(count, diff_scale_, bottom_diff); } else { - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) // = diff_scale * y / (shift + scale * x) if (power_ == Dtype(2)) { diff --git a/src/caffe/layers/power_layer.cu b/src/caffe/layers/power_layer.cu index a40bc75..367320a 100644 --- a/src/caffe/layers/power_layer.cu +++ b/src/caffe/layers/power_layer.cu @@ -9,8 +9,8 @@ namespace caffe { template void PowerLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + const vector*>& top) { + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // Special case where we can ignore the input: scale or power is 0. 
if (diff_scale_ == Dtype(0)) { @@ -34,15 +34,15 @@ void PowerLayer::Forward_gpu(const vector*>& bottom, template void PowerLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) { caffe_gpu_set(count, diff_scale_, bottom_diff); } else { - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->gpu_data(); // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1) // = diff_scale * y / (shift + scale * x) if (power_ == Dtype(2)) { diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp index b50352f..7d5e603 100644 --- a/src/caffe/layers/relu_layer.cpp +++ b/src/caffe/layers/relu_layer.cpp @@ -8,9 +8,9 @@ namespace caffe { template void ReLULayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); for (int i = 0; i < count; ++i) { @@ -22,12 +22,12 @@ void ReLULayer::Forward_cpu(const vector*>& bottom, template void ReLULayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index def2bbc..22d5f4b 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -16,9 +16,9 @@ __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, template void ReLULayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) @@ -44,12 +44,12 @@ __global__ void ReLUBackward(const int n, const Dtype* in_diff, template void ReLULayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { - const Dtype* bottom_data = (*bottom)[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); Dtype negative_slope = 
this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<<>>( diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 6a48099..11850ac 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -10,30 +10,30 @@ namespace caffe { template void SigmoidCrossEntropyLossLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); sigmoid_bottom_vec_.clear(); sigmoid_bottom_vec_.push_back(bottom[0]); sigmoid_top_vec_.clear(); sigmoid_top_vec_.push_back(sigmoid_output_.get()); - sigmoid_layer_->SetUp(sigmoid_bottom_vec_, &sigmoid_top_vec_); + sigmoid_layer_->SetUp(sigmoid_bottom_vec_, sigmoid_top_vec_); } template void SigmoidCrossEntropyLossLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); CHECK_EQ(bottom[0]->count(), bottom[1]->count()) << "SIGMOID_CROSS_ENTROPY_LOSS layer inputs must have the same count."; - sigmoid_layer_->Reshape(sigmoid_bottom_vec_, &sigmoid_top_vec_); + sigmoid_layer_->Reshape(sigmoid_bottom_vec_, sigmoid_top_vec_); } template void SigmoidCrossEntropyLossLayer::Forward_cpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; - sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_); + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); // Compute the loss (negative log likelihood) const int count = bottom[0]->count(); const int num = bottom[0]->num(); @@ -45,24 +45,24 @@ void SigmoidCrossEntropyLossLayer::Forward_cpu( loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } - (*top)[0]->mutable_cpu_data()[0] = loss / num; + top[0]->mutable_cpu_data()[0] = loss / num; } template void SigmoidCrossEntropyLossLayer::Backward_cpu( const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type_name() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff - const int count = (*bottom)[0]->count(); - const int num = (*bottom)[0]->num(); + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data(); - const Dtype* target = (*bottom)[1]->cpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + const Dtype* target = bottom[1]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); caffe_sub(count, sigmoid_output_data, target, bottom_diff); // Scale down gradient const Dtype loss_weight = top[0]->cpu_diff()[0]; diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu index 8d0fdc6..175f6f8 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cu @@ -10,10 +10,10 @@ namespace caffe { template void SigmoidCrossEntropyLossLayer::Forward_gpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // The forward pass computes the sigmoid outputs. 
sigmoid_bottom_vec_[0] = bottom[0]; - sigmoid_layer_->Forward(sigmoid_bottom_vec_, &sigmoid_top_vec_); + sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); // Compute the loss (negative log likelihood) const int count = bottom[0]->count(); const int num = bottom[0]->num(); @@ -25,24 +25,24 @@ void SigmoidCrossEntropyLossLayer::Forward_gpu( loss -= input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } - (*top)[0]->mutable_cpu_data()[0] = loss / num; + top[0]->mutable_cpu_data()[0] = loss / num; } template void SigmoidCrossEntropyLossLayer::Backward_gpu( const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type_name() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff - const int count = (*bottom)[0]->count(); - const int num = (*bottom)[0]->num(); + const int count = bottom[0]->count(); + const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); - const Dtype* target = (*bottom)[1]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + const Dtype* target = bottom[1]->gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(count, sigmoid_output_data, bottom_diff); caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); // Scale down gradient diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp index d7bba7f..48c3849 100644 --- a/src/caffe/layers/sigmoid_layer.cpp +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -14,9 +14,9 @@ inline Dtype sigmoid(Dtype x) { template void SigmoidLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { top_data[i] = sigmoid(bottom_data[i]); @@ -26,12 +26,12 @@ void SigmoidLayer::Forward_cpu(const vector*>& bottom, template void SigmoidLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { const Dtype sigmoid_x = top_data[i]; bottom_diff[i] = top_diff[i] * sigmoid_x * (1. 
- sigmoid_x); diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index e1ebb1f..6879ff7 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -16,9 +16,9 @@ __global__ void SigmoidForward(const int n, const Dtype* in, Dtype* out) { template void SigmoidLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) SigmoidForward<<>>( @@ -43,12 +43,12 @@ __global__ void SigmoidBackward(const int n, const Dtype* in_diff, template void SigmoidLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) SigmoidBackward<<>>( count, top_diff, top_data, bottom_diff); diff --git a/src/caffe/layers/silence_layer.cpp b/src/caffe/layers/silence_layer.cpp index 75dbbf3..0fd8858 100644 --- a/src/caffe/layers/silence_layer.cpp +++ b/src/caffe/layers/silence_layer.cpp @@ -8,11 +8,11 @@ namespace caffe { template void SilenceLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { - for (int i = 0; i < bottom->size(); ++i) { + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { - caffe_set((*bottom)[i]->count(), Dtype(0), - (*bottom)[i]->mutable_cpu_data()); + caffe_set(bottom[i]->count(), Dtype(0), + bottom[i]->mutable_cpu_data()); } } } diff --git a/src/caffe/layers/silence_layer.cu b/src/caffe/layers/silence_layer.cu index 735abe6..b350b19 100644 --- a/src/caffe/layers/silence_layer.cu +++ b/src/caffe/layers/silence_layer.cu @@ -8,17 +8,17 @@ namespace caffe { template void SilenceLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // Do nothing. 
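For readability, here is a sketch of one of the updated GPU forward passes with the template arguments and launch configuration written out in full (the sigmoid case from the hunk above; the body is otherwise as in the hunk):

    template <typename Dtype>
    void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
        const vector<Blob<Dtype>*>& top) {
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // top is now a const reference, but its Blob<Dtype>* elements still
      // expose mutable data, which is all the layer needs.
      Dtype* top_data = top[0]->mutable_gpu_data();
      const int count = bottom[0]->count();
      // NOLINT_NEXT_LINE(whitespace/operators)
      SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, bottom_data, top_data);
    }

The same mechanical substitution ((*top)[i] becomes top[i], top->size() becomes top.size()) applies to every layer touched below.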
} template void SilenceLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { - for (int i = 0; i < bottom->size(); ++i) { + const vector& propagate_down, const vector*>& bottom) { + for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { - caffe_gpu_set((*bottom)[i]->count(), Dtype(0), - (*bottom)[i]->mutable_gpu_data()); + caffe_gpu_set(bottom[i]->count(), Dtype(0), + bottom[i]->mutable_gpu_data()); } } } diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index ed679a9..17144c1 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -9,7 +9,7 @@ namespace caffe { template void SliceLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const SliceParameter& slice_param = this->layer_param_.slice_param(); slice_dim_ = slice_param.slice_dim(); CHECK_GE(slice_dim_, 0); @@ -22,18 +22,18 @@ void SliceLayer::LayerSetUp(const vector*>& bottom, template void SliceLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { count_ = 0; num_ = bottom[0]->num(); channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); if (slice_point_.size() != 0) { - CHECK_EQ(slice_point_.size(), top->size() - 1); + CHECK_EQ(slice_point_.size(), top.size() - 1); if (slice_dim_ == 0) { - CHECK_LE(top->size(), num_); + CHECK_LE(top.size(), num_); } else { - CHECK_LE(top->size(), channels_); + CHECK_LE(top.size(), channels_); } int prev = 0; vector slices; @@ -44,32 +44,32 @@ void SliceLayer::Reshape(const vector*>& bottom, } if (slice_dim_ == 0) { slices.push_back(num_ - prev); - for (int i = 0; i < top->size(); ++i) { - (*top)[i]->Reshape(slices[i], channels_, height_, width_); - count_ += (*top)[i]->count(); + for (int i = 0; i < top.size(); ++i) { + top[i]->Reshape(slices[i], channels_, height_, width_); + count_ += top[i]->count(); } } else { slices.push_back(channels_ - prev); - for (int i = 0; i < top->size(); ++i) { - (*top)[i]->Reshape(num_, slices[i], height_, width_); - count_ += (*top)[i]->count(); + for (int i = 0; i < top.size(); ++i) { + top[i]->Reshape(num_, slices[i], height_, width_); + count_ += top[i]->count(); } } } else { if (slice_dim_ == 0) { - CHECK_EQ(num_ % top->size(), 0) - << "Number of top blobs (" << top->size() << ") " + CHECK_EQ(num_ % top.size(), 0) + << "Number of top blobs (" << top.size() << ") " << "should evenly divide input num ( " << num_ << ")"; - num_ = num_ / top->size(); + num_ = num_ / top.size(); } else { - CHECK_EQ(channels_ % top->size(), 0) - << "Number of top blobs (" << top->size() << ") " + CHECK_EQ(channels_ % top.size(), 0) + << "Number of top blobs (" << top.size() << ") " << "should evenly divide input channels ( " << channels_ << ")"; - channels_ = channels_ / top->size(); + channels_ = channels_ / top.size(); } - for (int i = 0; i < top->size(); ++i) { - (*top)[i]->Reshape(num_, channels_, height_, width_); - count_ += (*top)[i]->count(); + for (int i = 0; i < top.size(); ++i) { + top[i]->Reshape(num_, channels_, height_, width_); + count_ += top[i]->count(); } } CHECK_EQ(count_, bottom[0]->count()); @@ -77,12 +77,12 @@ void SliceLayer::Reshape(const vector*>& bottom, template void SliceLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->mutable_cpu_data(); if (slice_dim_ == 0) { int offset_num = 0; - for (int i = 0; i < top->size(); ++i) { - Blob* blob = (*top)[i]; + for (int i = 0; i < 
top.size(); ++i) { + Blob* blob = top[i]; Dtype* top_data = blob->mutable_cpu_data(); caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num), top_data); @@ -90,8 +90,8 @@ void SliceLayer::Forward_cpu(const vector*>& bottom, } } else if (slice_dim_ == 1) { int offset_channel = 0; - for (int i = 0; i < top->size(); ++i) { - Blob* blob = (*top)[i]; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; Dtype* top_data = blob->mutable_cpu_data(); const int num_elem = blob->channels() * blob->height() * blob->width(); for (int n = 0; n < num_; ++n) { @@ -105,16 +105,16 @@ void SliceLayer::Forward_cpu(const vector*>& bottom, template void SliceLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); if (slice_dim_ == 0) { int offset_num = 0; for (int i = 0; i < top.size(); ++i) { Blob* blob = top[i]; const Dtype* top_diff = blob->cpu_diff(); caffe_copy(blob->count(), top_diff, - bottom_diff + (*bottom)[0]->offset(offset_num)); + bottom_diff + bottom[0]->offset(offset_num)); offset_num += blob->num(); } } else if (slice_dim_ == 1) { @@ -125,7 +125,7 @@ void SliceLayer::Backward_cpu(const vector*>& top, const int num_elem = blob->channels() * blob->height() * blob->width(); for (int n = 0; n < num_; ++n) { caffe_copy(num_elem, top_diff + blob->offset(n), - bottom_diff + (*bottom)[0]->offset(n, offset_channel)); + bottom_diff + bottom[0]->offset(n, offset_channel)); } offset_channel += blob->channels(); } diff --git a/src/caffe/layers/slice_layer.cu b/src/caffe/layers/slice_layer.cu index f64e575..6578408 100644 --- a/src/caffe/layers/slice_layer.cu +++ b/src/caffe/layers/slice_layer.cu @@ -8,12 +8,12 @@ namespace caffe { template void SliceLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); if (slice_dim_ == 0) { int offset_num = 0; - for (int i = 0; i < top->size(); ++i) { - Blob* blob = (*top)[i]; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; Dtype* top_data = blob->mutable_gpu_data(); caffe_copy(blob->count(), bottom_data + bottom[0]->offset(offset_num), top_data); @@ -21,8 +21,8 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, } } else if (slice_dim_ == 1) { int offset_channel = 0; - for (int i = 0; i < top->size(); ++i) { - Blob* blob = (*top)[i]; + for (int i = 0; i < top.size(); ++i) { + Blob* blob = top[i]; Dtype* top_data = blob->mutable_gpu_data(); const int num_elem = blob->channels() * blob->height() * blob->width(); for (int n = 0; n < num_; ++n) { @@ -36,16 +36,16 @@ void SliceLayer::Forward_gpu(const vector*>& bottom, template void SliceLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (slice_dim_ == 0) { int offset_num = 0; for (int i = 0; i < top.size(); ++i) { Blob* blob = top[i]; const Dtype* top_diff = blob->gpu_diff(); caffe_copy(blob->count(), top_diff, - bottom_diff + (*bottom)[0]->offset(offset_num)); + bottom_diff + bottom[0]->offset(offset_num)); offset_num += blob->num(); } } else if (slice_dim_ == 1) { @@ -56,7 +56,7 @@ void 
SliceLayer::Backward_gpu(const vector*>& top, const int num_elem = blob->channels() * blob->height() * blob->width(); for (int n = 0; n < num_; ++n) { caffe_copy(num_elem, top_diff + blob->offset(n), - bottom_diff + (*bottom)[0]->offset(n, offset_channel)); + bottom_diff + bottom[0]->offset(n, offset_channel)); } offset_channel += blob->channels(); } diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index 60668a3..04e8c4f 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -9,8 +9,8 @@ namespace caffe { template void SoftmaxLayer::Reshape(const vector*>& bottom, - vector*>* top) { - (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), + const vector*>& top) { + top[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width()); sum_multiplier_.Reshape(1, bottom[0]->channels(), 1, 1); Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data(); @@ -22,9 +22,9 @@ void SoftmaxLayer::Reshape(const vector*>& bottom, template void SoftmaxLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); @@ -52,8 +52,8 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, top_data + i * dim, sum_multiplier_.cpu_data(), 0., scale_data); // division for (int j = 0; j < channels; j++) { - caffe_div(spatial_dim, top_data + (*top)[0]->offset(i, j), scale_data, - top_data + (*top)[0]->offset(i, j)); + caffe_div(spatial_dim, top_data + top[0]->offset(i, j), scale_data, + top_data + top[0]->offset(i, j)); } } } @@ -61,10 +61,10 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, template void SoftmaxLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { const Dtype* top_diff = top[0]->cpu_diff(); const Dtype* top_data = top[0]->cpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); Dtype* scale_data = scale_.mutable_cpu_data(); int num = top[0]->num(); int channels = top[0]->channels(); diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index f97eafc..8ba31d7 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -87,9 +87,9 @@ __global__ void kernel_channel_dot(const int num, const int channels, template void SoftmaxLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); @@ -126,10 +126,10 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, template void SoftmaxLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); int num = top[0]->num(); int channels = top[0]->channels(); diff 
--git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index 55392c3..651320c 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -10,31 +10,31 @@ namespace caffe { template void SoftmaxWithLossLayer::LayerSetUp( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::LayerSetUp(bottom, top); softmax_bottom_vec_.clear(); softmax_bottom_vec_.push_back(bottom[0]); softmax_top_vec_.clear(); softmax_top_vec_.push_back(&prob_); - softmax_layer_->SetUp(softmax_bottom_vec_, &softmax_top_vec_); + softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_); } template void SoftmaxWithLossLayer::Reshape( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { LossLayer::Reshape(bottom, top); - softmax_layer_->Reshape(softmax_bottom_vec_, &softmax_top_vec_); - if (top->size() >= 2) { + softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_); + if (top.size() >= 2) { // softmax output - (*top)[1]->ReshapeLike(*bottom[0]); + top[1]->ReshapeLike(*bottom[0]); } } template void SoftmaxWithLossLayer::Forward_cpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { // The forward pass computes the softmax prob values. - softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_); + softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.cpu_data(); const Dtype* label = bottom[1]->cpu_data(); int num = prob_.num(); @@ -48,25 +48,25 @@ void SoftmaxWithLossLayer::Forward_cpu( Dtype(FLT_MIN))); } } - (*top)[0]->mutable_cpu_data()[0] = loss / num / spatial_dim; - if (top->size() == 2) { - (*top)[1]->ShareData(prob_); + top[0]->mutable_cpu_data()[0] = loss / num / spatial_dim; + if (top.size() == 2) { + top[1]->ShareData(prob_); } } template void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type_name() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const Dtype* prob_data = prob_.cpu_data(); caffe_copy(prob_.count(), prob_data, bottom_diff); - const Dtype* label = (*bottom)[1]->cpu_data(); + const Dtype* label = bottom[1]->cpu_data(); int num = prob_.num(); int dim = prob_.count() / num; int spatial_dim = prob_.height() * prob_.width(); diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 9ef8dd2..060dc24 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -10,13 +10,13 @@ namespace caffe { template void SoftmaxWithLossLayer::Forward_gpu( - const vector*>& bottom, vector*>* top) { + const vector*>& bottom, const vector*>& top) { Forward_cpu(bottom, top); } template void SoftmaxWithLossLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { // TODO(Yangqing): implement the GPU version of softmax. 
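The signature change applied throughout is the same in every file. Spelled out with the template arguments, a layer implementation moves from

    virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
        vector<Blob<Dtype>*>* top);
    virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
        const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom);

to

    virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
        const vector<Blob<Dtype>*>& top);
    virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
        const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

and likewise for LayerSetUp, Reshape, Forward_gpu, and Backward_gpu. Only the vectors become const; the pointed-to blobs remain writable, so calls such as top[0]->mutable_cpu_data() and bottom[0]->mutable_cpu_diff() stay valid, as the hunks show.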
Backward_cpu(top, propagate_down, bottom); } diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 40d3600..ee6b5a9 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -8,44 +8,44 @@ namespace caffe { template void SplitLayer::Reshape(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { count_ = bottom[0]->count(); - for (int i = 0; i < top->size(); ++i) { + for (int i = 0; i < top.size(); ++i) { // Do not allow in-place computation in the SplitLayer. Instead, share data // by reference in the forward pass, and keep separate diff allocations in // the backward pass. (Technically, it should be possible to share the diff // blob of the first split output with the input, but this seems to cause // some strange effects in practice...) - CHECK_NE((*top)[i], bottom[0]) << this->type_name() << " Layer does not " + CHECK_NE(top[i], bottom[0]) << this->type_name() << " Layer does not " "allow in-place computation."; - (*top)[i]->Reshape(bottom[0]->num(), bottom[0]->channels(), + top[i]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width()); - CHECK_EQ(count_, (*top)[i]->count()); + CHECK_EQ(count_, top[i]->count()); } } template void SplitLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { - for (int i = 0; i < top->size(); ++i) { - (*top)[i]->ShareData(*bottom[0]); + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + top[i]->ShareData(*bottom[0]); } } template void SplitLayer::Backward_cpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } if (top.size() == 1) { - caffe_copy(count_, top[0]->cpu_diff(), (*bottom)[0]->mutable_cpu_diff()); + caffe_copy(count_, top[0]->cpu_diff(), bottom[0]->mutable_cpu_diff()); return; } caffe_add(count_, top[0]->cpu_diff(), top[1]->cpu_diff(), - (*bottom)[0]->mutable_cpu_diff()); + bottom[0]->mutable_cpu_diff()); // Add remaining top blob diffs. for (int i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff); } } diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu index fcc0917..0513b20 100644 --- a/src/caffe/layers/split_layer.cu +++ b/src/caffe/layers/split_layer.cu @@ -8,26 +8,26 @@ namespace caffe { template void SplitLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { - for (int i = 0; i < top->size(); ++i) { - (*top)[i]->ShareData(*bottom[0]); + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + top[i]->ShareData(*bottom[0]); } } template void SplitLayer::Backward_gpu(const vector*>& top, - const vector& propagate_down, vector*>* bottom) { + const vector& propagate_down, const vector*>& bottom) { if (!propagate_down[0]) { return; } if (top.size() == 1) { - caffe_copy(count_, top[0]->gpu_diff(), (*bottom)[0]->mutable_gpu_diff()); + caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff()); return; } caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(), - (*bottom)[0]->mutable_gpu_diff()); + bottom[0]->mutable_gpu_diff()); // Add remaining top blob diffs. 
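On the split layer's backward pass (CPU above, GPU below): because every top blob shares the data of the single bottom blob, the input gradient is the sum of the output gradients,

    \frac{\partial L}{\partial x} = \sum_{i=1}^{k} \frac{\partial L}{\partial y_i},

which is why the code copies the first top diff (or adds the first two) and then accumulates the remaining ones with caffe_axpy using a coefficient of 1.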
for (int i = 2; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff); } } diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp index 8dae005..18413d9 100644 --- a/src/caffe/layers/tanh_layer.cpp +++ b/src/caffe/layers/tanh_layer.cpp @@ -11,9 +11,9 @@ namespace caffe { template void TanHLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); Dtype exp2x; const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { @@ -25,12 +25,12 @@ void TanHLayer::Forward_cpu(const vector*>& bottom, template void TanHLayer::Backward_cpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->cpu_data(); const Dtype* top_diff = top[0]->cpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int count = bottom[0]->count(); Dtype tanhx; for (int i = 0; i < count; ++i) { tanhx = top_data[i]; diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index bdb7a94..a141f8e 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -19,9 +19,9 @@ __global__ void TanHForward(const int n, const Dtype* in, Dtype* out) { template void TanHLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) TanHForward<<>>( @@ -41,12 +41,12 @@ __global__ void TanHBackward(const int n, const Dtype* in_diff, template void TanHLayer::Backward_gpu(const vector*>& top, const vector& propagate_down, - vector*>* bottom) { + const vector*>& bottom) { if (propagate_down[0]) { const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); - Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); - const int count = (*bottom)[0]->count(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) TanHBackward<<>>( count, top_diff, top_data, bottom_diff); diff --git a/src/caffe/layers/threshold_layer.cpp b/src/caffe/layers/threshold_layer.cpp index 180ea6a..b4b2df5 100644 --- a/src/caffe/layers/threshold_layer.cpp +++ b/src/caffe/layers/threshold_layer.cpp @@ -8,16 +8,16 @@ namespace caffe { template void ThresholdLayer::LayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { NeuronLayer::LayerSetUp(bottom, top); threshold_ = this->layer_param_.threshold_param().threshold(); } template void ThresholdLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); - Dtype* top_data = (*top)[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); for (int i = 0; i < count; ++i) { top_data[i] = (bottom_data[i] > threshold_) ? 
Dtype(1) : Dtype(0); diff --git a/src/caffe/layers/threshold_layer.cu b/src/caffe/layers/threshold_layer.cu index 9343081..7e65129 100644 --- a/src/caffe/layers/threshold_layer.cu +++ b/src/caffe/layers/threshold_layer.cu @@ -16,9 +16,9 @@ __global__ void ThresholdForward(const int n, const Dtype threshold, template void ThresholdLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); - Dtype* top_data = (*top)[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) ThresholdForward<<>>( diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index a24d7de..cf1f386 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -30,7 +30,7 @@ WindowDataLayer::~WindowDataLayer() { template void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, - vector*>* top) { + const vector*>& top) { // LayerSetUp runs through the window_file and creates two structures // that hold windows: one for foreground (object) windows and one // for background (non-object) windows. We use an overlap threshold @@ -152,20 +152,20 @@ void WindowDataLayer::DataLayerSetUp(const vector*>& bottom, int crop_size = this->layer_param_.window_data_param().crop_size(); CHECK_GT(crop_size, 0); const int batch_size = this->layer_param_.window_data_param().batch_size(); - (*top)[0]->Reshape(batch_size, channels, crop_size, crop_size); + top[0]->Reshape(batch_size, channels, crop_size, crop_size); this->prefetch_data_.Reshape(batch_size, channels, crop_size, crop_size); - LOG(INFO) << "output data size: " << (*top)[0]->num() << "," - << (*top)[0]->channels() << "," << (*top)[0]->height() << "," - << (*top)[0]->width(); + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); // datum size - this->datum_channels_ = (*top)[0]->channels(); - this->datum_height_ = (*top)[0]->height(); - this->datum_width_ = (*top)[0]->width(); + this->datum_channels_ = top[0]->channels(); + this->datum_height_ = top[0]->height(); + this->datum_width_ = top[0]->width(); this->datum_size_ = - (*top)[0]->channels() * (*top)[0]->height() * (*top)[0]->width(); + top[0]->channels() * top[0]->height() * top[0]->width(); // label - (*top)[1]->Reshape(batch_size, 1, 1, 1); + top[1]->Reshape(batch_size, 1, 1, 1); this->prefetch_label_.Reshape(batch_size, 1, 1, 1); } diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 6f4a651..21ab15f 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -94,7 +94,7 @@ void Net::Init(const NetParameter& in_param) { } // After this layer is connected, set it up. 
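On the framework side, the net.cpp hunks that follow make the matching change at the call sites: the top and bottom vectors are passed directly instead of by address. A sketch of the resulting body of Net<Dtype>::ForwardFromTo, with template arguments written out:

    for (int i = start; i <= end; ++i) {
      layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]);
      Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]);
      loss += layer_loss;
      if (debug_info_) { ForwardDebugInfo(i); }
    }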
LOG(INFO) << "Setting up " << layer_names_[layer_id]; - layers_[layer_id]->SetUp(bottom_vecs_[layer_id], &top_vecs_[layer_id]); + layers_[layer_id]->SetUp(bottom_vecs_[layer_id], top_vecs_[layer_id]); for (int top_id = 0; top_id < top_vecs_[layer_id].size(); ++top_id) { if (blob_loss_weights_.size() <= top_id_vecs_[layer_id][top_id]) { blob_loss_weights_.resize(top_id_vecs_[layer_id][top_id] + 1, Dtype(0)); @@ -504,8 +504,8 @@ Dtype Net::ForwardFromTo(int start, int end) { Dtype loss = 0; for (int i = start; i <= end; ++i) { // LOG(ERROR) << "Forwarding " << layer_names_[i]; - layers_[i]->Reshape(bottom_vecs_[i], &top_vecs_[i]); - Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], &top_vecs_[i]); + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); + Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } } @@ -570,7 +570,7 @@ void Net::BackwardFromTo(int start, int end) { for (int i = start; i >= end; --i) { if (layer_need_backward_[i]) { layers_[i]->Backward( - top_vecs_[i], bottom_need_backward_[i], &bottom_vecs_[i]); + top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); if (debug_info_) { BackwardDebugInfo(i); } } } @@ -683,7 +683,7 @@ void Net::Backward() { template void Net::Reshape() { for (int i = 0; i < layers_.size(); ++i) { - layers_[i]->Reshape(bottom_vecs_[i], &top_vecs_[i]); + layers_[i]->Reshape(bottom_vecs_[i], top_vecs_[i]); } } diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index e11e3f2..fa59fab 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -59,7 +59,7 @@ TYPED_TEST_CASE(AccuracyLayerTest, TestDtypes); TYPED_TEST(AccuracyLayerTest, TestSetup) { LayerParameter layer_param; AccuracyLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); EXPECT_EQ(this->blob_top_->height(), 1); @@ -72,7 +72,7 @@ TYPED_TEST(AccuracyLayerTest, TestSetupTopK) { layer_param.mutable_accuracy_param(); accuracy_param->set_top_k(5); AccuracyLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); EXPECT_EQ(this->blob_top_->height(), 1); @@ -83,8 +83,8 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPU) { LayerParameter layer_param; Caffe::set_mode(Caffe::CPU); AccuracyLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam max_value; int max_id; @@ -111,8 +111,8 @@ TYPED_TEST(AccuracyLayerTest, TestForwardCPUTopK) { AccuracyParameter* accuracy_param = layer_param.mutable_accuracy_param(); accuracy_param->set_top_k(this->top_k_); AccuracyLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam current_value; int current_rank; diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 
fb3951c..3487d42 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -41,7 +41,7 @@ TYPED_TEST_CASE(ArgMaxLayerTest, TestDtypes); TYPED_TEST(ArgMaxLayerTest, TestSetup) { LayerParameter layer_param; ArgMaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), 1); } @@ -51,7 +51,7 @@ TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); ArgMaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), 2); } @@ -59,8 +59,8 @@ TYPED_TEST(ArgMaxLayerTest, TestSetupMaxVal) { TYPED_TEST(ArgMaxLayerTest, TestCPU) { LayerParameter layer_param; ArgMaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); @@ -84,8 +84,8 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUMaxVal) { ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_out_max_val(true); ArgMaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); @@ -110,8 +110,8 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUTopK) { ArgMaxParameter* argmax_param = layer_param.mutable_argmax_param(); argmax_param->set_top_k(this->top_k_); ArgMaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values int max_ind; TypeParam max_val; @@ -140,8 +140,8 @@ TYPED_TEST(ArgMaxLayerTest, TestCPUMaxValTopK) { argmax_param->set_out_max_val(true); argmax_param->set_top_k(this->top_k_); ArgMaxLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values int max_ind; TypeParam max_val; diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index c60b7f7..f14f1d2 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -63,7 +63,7 @@ TYPED_TEST(ConcatLayerTest, TestSetupNum) { LayerParameter layer_param; layer_param.mutable_concat_param()->set_concat_dim(0); ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_1, 
this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num() + this->blob_bottom_2->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels()); @@ -75,7 +75,7 @@ TYPED_TEST(ConcatLayerTest, TestSetupChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels()+this->blob_bottom_1->channels()); @@ -88,8 +88,8 @@ TYPED_TEST(ConcatLayerTest, TestNum) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ConcatLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_0, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0, this->blob_top_vec_); for (int n = 0; n < this->blob_top_->num(); ++n) { for (int c = 0; c < this->blob_bottom_0->channels(); ++c) { for (int h = 0; h < this->blob_top_->height(); ++h) { @@ -115,8 +115,8 @@ TYPED_TEST(ConcatLayerTest, TestGradient) { LayerParameter layer_param; ConcatLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradient(&layer, &(this->blob_bottom_vec_0), - &(this->blob_top_vec_)); + checker.CheckGradient(&layer, this->blob_bottom_vec_0, + this->blob_top_vec_); } } // namespace caffe diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index a5bef4c..d269fbc 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -62,8 +62,8 @@ TYPED_TEST(ContrastiveLossLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // manually compute to compare const Dtype margin = layer_param.contrastive_loss_param().margin(); const int num = this->blob_bottom_data_i_->num(); @@ -90,13 +90,13 @@ TYPED_TEST(ContrastiveLossLayerTest, TestGradient) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ContrastiveLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 1e-2, 1701); // check the gradient for the first two bottom layers - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 0); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 1); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); } } // namespace caffe diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index a38ad3f..aef9092 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -157,7 +157,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) { this->blob_top_vec_.push_back(this->blob_top_2_); 
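The test changes are likewise confined to call sites. As a sketch of the pattern the updated tests follow (names as in the concat test above), a layer is now exercised as:

    ConcatLayer<Dtype> layer(layer_param);
    layer.SetUp(this->blob_bottom_vec_0, this->blob_top_vec_);
    layer.Forward(this->blob_bottom_vec_0, this->blob_top_vec_);

    GradientChecker<Dtype> checker(1e-2, 1e-2);
    checker.CheckGradient(&layer, this->blob_bottom_vec_0, this->blob_top_vec_);

with GradientChecker taking the blob vectors by const reference after this patch.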
shared_ptr > layer( new ConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 4); EXPECT_EQ(this->blob_top_->height(), 2); @@ -170,7 +170,7 @@ TYPED_TEST(ConvolutionLayerTest, TestSetup) { convolution_param->set_num_output(3); convolution_param->set_group(3); layer.reset(new ConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 3); EXPECT_EQ(this->blob_top_->height(), 2); @@ -197,8 +197,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolution) { convolution_param->mutable_bias_filler()->set_value(0.1); shared_ptr > layer( new ConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Check against reference convolution. const Dtype* top_data; const Dtype* ref_top_data; @@ -233,8 +233,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSimpleConvolutionGroup) { convolution_param->mutable_bias_filler()->set_value(0.1); shared_ptr > layer( new ConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Check against reference convolution. const Dtype* top_data; const Dtype* ref_top_data; @@ -284,8 +284,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { weights[i + 7] = 0; weights[i + 8] = 1; } - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. // (1) the [1 2 1] column filter vector*> sep_blob_bottom_vec; @@ -311,8 +311,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { weights_1[i + 1] = 2; weights_1[i + 2] = 1; } - layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec)); - layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec)); + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // (2) the [-1 0 1] row filter blob_sep->CopyFrom(*this->blob_top_2_, false, true); sep_blob_bottom_vec.clear(); @@ -333,8 +333,8 @@ TYPED_TEST(ConvolutionLayerTest, TestSobelConvolution) { weights_2[i + 1] = 0; weights_2[i + 2] = 1; } - layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec)); - layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec)); + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // Test equivalence of full and separable filters. 
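For the Sobel tests (ConvolutionLayer above, CuDNNConvolutionLayer below), the equivalence being checked rests on the separability of the G_x kernel:

    G_x = \begin{pmatrix} -1 & 0 & 1 \\ -2 & 0 & 2 \\ -1 & 0 & 1 \end{pmatrix}
        = \begin{pmatrix} 1 \\ 2 \\ 1 \end{pmatrix} \begin{pmatrix} -1 & 0 & 1 \end{pmatrix},

so one 3 x 3 convolution and the cascade of a 3 x 1 convolution with [1 2 1]^T followed by a 1 x 3 convolution with [-1 0 1] must produce the same output, which is what the element-wise comparison that follows asserts.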
const Dtype* top_data = this->blob_top_->cpu_data(); const Dtype* sep_top_data = this->blob_top_2_->cpu_data(); @@ -357,8 +357,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGradient) { convolution_param->mutable_bias_filler()->set_type("gaussian"); ConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { @@ -374,8 +374,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGradientGroup) { convolution_param->mutable_bias_filler()->set_type("gaussian"); ConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } #ifdef USE_CUDNN @@ -437,7 +437,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { this->blob_top_vec_.push_back(this->blob_top_2_); shared_ptr > layer( new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 4); EXPECT_EQ(this->blob_top_->height(), 2); @@ -450,7 +450,7 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSetupCuDNN) { convolution_param->set_num_output(3); convolution_param->set_group(3); layer.reset(new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 3); EXPECT_EQ(this->blob_top_->height(), 2); @@ -477,8 +477,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionCuDNN) { convolution_param->mutable_bias_filler()->set_value(0.1); shared_ptr > layer( new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Check against reference convolution. const TypeParam* top_data; const TypeParam* ref_top_data; @@ -513,8 +513,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSimpleConvolutionGroupCuDNN) { convolution_param->mutable_bias_filler()->set_value(0.1); shared_ptr > layer( new CuDNNConvolutionLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Check against reference convolution. const TypeParam* top_data; const TypeParam* ref_top_data; @@ -564,8 +564,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { weights[i + 7] = 0; weights[i + 8] = 1; } - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. 
// (1) the [1 2 1] column filter vector*> sep_blob_bottom_vec; @@ -591,8 +591,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { weights_1[i + 1] = 2; weights_1[i + 2] = 1; } - layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec)); - layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec)); + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // (2) the [-1 0 1] row filter blob_sep->CopyFrom(*this->blob_top_2_, false, true); sep_blob_bottom_vec.clear(); @@ -613,8 +613,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestSobelConvolutionCuDNN) { weights_2[i + 1] = 0; weights_2[i + 2] = 1; } - layer->SetUp(sep_blob_bottom_vec, &(sep_blob_top_vec)); - layer->Forward(sep_blob_bottom_vec, &(sep_blob_top_vec)); + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); // Test equivalence of full and separable filters. const TypeParam* top_data = this->blob_top_->cpu_data(); const TypeParam* sep_top_data = this->blob_top_2_->cpu_data(); @@ -637,8 +637,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientCuDNN) { convolution_param->mutable_bias_filler()->set_type("gaussian"); CuDNNConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { @@ -654,8 +654,8 @@ TYPED_TEST(CuDNNConvolutionLayerTest, TestGradientGroupCuDNN) { convolution_param->mutable_bias_filler()->set_type("gaussian"); CuDNNConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } #endif diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 887124a..657ffde 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -125,7 +125,7 @@ class DataLayerTest : public MultiDeviceTest { transform_param->set_scale(scale); DataLayer layer(param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_data_->num(), 5); EXPECT_EQ(blob_top_data_->channels(), 2); EXPECT_EQ(blob_top_data_->height(), 3); @@ -136,7 +136,7 @@ class DataLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_label_->width(), 1); for (int iter = 0; iter < 100; ++iter) { - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } @@ -165,7 +165,7 @@ class DataLayerTest : public MultiDeviceTest { transform_param->set_crop_size(1); DataLayer layer(param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_data_->num(), 5); EXPECT_EQ(blob_top_data_->channels(), 2); EXPECT_EQ(blob_top_data_->height(), 1); @@ -176,7 +176,7 @@ class DataLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_label_->width(), 1); for (int iter = 0; iter < 2; ++iter) { - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } @@ -219,9 +219,9 @@ class DataLayerTest : public 
MultiDeviceTest { vector > crop_sequence; { DataLayer layer1(param); - layer1.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer1.SetUp(blob_bottom_vec_, blob_top_vec_); for (int iter = 0; iter < 2; ++iter) { - layer1.Forward(blob_bottom_vec_, &blob_top_vec_); + layer1.Forward(blob_bottom_vec_, blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } @@ -240,9 +240,9 @@ class DataLayerTest : public MultiDeviceTest { // Check that the sequence is the same as the original. Caffe::set_random_seed(seed_); DataLayer layer2(param); - layer2.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer2.SetUp(blob_bottom_vec_, blob_top_vec_); for (int iter = 0; iter < 2; ++iter) { - layer2.Forward(blob_bottom_vec_, &blob_top_vec_); + layer2.Forward(blob_bottom_vec_, blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } @@ -274,9 +274,9 @@ class DataLayerTest : public MultiDeviceTest { vector > crop_sequence; { DataLayer layer1(param); - layer1.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer1.SetUp(blob_bottom_vec_, blob_top_vec_); for (int iter = 0; iter < 2; ++iter) { - layer1.Forward(blob_bottom_vec_, &blob_top_vec_); + layer1.Forward(blob_bottom_vec_, blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } @@ -295,9 +295,9 @@ class DataLayerTest : public MultiDeviceTest { // srand with 1701. Check that the sequence differs from the original. srand(seed_); DataLayer layer2(param); - layer2.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer2.SetUp(blob_bottom_vec_, blob_top_vec_); for (int iter = 0; iter < 2; ++iter) { - layer2.Forward(blob_bottom_vec_, &blob_top_vec_); + layer2.Forward(blob_bottom_vec_, blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); } diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index 4188bb6..da121fa 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -56,7 +56,7 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { dummy_data_param->add_width(4); this->blob_top_vec_.resize(1); DummyDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_a_->num(), 5); EXPECT_EQ(this->blob_top_a_->channels(), 3); EXPECT_EQ(this->blob_top_a_->height(), 2); @@ -68,7 +68,7 @@ TYPED_TEST(DummyDataLayerTest, TestOneTopConstant) { EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); } } - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_top_vec_.size(); ++i) { for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { EXPECT_EQ(0, this->blob_top_vec_[i]->cpu_data()[j]); @@ -92,7 +92,7 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { data_filler_param->set_value(7); this->blob_top_vec_.resize(2); DummyDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_a_->num(), 5); EXPECT_EQ(this->blob_top_a_->channels(), 3); EXPECT_EQ(this->blob_top_a_->height(), 2); @@ -107,7 +107,7 @@ TYPED_TEST(DummyDataLayerTest, TestTwoTopConstant) { EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); } } - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 
0; i < this->blob_top_vec_.size(); ++i) { for (int j = 0; j < this->blob_top_vec_[i]->count(); ++j) { EXPECT_EQ(7, this->blob_top_vec_[i]->cpu_data()[j]); @@ -134,7 +134,7 @@ TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { FillerParameter* data_filler_param_c = dummy_data_param->add_data_filler(); data_filler_param_c->set_value(9); DummyDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_a_->num(), 5); EXPECT_EQ(this->blob_top_a_->channels(), 3); EXPECT_EQ(this->blob_top_a_->height(), 2); @@ -160,7 +160,7 @@ TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { } // Do a Forward pass to fill in Blob b with Gaussian data. - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_top_a_->count(); ++i) { EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); } @@ -180,7 +180,7 @@ TYPED_TEST(DummyDataLayerTest, TestThreeTopConstantGaussianConstant) { // Do another Forward pass to fill in Blob b with Gaussian data again, // checking that we get different values. - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_top_a_->count(); ++i) { EXPECT_EQ(7, this->blob_top_a_->cpu_data()[i]); } diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index d5cf082..be0c134 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -58,7 +58,7 @@ TYPED_TEST(EltwiseLayerTest, TestSetUp) { eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); shared_ptr > layer( new EltwiseLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 3); EXPECT_EQ(this->blob_top_->height(), 4); @@ -72,8 +72,8 @@ TYPED_TEST(EltwiseLayerTest, TestProd) { eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); shared_ptr > layer( new EltwiseLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); const int count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); @@ -91,8 +91,8 @@ TYPED_TEST(EltwiseLayerTest, TestSum) { eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); shared_ptr > layer( new EltwiseLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); const int count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); @@ -113,8 +113,8 @@ TYPED_TEST(EltwiseLayerTest, TestSumCoeff) { eltwise_param->add_coeff(2); shared_ptr > layer( new EltwiseLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, 
this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); const int count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); @@ -134,8 +134,8 @@ TYPED_TEST(EltwiseLayerTest, TestStableProdGradient) { eltwise_param->set_stable_prod_grad(true); EltwiseLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) { @@ -146,8 +146,8 @@ TYPED_TEST(EltwiseLayerTest, TestUnstableProdGradient) { eltwise_param->set_stable_prod_grad(false); EltwiseLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(EltwiseLayerTest, TestSumGradient) { @@ -157,8 +157,8 @@ TYPED_TEST(EltwiseLayerTest, TestSumGradient) { eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); EltwiseLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) { @@ -171,8 +171,8 @@ TYPED_TEST(EltwiseLayerTest, TestSumCoeffGradient) { eltwise_param->add_coeff(2); EltwiseLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(EltwiseLayerTest, TestMax) { @@ -182,8 +182,8 @@ TYPED_TEST(EltwiseLayerTest, TestMax) { eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); shared_ptr > layer( new EltwiseLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); const int count = this->blob_top_->count(); const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); @@ -202,8 +202,8 @@ TYPED_TEST(EltwiseLayerTest, TestMaxGradient) { eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); EltwiseLayer layer(layer_param); GradientChecker checker(1e-4, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } // namespace caffe diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp index d7d2de7..1949742 100644 --- a/src/caffe/test/test_euclidean_loss_layer.cpp +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -44,18 +44,18 @@ class EuclideanLossLayerTest : public MultiDeviceTest { // equivalent to explicitly specifiying a weight of 1. 
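The weight check that follows relies on loss weights scaling the returned objective linearly. For the Euclidean loss, which computes one half the mean (over the batch) squared difference of its two inputs,

    L = \frac{1}{2N} \sum_{n=1}^{N} \| x^{(1)}_n - x^{(2)}_n \|_2^2,

Forward with loss weight w should return w \cdot L, so the test asserts loss(w = 3.7) equal to 3.7 times loss(w = 1) within a 1e-5 margin, and additionally that the loss is non-trivially far from zero.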
LayerParameter layer_param; EuclideanLossLayer layer_weight_1(layer_param); - layer_weight_1.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer_weight_1.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype loss_weight_1 = - layer_weight_1.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer_weight_1.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Get the loss again with a different objective weight; check that it is // scaled appropriately. const Dtype kLossWeight = 3.7; layer_param.add_loss_weight(kLossWeight); EuclideanLossLayer layer_weight_2(layer_param); - layer_weight_2.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer_weight_2.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype loss_weight_2 = - layer_weight_2.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer_weight_2.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype kErrorMargin = 1e-5; EXPECT_NEAR(loss_weight_1 * kLossWeight, loss_weight_2, kErrorMargin); // Make sure the loss is non-trivial. @@ -82,10 +82,10 @@ TYPED_TEST(EuclideanLossLayerTest, TestGradient) { const Dtype kLossWeight = 3.7; layer_param.add_loss_weight(kLossWeight); EuclideanLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 1e-2, 1701); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index cbd01f2..3042d29 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -41,7 +41,7 @@ TYPED_TEST(FlattenLayerTest, TestSetup) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 3 * 6 * 5); EXPECT_EQ(this->blob_top_->height(), 1); @@ -52,8 +52,8 @@ TYPED_TEST(FlattenLayerTest, Test) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; FlattenLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int c = 0; c < 3 * 6 * 5; ++c) { EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), this->blob_bottom_->data_at(0, c / (6 * 5), (c / 5) % 6, c % 5)); @@ -67,8 +67,8 @@ TYPED_TEST(FlattenLayerTest, TestGradient) { LayerParameter layer_param; FlattenLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index eb09c8d..c828223 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -96,8 +96,8 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { { HDF5OutputLayer layer(param); EXPECT_EQ(layer.file_name(), this->output_file_name_); - layer.SetUp(this->blob_bottom_vec_, 
&this->blob_top_vec_); - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); } file_id = H5Fopen(this->output_file_name_.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT); diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index acca75b..db9068b 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -67,7 +67,7 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { // Test that the layer setup got the correct parameters. HDF5DataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_data_->num(), batch_size); EXPECT_EQ(this->blob_top_data_->channels(), num_cols); EXPECT_EQ(this->blob_top_data_->height(), height); @@ -78,12 +78,12 @@ TYPED_TEST(HDF5DataLayerTest, TestRead) { EXPECT_EQ(this->blob_top_label_->height(), 1); EXPECT_EQ(this->blob_top_label_->width(), 1); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); // Go through the data 10 times (5 batches). const int data_size = num_cols * height * width; for (int iter = 0; iter < 10; ++iter) { - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // On even iterations, we're reading the first half of the data. // On odd iterations, we're reading the second half of the data. diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp index 3c11b9a..b6a9902 100644 --- a/src/caffe/test/test_hinge_loss_layer.cpp +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -57,8 +57,8 @@ TYPED_TEST(HingeLossLayerTest, TestGradientL1) { LayerParameter layer_param; HingeLossLayer layer(layer_param); GradientChecker checker(1e-2, 2e-3, 1701, 1, 0.01); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); } TYPED_TEST(HingeLossLayerTest, TestGradientL2) { @@ -69,8 +69,8 @@ TYPED_TEST(HingeLossLayerTest, TestGradientL2) { hinge_loss_param->set_norm(HingeLossParameter_Norm_L2); HingeLossLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2, 1701); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); } } // namespace caffe diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index 32cf636..f50abe1 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -44,7 +44,7 @@ TYPED_TEST(Im2colLayerTest, TestSetup) { convolution_param->set_kernel_size(3); convolution_param->set_stride(2); Im2colLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 27); EXPECT_EQ(this->blob_top_->height(), 2); @@ -59,8 +59,8 @@ TYPED_TEST(Im2colLayerTest, TestForward) { convolution_param->set_kernel_size(3); convolution_param->set_stride(2); Im2colLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, 
&(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // We are lazy and will only check the top left block for (int c = 0; c < 27; ++c) { EXPECT_EQ(this->blob_bottom_->data_at(0, (c / 9), (c / 3) % 3, c % 3), @@ -77,8 +77,8 @@ TYPED_TEST(Im2colLayerTest, TestGradient) { convolution_param->set_stride(2); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -91,8 +91,8 @@ TYPED_TEST(Im2colLayerTest, TestRect) { convolution_param->set_kernel_w(3); convolution_param->set_stride(2); Im2colLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // We are lazy and will only check the top left block for (int c = 0; c < 45; ++c) { EXPECT_EQ(this->blob_top_->data_at(0, c, 0, 0), @@ -111,8 +111,8 @@ TYPED_TEST(Im2colLayerTest, TestRectGradient) { convolution_param->set_stride(2); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } // namespace caffe diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp index d098c76..77523ef 100644 --- a/src/caffe/test/test_image_data_layer.cpp +++ b/src/caffe/test/test_image_data_layer.cpp @@ -61,7 +61,7 @@ TYPED_TEST(ImageDataLayerTest, TestRead) { image_data_param->set_source(this->filename_.c_str()); image_data_param->set_shuffle(false); ImageDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_data_->num(), 5); EXPECT_EQ(this->blob_top_data_->channels(), 3); EXPECT_EQ(this->blob_top_data_->height(), 360); @@ -72,7 +72,7 @@ TYPED_TEST(ImageDataLayerTest, TestRead) { EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice for (int iter = 0; iter < 2; ++iter) { - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); } @@ -89,7 +89,7 @@ TYPED_TEST(ImageDataLayerTest, TestResize) { image_data_param->set_new_width(256); image_data_param->set_shuffle(false); ImageDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_data_->num(), 5); EXPECT_EQ(this->blob_top_data_->channels(), 3); EXPECT_EQ(this->blob_top_data_->height(), 256); @@ -100,7 +100,7 @@ TYPED_TEST(ImageDataLayerTest, TestResize) { EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice for (int iter = 0; iter < 2; ++iter) { - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); } @@ -115,7 +115,7 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) { 
image_data_param->set_source(this->filename_.c_str()); image_data_param->set_shuffle(true); ImageDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_data_->num(), 5); EXPECT_EQ(this->blob_top_data_->channels(), 3); EXPECT_EQ(this->blob_top_data_->height(), 360); @@ -126,7 +126,7 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) { EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice for (int iter = 0; iter < 2; ++iter) { - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); map values_to_indices; int num_in_order = 0; for (int i = 0; i < 5; ++i) { diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index de2f901..7ec2f80 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -63,8 +63,8 @@ TYPED_TEST(InfogainLossLayerTest, TestGradient) { LayerParameter layer_param; InfogainLossLayer layer(layer_param); GradientChecker checker(1e-4, 2e-2, 1701, 1, 0.01); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); } } // namespace caffe diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index 5f9729c..c03df17 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -48,7 +48,7 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) { inner_product_param->set_num_output(10); shared_ptr > layer( new InnerProductLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->height(), 1); EXPECT_EQ(this->blob_top_->width(), 1); @@ -73,8 +73,8 @@ TYPED_TEST(InnerProductLayerTest, TestForward) { inner_product_param->mutable_bias_filler()->set_max(2); shared_ptr > layer( new InnerProductLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->blob_top_->cpu_data(); const int count = this->blob_top_->count(); for (int i = 0; i < count; ++i) { @@ -103,8 +103,8 @@ TYPED_TEST(InnerProductLayerTest, TestGradient) { inner_product_param->mutable_bias_filler()->set_max(2); InnerProductLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } else { LOG(ERROR) << "Skipping test due to old architecture."; } diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 3bd62fd..07425df 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -116,7 +116,7 @@ TYPED_TEST(LRNLayerTest, TestSetupAcrossChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; LRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); 
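Editorial aside, not part of the patch itself: every hunk in these test files applies the same mechanical rewrite, passing the top vector directly instead of its address. A minimal standalone sketch of the resulting SetUp/Forward call pattern, assuming the post-patch signatures and the ReLULayer declared in neuron_layers.hpp (the toy shape and the main() wrapper are illustrative only):

    #include <vector>
    #include "caffe/blob.hpp"
    #include "caffe/neuron_layers.hpp"
    #include "caffe/proto/caffe.pb.h"

    int main() {
      caffe::LayerParameter layer_param;
      caffe::ReLULayer<float> layer(layer_param);

      caffe::Blob<float> bottom(2, 3, 4, 5);  // arbitrary toy shape
      caffe::Blob<float> top;
      std::vector<caffe::Blob<float>*> bottom_vec(1, &bottom);
      std::vector<caffe::Blob<float>*> top_vec(1, &top);
      for (int i = 0; i < bottom.count(); ++i) {
        bottom.mutable_cpu_data()[i] = i - 10.0f;  // mix of negative and positive inputs
      }

      // The vectors themselves are now passed directly (no address-of);
      // the layer still reshapes and writes the blobs they point to.
      layer.SetUp(bottom_vec, top_vec);
      layer.Forward(bottom_vec, top_vec);
      return top.cpu_data()[0] == 0.0f ? 0 : 1;  // bottom[0] is negative, so ReLU outputs 0
    }
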
EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 7); EXPECT_EQ(this->blob_top_->height(), 3); @@ -127,8 +127,8 @@ TYPED_TEST(LRNLayerTest, TestForwardAcrossChannels) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; LRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); @@ -143,20 +143,20 @@ TYPED_TEST(LRNLayerTest, TestGradientAcrossChannels) { LayerParameter layer_param; LRNLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); layer.Backward(this->blob_top_vec_, propagate_down, - &(this->blob_bottom_vec_)); + this->blob_bottom_vec_); // for (int i = 0; i < this->blob_bottom_->count(); ++i) { // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] // << std::endl; // } - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) { @@ -166,7 +166,7 @@ TYPED_TEST(LRNLayerTest, TestSetupWithinChannel) { LRNParameter_NormRegion_WITHIN_CHANNEL); layer_param.mutable_lrn_param()->set_local_size(3); LRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->channels(), 7); EXPECT_EQ(this->blob_top_->height(), 3); @@ -180,8 +180,8 @@ TYPED_TEST(LRNLayerTest, TestForwardWithinChannel) { LRNParameter_NormRegion_WITHIN_CHANNEL); layer_param.mutable_lrn_param()->set_local_size(3); LRNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); Blob top_reference; this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, &top_reference); @@ -199,13 +199,13 @@ TYPED_TEST(LRNLayerTest, TestGradientWithinChannel) { layer_param.mutable_lrn_param()->set_local_size(3); LRNLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index 
311c778..b1f4e4e 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -47,9 +47,9 @@ TYPED_TEST(MaxPoolingDropoutTest, TestSetup) { pooling_param->set_kernel_size(3); pooling_param->set_stride(2); PoolingLayer max_layer(layer_param); - max_layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + max_layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); DropoutLayer dropout_layer(layer_param); - dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); EXPECT_EQ(this->blob_top_->height(), 3); @@ -64,8 +64,8 @@ TYPED_TEST(MaxPoolingDropoutTest, TestForward) { pooling_param->set_kernel_size(3); pooling_param->set_stride(2); PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* top_data = this->blob_top_->cpu_data(); Dtype sum = 0.; for (int i = 0; i < this->blob_top_->count(); ++i) { @@ -74,8 +74,8 @@ TYPED_TEST(MaxPoolingDropoutTest, TestForward) { EXPECT_EQ(sum, this->blob_top_->count()); // Dropout in-place DropoutLayer dropout_layer(layer_param); - dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); - dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_); + dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_); sum = 0.; Dtype scale = 1. / (1. 
- layer_param.dropout_param().dropout_ratio()); top_data = this->blob_top_->cpu_data(); @@ -94,14 +94,14 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { pooling_param->set_kernel_size(3); pooling_param->set_stride(2); PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } vector propagate_down(this->blob_bottom_vec_.size(), true); layer.Backward(this->blob_top_vec_, propagate_down, - &(this->blob_bottom_vec_)); + this->blob_bottom_vec_); const Dtype* bottom_diff = this->blob_bottom_->cpu_diff(); Dtype sum = 0.; for (int i = 0; i < this->blob_bottom_->count(); ++i) { @@ -110,12 +110,12 @@ TYPED_TEST(MaxPoolingDropoutTest, TestBackward) { EXPECT_EQ(sum, this->blob_top_->count()); // Dropout in-place DropoutLayer dropout_layer(layer_param); - dropout_layer.SetUp(this->blob_top_vec_, &(this->blob_top_vec_)); - dropout_layer.Forward(this->blob_top_vec_, &(this->blob_top_vec_)); + dropout_layer.SetUp(this->blob_top_vec_, this->blob_top_vec_); + dropout_layer.Forward(this->blob_top_vec_, this->blob_top_vec_); dropout_layer.Backward(this->blob_top_vec_, propagate_down, - &(this->blob_top_vec_)); + this->blob_top_vec_); layer.Backward(this->blob_top_vec_, propagate_down, - &(this->blob_bottom_vec_)); + this->blob_bottom_vec_); Dtype sum_with_dropout = 0.; bottom_diff = this->blob_bottom_->cpu_diff(); for (int i = 0; i < this->blob_bottom_->count(); ++i) { diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp index 3dc0034..497ab0d 100644 --- a/src/caffe/test/test_memory_data_layer.cpp +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -70,7 +70,7 @@ TYPED_TEST(MemoryDataLayerTest, TestSetup) { md_param->set_width(this->width_); shared_ptr > layer( new MemoryDataLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->data_blob_->num(), this->batch_size_); EXPECT_EQ(this->data_blob_->channels(), this->channels_); EXPECT_EQ(this->data_blob_->height(), this->height_); @@ -93,12 +93,12 @@ TYPED_TEST(MemoryDataLayerTest, TestForward) { md_param->set_width(this->width_); shared_ptr > layer( new MemoryDataLayer(layer_param)); - layer->DataLayerSetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->DataLayerSetUp(this->blob_bottom_vec_, this->blob_top_vec_); layer->Reset(this->data_->mutable_cpu_data(), this->labels_->mutable_cpu_data(), this->data_->num()); for (int i = 0; i < this->batches_ * 6; ++i) { int batch_num = i % this->batches_; - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int j = 0; j < this->data_blob_->count(); ++j) { EXPECT_EQ(this->data_blob_->cpu_data()[j], this->data_->cpu_data()[ @@ -121,7 +121,7 @@ TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) { memory_data_param->set_height(this->height_); memory_data_param->set_width(this->width_); MemoryDataLayer layer(param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); vector datum_vector(this->batch_size_); const size_t count = this->channels_ * this->height_ * this->width_; @@ -144,7 
+144,7 @@ TYPED_TEST(MemoryDataLayerTest, AddDatumVectorDefaultTransform) { int data_index; // Go through the data 5 times for (int iter = 0; iter < 5; ++iter) { - layer.Forward(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* data = this->data_blob_->cpu_data(); size_t index = 0; for (int i = 0; i < this->batch_size_; ++i) { diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index 1fc4c42..9038017 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -53,10 +53,10 @@ TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { LayerParameter layer_param; Caffe::set_mode(Caffe::CPU); MultinomialLogisticLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 2*1e-2, 1701, 0, 0.05); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); } } // namespace caffe diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp index d3d8012..933b432 100644 --- a/src/caffe/test/test_mvn_layer.cpp +++ b/src/caffe/test/test_mvn_layer.cpp @@ -40,8 +40,8 @@ TYPED_TEST(MVNLayerTest, TestForward) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; MVNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test mean int num = this->blob_bottom_->num(); int channels = this->blob_bottom_->channels(); @@ -75,8 +75,8 @@ TYPED_TEST(MVNLayerTest, TestForwardMeanOnly) { LayerParameter layer_param; layer_param.ParseFromString("mvn_param{normalize_variance: false}"); MVNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test mean int num = this->blob_bottom_->num(); int channels = this->blob_bottom_->channels(); @@ -107,8 +107,8 @@ TYPED_TEST(MVNLayerTest, TestForwardAcrossChannels) { LayerParameter layer_param; layer_param.ParseFromString("mvn_param{across_channels: true}"); MVNLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test mean int num = this->blob_bottom_->num(); int channels = this->blob_bottom_->channels(); @@ -142,8 +142,8 @@ TYPED_TEST(MVNLayerTest, TestGradient) { LayerParameter layer_param; MVNLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) { @@ -152,8 +152,8 @@ TYPED_TEST(MVNLayerTest, TestGradientMeanOnly) { 
layer_param.ParseFromString("mvn_param{normalize_variance: false}"); MVNLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) { @@ -162,8 +162,8 @@ TYPED_TEST(MVNLayerTest, TestGradientAcrossChannels) { layer_param.ParseFromString("mvn_param{across_channels: true}"); MVNLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 4c19d3f..fdbb63c 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -44,8 +44,8 @@ class NeuronLayerTest : public MultiDeviceTest { } Caffe::set_phase(Caffe::TRAIN); DropoutLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -74,8 +74,8 @@ TYPED_TEST(NeuronLayerTest, TestAbsVal) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; AbsValLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); const int count = this->blob_bottom_->count(); @@ -89,16 +89,16 @@ TYPED_TEST(NeuronLayerTest, TestAbsGradient) { LayerParameter layer_param; AbsValLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestReLU) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; ReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -113,8 +113,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradient) { LayerParameter layer_param; ReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) { @@ -122,8 +122,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUWithNegativeSlope) { LayerParameter layer_param; 
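Editorial aside, not part of the patch itself: the GradientChecker calls change in the same way, with both blob vectors handed over by const reference rather than by pointer. A hypothetical self-contained check against MVNLayer is sketched below; the helper name CheckMVNGradient, the Gaussian fill, and the toy shape are assumptions made for illustration:

    #include <vector>
    #include "caffe/blob.hpp"
    #include "caffe/common_layers.hpp"
    #include "caffe/filler.hpp"
    #include "caffe/test/test_gradient_check_util.hpp"

    template <typename Dtype>
    void CheckMVNGradient() {
      caffe::Blob<Dtype> bottom(2, 3, 4, 5);
      caffe::Blob<Dtype> top;
      caffe::FillerParameter filler_param;
      caffe::GaussianFiller<Dtype> filler(filler_param);
      filler.Fill(&bottom);  // random inputs, as in the test fixtures above
      std::vector<caffe::Blob<Dtype>*> bottom_vec(1, &bottom);
      std::vector<caffe::Blob<Dtype>*> top_vec(1, &top);

      caffe::LayerParameter layer_param;
      caffe::MVNLayer<Dtype> layer(layer_param);
      caffe::GradientChecker<Dtype> checker(1e-2, 1e-3);
      // Both vectors go in by const reference under the new signature.
      checker.CheckGradientExhaustive(&layer, bottom_vec, top_vec);
    }
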
layer_param.ParseFromString("relu_param{negative_slope:0.01}"); ReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -139,16 +139,16 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientWithNegativeSlope) { layer_param.ParseFromString("relu_param{negative_slope:0.01}"); ReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestSigmoid) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; SigmoidLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -165,16 +165,16 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradient) { LayerParameter layer_param; SigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestTanH) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; TanHLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test exact values for (int i = 0; i < this->blob_bottom_->num(); ++i) { for (int j = 0; j < this->blob_bottom_->channels(); ++j) { @@ -197,8 +197,8 @@ TYPED_TEST(NeuronLayerTest, TestTanHGradient) { LayerParameter layer_param; TanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestDropoutHalf) { @@ -216,8 +216,8 @@ TYPED_TEST(NeuronLayerTest, TestDropoutTestPhase) { LayerParameter layer_param; Caffe::set_phase(Caffe::TEST); DropoutLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -234,8 +234,8 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradient) { Caffe::set_phase(Caffe::TRAIN); DropoutLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + 
checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) { @@ -244,16 +244,16 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradientTest) { Caffe::set_phase(Caffe::TEST); DropoutLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestBNLL) { typedef typename TypeParam::Dtype Dtype; LayerParameter layer_param; BNLLLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -268,8 +268,8 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradient) { LayerParameter layer_param; BNLLLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } #ifdef USE_CUDNN @@ -300,8 +300,8 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); @@ -316,8 +316,8 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientCuDNN) { LayerParameter layer_param; CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { @@ -325,8 +325,8 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUWithNegativeSlopeCuDNN) { LayerParameter layer_param; layer_param.ParseFromString("relu_param{negative_slope:0.01}"); CuDNNReLULayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); @@ -342,16 +342,16 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestReLUGradientWithNegativeSlopeCuDNN) { layer_param.ParseFromString("relu_param{negative_slope:0.01}"); CuDNNReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNSigmoidLayer 
layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const TypeParam* bottom_data = this->blob_bottom_->cpu_data(); const TypeParam* top_data = this->blob_top_->cpu_data(); @@ -368,16 +368,16 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestSigmoidGradientCuDNN) { LayerParameter layer_param; CuDNNSigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(CuDNNNeuronLayerTest, TestTanHCuDNN) { Caffe::set_mode(Caffe::GPU); LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Test exact values for (int i = 0; i < this->blob_bottom_->num(); ++i) { for (int j = 0; j < this->blob_bottom_->channels(); ++j) { @@ -400,8 +400,8 @@ TYPED_TEST(CuDNNNeuronLayerTest, TestTanHGradientCuDNN) { LayerParameter layer_param; CuDNNTanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } #endif diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index ec23a68..e298033 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -73,7 +73,7 @@ class PoolingLayerTest : public MultiDeviceTest { blob_bottom_->mutable_cpu_data()[i + 14] = 3; } PoolingLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_->num(), num); EXPECT_EQ(blob_top_->channels(), channels); EXPECT_EQ(blob_top_->height(), 2); @@ -84,7 +84,7 @@ class PoolingLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_mask_->height(), 2); EXPECT_EQ(blob_top_mask_->width(), 4); } - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); // Expected output: 2x 2 channels of: // [9 5 5 8] // [9 5 5 8] @@ -171,7 +171,7 @@ class PoolingLayerTest : public MultiDeviceTest { blob_bottom_->mutable_cpu_data()[i + 35] = 11; } PoolingLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_->num(), num); EXPECT_EQ(blob_top_->channels(), channels); EXPECT_EQ(blob_top_->height(), 4); @@ -182,7 +182,7 @@ class PoolingLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_mask_->height(), 4); EXPECT_EQ(blob_top_mask_->width(), 5); } - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); // Expected output: 2x 2 channels of: // [35 32 26 27 27] // [32 33 33 27 27] @@ -296,7 +296,7 @@ class PoolingLayerTest : public MultiDeviceTest { blob_bottom_->mutable_cpu_data()[i + 35] = 11; } PoolingLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_->num(), num); 
EXPECT_EQ(blob_top_->channels(), channels); EXPECT_EQ(blob_top_->height(), 5); @@ -307,7 +307,7 @@ class PoolingLayerTest : public MultiDeviceTest { EXPECT_EQ(blob_top_mask_->height(), 5); EXPECT_EQ(blob_top_mask_->width(), 4); } - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); // Expected output: 2x 2 channels of: // [35 32 26 26] // [32 32 27 27] @@ -377,7 +377,7 @@ TYPED_TEST(PoolingLayerTest, TestSetup) { pooling_param->set_kernel_size(3); pooling_param->set_stride(2); PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); EXPECT_EQ(this->blob_top_->height(), 3); @@ -393,7 +393,7 @@ TYPED_TEST(PoolingLayerTest, TestSetupPadded) { pooling_param->set_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); EXPECT_EQ(this->blob_top_->height(), 4); @@ -407,8 +407,8 @@ TYPED_TEST(PoolingLayerTest, PrintBackward) { layer_param.set_stride(2); layer_param.set_pool(LayerParameter_PoolMethod_MAX); PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; } @@ -419,7 +419,7 @@ TYPED_TEST(PoolingLayerTest, PrintBackward) { for (int i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = i; } - layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); for (int i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; } @@ -452,8 +452,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientMax) { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); PoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } } @@ -481,12 +481,12 @@ TYPED_TEST(PoolingLayerTest, TestForwardMaxPadded) { this->blob_bottom_->mutable_cpu_data()[7] = 2; this->blob_bottom_->mutable_cpu_data()[8] = 1; PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); EXPECT_EQ(this->blob_top_->height(), 3); EXPECT_EQ(this->blob_top_->width(), 3); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); Dtype epsilon = 1e-8; // Output: // [ 1 4 4 ] @@ -516,8 +516,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientMaxTopMask) { this->blob_top_vec_.push_back(this->blob_top_mask_); PoolingLayer 
layer(layer_param); GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); this->blob_top_vec_.pop_back(); } } @@ -537,12 +537,12 @@ TYPED_TEST(PoolingLayerTest, TestForwardAve) { ConstantFiller filler(filler_param); filler.Fill(this->blob_bottom_); PoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); EXPECT_EQ(this->blob_top_->height(), 3); EXPECT_EQ(this->blob_top_->width(), 3); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); Dtype epsilon = 1e-5; EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon); @@ -567,8 +567,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientAve) { pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); PoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } } @@ -586,8 +586,8 @@ TYPED_TEST(PoolingLayerTest, TestGradientAvePadded) { pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); PoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } } @@ -651,7 +651,7 @@ class CuDNNPoolingLayerTest : public ::testing::Test { blob_bottom_->mutable_cpu_data()[i + 14] = 3; } CuDNNPoolingLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_->num(), num); EXPECT_EQ(blob_top_->channels(), channels); EXPECT_EQ(blob_top_->height(), 2); @@ -662,7 +662,7 @@ class CuDNNPoolingLayerTest : public ::testing::Test { EXPECT_EQ(blob_top_mask_->height(), 2); EXPECT_EQ(blob_top_mask_->width(), 4); } - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); // Expected output: 2x 2 channels of: // [9 5 5 8] // [9 5 5 8] @@ -749,7 +749,7 @@ class CuDNNPoolingLayerTest : public ::testing::Test { blob_bottom_->mutable_cpu_data()[i + 35] = 11; } CuDNNPoolingLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_->num(), num); EXPECT_EQ(blob_top_->channels(), channels); EXPECT_EQ(blob_top_->height(), 4); @@ -760,7 +760,7 @@ class CuDNNPoolingLayerTest : public ::testing::Test { EXPECT_EQ(blob_top_mask_->height(), 4); EXPECT_EQ(blob_top_mask_->width(), 5); } - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); // Expected output: 2x 2 channels of: // [35 32 26 27 27] // [32 33 33 27 27] @@ -874,7 +874,7 @@ class CuDNNPoolingLayerTest : public ::testing::Test { blob_bottom_->mutable_cpu_data()[i + 35] = 11; } CuDNNPoolingLayer layer(layer_param); - layer.SetUp(blob_bottom_vec_, &blob_top_vec_); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); EXPECT_EQ(blob_top_->num(), num); EXPECT_EQ(blob_top_->channels(), channels); 
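Editorial aside, not part of the patch itself: Backward follows the same pattern, taking the bottom vector by const reference while the per-bottom propagate_down flags remain a vector<bool>, as in the TestBackward and gradient hunks above. A rough standalone sketch under those assumptions (the helper name, the 6x6 input, and the MAX-pooling default are illustrative, not taken from the patch):

    #include <vector>
    #include "caffe/blob.hpp"
    #include "caffe/proto/caffe.pb.h"
    #include "caffe/vision_layers.hpp"

    template <typename Dtype>
    void PoolingBackwardSketch() {
      caffe::LayerParameter layer_param;
      caffe::PoolingParameter* pooling_param = layer_param.mutable_pooling_param();
      pooling_param->set_kernel_size(3);
      pooling_param->set_stride(2);
      caffe::PoolingLayer<Dtype> layer(layer_param);  // default pool method is MAX

      caffe::Blob<Dtype> bottom(1, 1, 6, 6);
      caffe::Blob<Dtype> top;
      std::vector<caffe::Blob<Dtype>*> bottom_vec(1, &bottom);
      std::vector<caffe::Blob<Dtype>*> top_vec(1, &top);
      for (int i = 0; i < bottom.count(); ++i) {
        bottom.mutable_cpu_data()[i] = i;  // distinct values so MAX has clear winners
      }

      layer.SetUp(bottom_vec, top_vec);
      layer.Forward(bottom_vec, top_vec);
      // Seed the top diffs by hand, as the tests above do, then run Backward.
      for (int i = 0; i < top.count(); ++i) {
        top.mutable_cpu_diff()[i] = 1.;
      }
      std::vector<bool> propagate_down(bottom_vec.size(), true);
      layer.Backward(top_vec, propagate_down, bottom_vec);
      // Gradients w.r.t. the pooled inputs are now in bottom.cpu_diff().
    }
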
EXPECT_EQ(blob_top_->height(), 5); @@ -885,7 +885,7 @@ class CuDNNPoolingLayerTest : public ::testing::Test { EXPECT_EQ(blob_top_mask_->height(), 5); EXPECT_EQ(blob_top_mask_->width(), 4); } - layer.Forward(blob_bottom_vec_, &blob_top_vec_); + layer.Forward(blob_bottom_vec_, blob_top_vec_); // Expected output: 2x 2 channels of: // [35 32 26 26] // [32 32 27 27] @@ -955,7 +955,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupCuDNN) { pooling_param->set_kernel_size(3); pooling_param->set_stride(2); CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); EXPECT_EQ(this->blob_top_->height(), 3); @@ -971,7 +971,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestSetupPaddedCuDNN) { pooling_param->set_pad(1); pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); EXPECT_EQ(this->blob_top_->height(), 4); @@ -986,8 +986,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { layer_param.set_stride(2); layer_param.set_pool(LayerParameter_PoolMethod_MAX); CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; } @@ -998,7 +998,7 @@ TYPED_TEST(CuDNNPoolingLayerTest, PrintBackwardCuDNN) { for (int i = 0; i < this->blob_top_->count(); ++i) { this->blob_top_->mutable_cpu_diff()[i] = i; } - layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); + layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); for (int i = 0; i < this->blob_bottom_->count(); ++i) { cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; } @@ -1033,8 +1033,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxCuDNN) { pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } } @@ -1062,12 +1062,12 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardMaxPaddedCuDNN) { this->blob_bottom_->mutable_cpu_data()[7] = 2; this->blob_bottom_->mutable_cpu_data()[8] = 1; CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); EXPECT_EQ(this->blob_top_->height(), 3); EXPECT_EQ(this->blob_top_->width(), 3); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam epsilon = 1e-8; // Output: // [ 1 4 4 ] @@ -1097,8 +1097,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientMaxTopMaskCuDNN) { 
this->blob_top_vec_.push_back(this->blob_top_mask_); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); this->blob_top_vec_.pop_back(); } } @@ -1118,12 +1118,12 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestForwardAveCuDNN) { ConstantFiller filler(filler_param); filler.Fill(this->blob_bottom_); CuDNNPoolingLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); EXPECT_EQ(this->blob_top_->num(), 1); EXPECT_EQ(this->blob_top_->channels(), 1); EXPECT_EQ(this->blob_top_->height(), 3); EXPECT_EQ(this->blob_top_->width(), 3); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); TypeParam epsilon = 1e-5; EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon); @@ -1148,8 +1148,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAveCuDNN) { pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } } @@ -1167,8 +1167,8 @@ TYPED_TEST(CuDNNPoolingLayerTest, TestGradientAvePaddedCuDNN) { pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); CuDNNPoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } } } diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp index 0c104c2..0d52fa1 100644 --- a/src/caffe/test/test_power_layer.cpp +++ b/src/caffe/test/test_power_layer.cpp @@ -37,8 +37,8 @@ class PowerLayerTest : public MultiDeviceTest { layer_param.mutable_power_param()->set_scale(scale); layer_param.mutable_power_param()->set_shift(shift); PowerLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); // Now, check values const Dtype* bottom_data = this->blob_bottom_->cpu_data(); const Dtype* top_data = this->blob_top_->cpu_data(); @@ -75,8 +75,8 @@ class PowerLayerTest : public MultiDeviceTest { } } GradientChecker checker(1e-2, 1e-2, 1701, 0., 0.01); - checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_)); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); } Blob* const blob_bottom_; diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp index 47ccdea..e5737e4 100644 --- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -79,9 +79,9 @@ class SigmoidCrossEntropyLossLayerTest : public MultiDeviceTest { // Fill the targets vector targets_filler.Fill(this->blob_bottom_targets_); SigmoidCrossEntropyLossLayer layer(layer_param); - 
layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); Dtype layer_loss = - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); const int count = this->blob_bottom_data_->count(); const int num = this->blob_bottom_data_->num(); const Dtype* blob_bottom_data = this->blob_bottom_data_->cpu_data(); @@ -112,10 +112,10 @@ TYPED_TEST(SigmoidCrossEntropyLossLayerTest, TestGradient) { const Dtype kLossWeight = 3.7; layer_param.add_loss_weight(kLossWeight); SigmoidCrossEntropyLossLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &this->blob_top_vec_); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); GradientChecker checker(1e-2, 1e-2, 1701); - checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_), - &(this->blob_top_vec_), 0); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); } diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index ee88187..395be28 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -64,7 +64,7 @@ TYPED_TEST(SliceLayerTest, TestSetupNum) { LayerParameter layer_param; layer_param.mutable_slice_param()->set_slice_dim(0); SliceLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_1_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_); EXPECT_EQ(this->blob_bottom_->num(), 3 * this->blob_top_0_->num()); EXPECT_EQ(this->blob_top_0_->num(), this->blob_top_1_->num()); EXPECT_EQ(this->blob_top_0_->num(), this->blob_top_2_->num()); @@ -78,7 +78,7 @@ TYPED_TEST(SliceLayerTest, TestSetupChannels) { LayerParameter layer_param; layer_param.mutable_slice_param()->add_slice_point(3); SliceLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_0_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); EXPECT_EQ(this->blob_top_0_->num(), this->blob_bottom_->num()); EXPECT_EQ(this->blob_top_0_->channels(), 3); EXPECT_EQ(this->blob_top_1_->channels(), 9); @@ -93,11 +93,11 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossNum) { LayerParameter layer_param; layer_param.mutable_slice_param()->set_slice_dim(0); SliceLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_0_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_0_); const int top_num = this->blob_bottom_->num() / 2; ASSERT_EQ(top_num, this->blob_top_0_->num()); ASSERT_EQ(top_num, this->blob_top_1_->num()); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_0_)); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_0_); for (int n = 0; n < top_num; ++n) { for (int c = 0; c < this->blob_top_0_->channels(); ++c) { for (int h = 0; h < this->blob_bottom_->height(); ++h) { @@ -127,12 +127,12 @@ TYPED_TEST(SliceLayerTest, TestSliceAcrossChannels) { layer_param.mutable_slice_param()->add_slice_point(kSlicePoint0); layer_param.mutable_slice_param()->add_slice_point(kSlicePoint1); SliceLayer layer(layer_param); - layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_1_)); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_1_); ASSERT_EQ(kSlicePoint0, this->blob_top_0_->channels()); ASSERT_EQ(kSlicePoint1 - kSlicePoint0, this->blob_top_1_->channels()); ASSERT_EQ(this->blob_bottom_->channels() - kSlicePoint1, this->blob_top_2_->channels()); - layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_1_)); + 
   for (int n = 0; n < this->blob_bottom_->num(); ++n) {
     for (int c = 0; c < this->blob_top_0_->channels(); ++c) {
       for (int h = 0; h < this->blob_bottom_->height(); ++h) {
@@ -169,8 +169,8 @@ TYPED_TEST(SliceLayerTest, TestGradientAcrossNum) {
   layer_param.mutable_slice_param()->set_slice_dim(0);
   SliceLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_0_));
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_0_);
 }

 TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) {
@@ -182,8 +182,8 @@ TYPED_TEST(SliceLayerTest, TestGradientAcrossChannels) {
   layer_param.mutable_slice_param()->add_slice_point(kSlicePoint);
   SliceLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_0_));
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_0_);
 }

 } // namespace caffe
diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp
index 41f643f..f667442 100644
--- a/src/caffe/test/test_softmax_layer.cpp
+++ b/src/caffe/test/test_softmax_layer.cpp
@@ -41,8 +41,8 @@ TYPED_TEST(SoftmaxLayerTest, TestForward) {
   typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   SoftmaxLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
   // Test sum
   for (int i = 0; i < this->blob_bottom_->num(); ++i) {
     for (int k = 0; k < this->blob_bottom_->height(); ++k) {
@@ -76,8 +76,8 @@ TYPED_TEST(SoftmaxLayerTest, TestGradient) {
   LayerParameter layer_param;
   SoftmaxLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
 }

 #ifdef USE_CUDNN
@@ -107,8 +107,8 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestForwardCuDNN) {
   Caffe::set_mode(Caffe::GPU);
   LayerParameter layer_param;
   CuDNNSoftmaxLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
   // Test sum
   for (int i = 0; i < this->blob_bottom_->num(); ++i) {
     for (int k = 0; k < this->blob_bottom_->height(); ++k) {
@@ -142,8 +142,8 @@ TYPED_TEST(CuDNNSoftmaxLayerTest, TestGradientCuDNN) {
   LayerParameter layer_param;
   CuDNNSoftmaxLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-3);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
 }

 #endif
diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp
index 246d64e..badda3b 100644
--- a/src/caffe/test/test_softmax_with_loss_layer.cpp
+++ b/src/caffe/test/test_softmax_with_loss_layer.cpp
@@ -57,8 +57,8 @@ TYPED_TEST(SoftmaxWithLossLayerTest, TestGradient) {
   layer_param.add_loss_weight(3);
   SoftmaxWithLossLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-2, 1701);
-  checker.CheckGradientExhaustive(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_), 0);
+  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_, 0);
 }

 } // namespace caffe
diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp
index e9b942c..38e7621 100644
--- a/src/caffe/test/test_split_layer.cpp
+++ b/src/caffe/test/test_split_layer.cpp
@@ -52,7 +52,7 @@ TYPED_TEST(SplitLayerTest, TestSetup) {
   typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   SplitLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_a_->num(), 2);
   EXPECT_EQ(this->blob_top_a_->channels(), 3);
   EXPECT_EQ(this->blob_top_a_->height(), 6);
@@ -67,8 +67,8 @@ TYPED_TEST(SplitLayerTest, Test) {
   typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   SplitLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
   for (int i = 0; i < this->blob_bottom_->count(); ++i) {
     Dtype bottom_value = this->blob_bottom_->cpu_data()[i];
     EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]);
@@ -81,8 +81,8 @@ TYPED_TEST(SplitLayerTest, TestGradient) {
   LayerParameter layer_param;
   SplitLayer layer(layer_param);
   GradientChecker checker(1e-2, 1e-2);
-  checker.CheckGradientEltwise(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
+  checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
 }
diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp
index 4f13981..ad51510 100644
--- a/src/caffe/test/test_stochastic_pooling.cpp
+++ b/src/caffe/test/test_stochastic_pooling.cpp
@@ -53,7 +53,7 @@ TYPED_TEST(StochasticPoolingLayerTest, TestSetup) {
   pooling_param->set_kernel_size(3);
   pooling_param->set_stride(2);
   PoolingLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
   EXPECT_EQ(this->blob_top_->height(), 3);
@@ -69,8 +69,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) {
   pooling_param->set_stride(2);
   pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
   PoolingLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);

   // Check if the output is correct - it should do random sampling
   const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
@@ -113,8 +113,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) {
   pooling_param->set_stride(2);
   pooling_param->set_pool(PoolingParameter_PoolMethod_STOCHASTIC);
   PoolingLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);

   // Check if the output is correct - it should do random sampling
   const TypeParam* bottom_data = this->blob_bottom_->cpu_data();
@@ -154,8 +154,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) {
   GradientChecker checker(1e-4, 1e-2);
   // it is too expensive to call curand multiple times, so we don't do an
   // exhaustive gradient check.
-  checker.CheckGradient(&layer, &(this->blob_bottom_vec_),
-      &(this->blob_top_vec_));
+  checker.CheckGradient(&layer, this->blob_bottom_vec_,
+      this->blob_top_vec_);
 }
diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp
index 32dfbee..05ce821 100644
--- a/src/caffe/test/test_threshold_layer.cpp
+++ b/src/caffe/test/test_threshold_layer.cpp
@@ -40,7 +40,7 @@ TYPED_TEST(ThresholdLayerTest, TestSetup) {
   typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ThresholdLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels());
   EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_->height());
@@ -51,8 +51,8 @@ TYPED_TEST(ThresholdLayerTest, Test) {
   typedef typename TypeParam::Dtype Dtype;
   LayerParameter layer_param;
   ThresholdLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
   // Now, check values
   const Dtype* bottom_data = this->blob_bottom_->cpu_data();
   const Dtype* top_data = this->blob_top_->cpu_data();
@@ -76,8 +76,8 @@ TYPED_TEST(ThresholdLayerTest, Test2) {
       layer_param.mutable_threshold_param();
   threshold_param->set_threshold(0.5);
   ThresholdLayer layer(layer_param);
-  layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
-  layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+  layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_);
+  layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_);
   // Now, check values
   const Dtype* bottom_data = this->blob_bottom_->cpu_data();
   const Dtype* top_data = this->blob_top_->cpu_data();
diff --git a/tools/caffe.cpp b/tools/caffe.cpp
index c8c8c1a..bfcd9f1 100644
--- a/tools/caffe.cpp
+++ b/tools/caffe.cpp
@@ -237,8 +237,8 @@ int time() {
     for (int j = 0; j < FLAGS_iterations; ++j) {
       // Although Reshape should be essentially free, we include it here
      // so that we will notice Reshape performance bugs.
-      layers[i]->Reshape(bottom_vecs[i], &top_vecs[i]);
-      layers[i]->Forward(bottom_vecs[i], &top_vecs[i]);
+      layers[i]->Reshape(bottom_vecs[i], top_vecs[i]);
+      layers[i]->Forward(bottom_vecs[i], top_vecs[i]);
     }
     LOG(INFO) << layername << "\tforward: " << timer.MilliSeconds() <<
       " milliseconds.";
@@ -252,7 +252,7 @@ int time() {
     timer.Start();
     for (int j = 0; j < FLAGS_iterations; ++j) {
       layers[i]->Backward(top_vecs[i], bottom_need_backward[i],
-          &bottom_vecs[i]);
+          bottom_vecs[i]);
     }
     LOG(INFO) << layername << "\tbackward: " << timer.MilliSeconds() <<
       " milliseconds.";
-- 
2.7.4