From 22fa0a2945e58d3f748071034b54ec8610fc265a Mon Sep 17 00:00:00 2001 From: Jeff Donahue Date: Tue, 25 Feb 2014 15:21:23 -0800 Subject: [PATCH] fix most linter errors --- include/caffe/filler.hpp | 2 +- include/caffe/layer.hpp | 8 +-- include/caffe/net.hpp | 4 +- include/caffe/solver.hpp | 3 +- include/caffe/util/im2col.hpp | 16 +++--- include/caffe/util/insert_splits.hpp | 2 + src/caffe/common.cpp | 5 +- src/caffe/layers/bnll_layer.cu | 6 ++- src/caffe/layers/conv_layer.cpp | 2 +- src/caffe/layers/data_layer.cpp | 6 ++- src/caffe/layers/dropout_layer.cu | 13 +++-- src/caffe/layers/flatten_layer.cpp | 2 +- src/caffe/layers/im2col_layer.cpp | 13 ++--- src/caffe/layers/inner_product_layer.cpp | 2 +- src/caffe/layers/loss_layer.cu | 15 +++--- src/caffe/layers/lrn_layer.cpp | 2 +- src/caffe/layers/lrn_layer.cu | 5 ++ src/caffe/layers/neuron_layer.cpp | 2 +- src/caffe/layers/pooling_layer.cpp | 2 +- src/caffe/layers/pooling_layer.cu | 17 +++++-- src/caffe/layers/relu_layer.cu | 9 +++- src/caffe/layers/sigmoid_layer.cu | 11 ++-- src/caffe/layers/softmax_layer.cu | 11 ++-- src/caffe/layers/softmax_loss_layer.cu | 10 ++-- src/caffe/layers/split_layer.cpp | 2 +- src/caffe/layers/tanh_layer.cu | 12 +++-- src/caffe/solver.cpp | 5 +- src/caffe/test/test_blob.cpp | 6 +-- src/caffe/test/test_caffe_main.hpp | 6 +-- src/caffe/test/test_common.cpp | 10 ++-- src/caffe/test/test_convolution_layer.cpp | 21 +++++--- src/caffe/test/test_data_layer.cpp | 15 +++--- src/caffe/test/test_euclidean_loss_layer.cpp | 5 +- src/caffe/test/test_filler.cpp | 12 ++--- src/caffe/test/test_flatten_layer.cpp | 13 +++-- src/caffe/test/test_im2col_layer.cpp | 13 +++-- src/caffe/test/test_innerproduct_layer.cpp | 56 ++++++++++---------- src/caffe/test/test_lrn_layer.cpp | 30 ++++++----- .../test/test_multinomial_logistic_loss_layer.cpp | 6 ++- src/caffe/test/test_neuron_layer.cpp | 34 ++++++++----- src/caffe/test/test_platform.cpp | 48 +++++++++++------- src/caffe/test/test_pooling_layer.cpp | 21 +++++--- src/caffe/test/test_protobuf.cpp | 4 +- src/caffe/test/test_softmax_layer.cpp | 10 ++-- src/caffe/test/test_softmax_with_loss_layer.cpp | 6 ++- src/caffe/test/test_split_layer.cpp | 11 ++-- ...stic_pooing.cpp => test_stochastic_pooling.cpp} | 18 ++++--- src/caffe/test/test_syncedmem.cpp | 23 +++++---- src/caffe/test/test_tanh_layer.cpp | 35 ++++++++----- src/caffe/test/test_util_blas.cpp | 33 ++++++------ src/caffe/util/im2col.cpp | 47 ++++++++--------- src/caffe/util/im2col.cu | 59 +++++++++++++--------- src/caffe/util/io.cpp | 7 +-- src/caffe/util/math_functions.cu | 2 + 54 files changed, 435 insertions(+), 303 deletions(-) rename src/caffe/test/{test_stochastic_pooing.cpp => test_stochastic_pooling.cpp} (95%) diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index effe62f..5b934a3 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -42,7 +42,7 @@ class ConstantFiller : public Filler { for (int i = 0; i < count; ++i) { data[i] = value; } - }; + } }; template diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index adc6365..a0cb487 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -67,7 +67,7 @@ class Layer { vector*>* top) { // LOG(WARNING) << "Using CPU code as backup."; Forward_cpu(bottom, top); - }; + } // Backward functions: the backward function will compute the gradients for // any parameters and also for the bottom blobs if propagate_down is true. 
@@ -80,7 +80,7 @@ class Layer {
       vector<Blob<Dtype>*>* bottom) {
     // LOG(WARNING) << "Using CPU code as backup.";
     return Backward_cpu(top, propagate_down, bottom);
-  };
+  }
 
   DISABLE_COPY_AND_ASSIGN(Layer);
 };  // class Layer
@@ -101,7 +101,7 @@ inline void Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
   default:
     LOG(FATAL) << "Unknown caffe mode.";
   }
-};
+}
 
 template <typename Dtype>
 inline Dtype Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
@@ -115,7 +115,7 @@ inline Dtype Layer<Dtype>::Backward(const vector<Blob<Dtype>*>& top,
   default:
     LOG(FATAL) << "Unknown caffe mode.";
   }
-};
+}
 
 template <typename Dtype>
 void Layer<Dtype>::ToProto(LayerParameter* param, bool write_diff) {
diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp
index 684d6c5..b5a57b3 100644
--- a/include/caffe/net.hpp
+++ b/include/caffe/net.hpp
@@ -22,8 +22,8 @@ namespace caffe {
 template <typename Dtype>
 class Net {
  public:
-  Net(const NetParameter& param);
-  Net(const string& param_file);
+  explicit Net(const NetParameter& param);
+  explicit Net(const string& param_file);
   virtual ~Net() {}
 
   // Initialize a network with the network parameter.
diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp
index 25ba3b6..a5dafe6 100644
--- a/include/caffe/solver.hpp
+++ b/include/caffe/solver.hpp
@@ -3,6 +3,7 @@
 #ifndef CAFFE_OPTIMIZATION_SOLVER_HPP_
 #define CAFFE_OPTIMIZATION_SOLVER_HPP_
 
+#include <string>
 #include <vector>
 
 namespace caffe {
@@ -66,6 +67,6 @@ class SGDSolver : public Solver<Dtype> {
 };
 
 
-}  // namspace caffe
+}  // namespace caffe
 
 #endif  // CAFFE_OPTIMIZATION_SOLVER_HPP_
diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp
index 521efd3..17da49c 100644
--- a/include/caffe/util/im2col.hpp
+++ b/include/caffe/util/im2col.hpp
@@ -7,23 +7,23 @@ namespace caffe {
 
 template <typename Dtype>
 void im2col_cpu(const Dtype* data_im, const int channels,
-    const int height, const int width, const int ksize, const int pad, const int stride,
-    Dtype* data_col);
+    const int height, const int width, const int ksize, const int pad,
+    const int stride, Dtype* data_col);
 
 template <typename Dtype>
 void col2im_cpu(const Dtype* data_col, const int channels,
-    const int height, const int width, const int psize, const int pad, const int stride,
-    Dtype* data_im);
+    const int height, const int width, const int psize, const int pad,
+    const int stride, Dtype* data_im);
 
 template <typename Dtype>
 void im2col_gpu(const Dtype* data_im, const int channels,
-    const int height, const int width, const int ksize, const int pad, const int stride,
-    Dtype* data_col);
+    const int height, const int width, const int ksize, const int pad,
+    const int stride, Dtype* data_col);
 
 template <typename Dtype>
 void col2im_gpu(const Dtype* data_col, const int channels,
-    const int height, const int width, const int psize, const int pad, const int stride,
-    Dtype* data_im);
+    const int height, const int width, const int psize, const int pad,
+    const int stride, Dtype* data_im);
 
 }  // namespace caffe
diff --git a/include/caffe/util/insert_splits.hpp b/include/caffe/util/insert_splits.hpp
index d0df856..37972b3 100644
--- a/include/caffe/util/insert_splits.hpp
+++ b/include/caffe/util/insert_splits.hpp
@@ -3,6 +3,8 @@
 #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_
 #define _CAFFE_UTIL_INSERT_SPLITS_HPP_
 
+#include <string>
+
 #include "caffe/proto/caffe.pb.h"
 
 using std::pair;
diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp
index 7498579..1109674 100644
--- a/src/caffe/common.cpp
+++ b/src/caffe/common.cpp
@@ -36,7 +36,8 @@ Caffe::Caffe()
   }
   // Try to create a vsl stream. This should almost always work, but we will
   // check it anyway.
-  if (vslNewStream(&vsl_stream_, VSL_BRNG_MT19937, cluster_seedgen()) != VSL_STATUS_OK) {
+  if (vslNewStream(&vsl_stream_, VSL_BRNG_MT19937,
+      cluster_seedgen()) != VSL_STATUS_OK) {
     LOG(ERROR) << "Cannot create vsl stream. VSL random number generator "
         << "won't be available.";
   }
@@ -48,7 +49,7 @@ Caffe::~Caffe() {
     CURAND_CHECK(curandDestroyGenerator(curand_generator_));
   }
   if (vsl_stream_) VSL_CHECK(vslDeleteStream(&vsl_stream_));
-};
+}
 
 void Caffe::set_random_seed(const unsigned int seed) {
   // Curand seed
diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu
index 2c06a63..c1795bc 100644
--- a/src/caffe/layers/bnll_layer.cu
+++ b/src/caffe/layers/bnll_layer.cu
@@ -1,8 +1,10 @@
 // Copyright 2013 Yangqing Jia
 
+#include <algorithm>
+#include <vector>
+
 #include "caffe/layer.hpp"
 #include "caffe/vision_layers.hpp"
-#include <algorithm>
 
 using std::max;
 
@@ -57,6 +59,7 @@ void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
+  // NOLINT_NEXTLINE(whitespace/operators)
   BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -81,6 +84,7 @@ Dtype BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
+  // NOLINT_NEXTLINE(whitespace/operators)
   BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp
index 69a860b..1549039 100644
--- a/src/caffe/layers/conv_layer.cpp
+++ b/src/caffe/layers/conv_layer.cpp
@@ -73,7 +73,7 @@ void ConvolutionLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       bias_multiplier_data[i] = 1.;
     }
   }
-};
+}
 
 
 template <typename Dtype>
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
index ffb7fd0..7950313 100644
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -50,12 +50,15 @@ void* DataLayerPrefetch(void* layer_pointer) {
     int h_off, w_off;
     // We only do random crop when we do training.
     if (Caffe::phase() == Caffe::TRAIN) {
+      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
       h_off = rand() % (height - cropsize);
+      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
       w_off = rand() % (width - cropsize);
     } else {
       h_off = (height - cropsize) / 2;
       w_off = (width - cropsize) / 2;
     }
+    // NOLINT_NEXTLINE(runtime/threadsafe_fn)
     if (mirror && rand() % 2) {
       // Copy mirrored version
       for (int c = 0; c < channels; ++c) {
@@ -111,7 +114,7 @@ void* DataLayerPrefetch(void* layer_pointer) {
     }
   }
 
-  return (void*)NULL;
+  return reinterpret_cast<void*>(NULL);
 }
 
 template <typename Dtype>
@@ -140,6 +143,7 @@ void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
     iter_->SeekToFirst();
     // Check if we would need to randomly skip a few data points
     if (this->layer_param_.rand_skip()) {
+      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
       unsigned int skip = rand() % this->layer_param_.rand_skip();
       LOG(INFO) << "Skipping first " << skip << " data points.";
       while (skip-- > 0) {
diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu
index df94f2d..0e15b6f 100644
--- a/src/caffe/layers/dropout_layer.cu
+++ b/src/caffe/layers/dropout_layer.cu
@@ -2,6 +2,7 @@
 
 #include <algorithm>
 #include <limits>
+#include <vector>
 
 #include "caffe/common.hpp"
 #include "caffe/layer.hpp"
@@ -23,14 +24,14 @@ void DropoutLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
   DCHECK(threshold_ < 1.);
   scale_ = 1. / (1.
- threshold_); uint_thres_ = (unsigned int)(UINT_MAX * threshold_); -}; +} template void DropoutLayer::Forward_cpu(const vector*>& bottom, vector*>* top) { const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = (*top)[0]->mutable_cpu_data(); - int* mask = (int*)rand_vec_->mutable_cpu_data(); + int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); const int count = bottom[0]->count(); if (Caffe::phase() == Caffe::TRAIN) { // Create random numbers @@ -52,7 +53,7 @@ Dtype DropoutLayer::Backward_cpu(const vector*>& top, if (propagate_down) { const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); - const int* mask = (int*)(rand_vec_->cpu_data()); + const int* mask = reinterpret_cast(rand_vec_->cpu_data()); const int count = (*bottom)[0]->count(); for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * mask[i] * scale_; @@ -81,9 +82,10 @@ void DropoutLayer::Forward_gpu(const vector*>& bottom, CURAND_CHECK(curandGenerate(Caffe::curand_generator(), (unsigned int*)(rand_vec_->mutable_gpu_data()), count)); // set thresholds + // NOLINT_NEXTLINE(whitespace/operators) DropoutForward<<>>( - count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_, scale_, - top_data); + count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_, + scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { CUDA_CHECK(cudaMemcpy(top_data, bottom_data, @@ -111,6 +113,7 @@ Dtype DropoutLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data(); const int count = (*bottom)[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) DropoutBackward<<>>( count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index a202f72..bedf296 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -19,7 +19,7 @@ void FlattenLayer::SetUp(const vector*>& bottom, count_ = bottom[0]->num() * channels_out; CHECK_EQ(count_, bottom[0]->count()); CHECK_EQ(count_, (*top)[0]->count()); -}; +} template void FlattenLayer::Forward_cpu(const vector*>& bottom, diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 5f9986a..a94209b 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -21,8 +21,9 @@ void Im2colLayer::SetUp(const vector*>& bottom, HEIGHT_ = bottom[0]->height(); WIDTH_ = bottom[0]->width(); (*top)[0]->Reshape(bottom[0]->num(), CHANNELS_ * KSIZE_ * KSIZE_, - (HEIGHT_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1, (WIDTH_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1); -}; + (HEIGHT_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1, + (WIDTH_ + 2 * PAD_ - KSIZE_) / STRIDE_ + 1); +} template void Im2colLayer::Forward_cpu(const vector*>& bottom, @@ -31,7 +32,7 @@ void Im2colLayer::Forward_cpu(const vector*>& bottom, Dtype* top_data = (*top)[0]->mutable_cpu_data(); for (int n = 0; n < bottom[0]->num(); ++n) { im2col_cpu(bottom_data + bottom[0]->offset(n), CHANNELS_, HEIGHT_, - WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n)); + WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n)); } } @@ -42,7 +43,7 @@ void Im2colLayer::Forward_gpu(const vector*>& bottom, Dtype* top_data = (*top)[0]->mutable_gpu_data(); for (int n = 0; n < bottom[0]->num(); ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), CHANNELS_, HEIGHT_, - WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + 
(*top)[0]->offset(n)); + WIDTH_, KSIZE_, PAD_, STRIDE_, top_data + (*top)[0]->offset(n)); } } @@ -53,7 +54,7 @@ Dtype Im2colLayer::Backward_cpu(const vector*>& top, Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff(); for (int n = 0; n < top[0]->num(); ++n) { col2im_cpu(top_diff + top[0]->offset(n), CHANNELS_, HEIGHT_, - WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n)); + WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n)); } return Dtype(0.); } @@ -66,7 +67,7 @@ Dtype Im2colLayer::Backward_gpu(const vector*>& top, Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); for (int n = 0; n < top[0]->num(); ++n) { col2im_gpu(top_diff + top[0]->offset(n), CHANNELS_, HEIGHT_, - WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n)); + WIDTH_, KSIZE_, PAD_, STRIDE_, bottom_diff + (*bottom)[0]->offset(n)); } return Dtype(0.); } diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index 18f1df0..d770e23a 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -59,7 +59,7 @@ void InnerProductLayer::SetUp(const vector*>& bottom, bias_multiplier_data[i] = 1.; } } -}; +} template void InnerProductLayer::Forward_cpu(const vector*>& bottom, diff --git a/src/caffe/layers/loss_layer.cu b/src/caffe/layers/loss_layer.cu index ac05ba4..745bfa4 100644 --- a/src/caffe/layers/loss_layer.cu +++ b/src/caffe/layers/loss_layer.cu @@ -1,7 +1,9 @@ // Copyright 2013 Yangqing Jia + #include #include #include +#include #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" @@ -24,12 +26,12 @@ void MultinomialLogisticLossLayer::SetUp( CHECK_EQ(bottom[1]->channels(), 1); CHECK_EQ(bottom[1]->height(), 1); CHECK_EQ(bottom[1]->width(), 1); -}; +} template -Dtype MultinomialLogisticLossLayer::Backward_cpu(const vector*>& top, - const bool propagate_down, +Dtype MultinomialLogisticLossLayer::Backward_cpu( + const vector*>& top, const bool propagate_down, vector*>* bottom) { const Dtype* bottom_data = (*bottom)[0]->cpu_data(); const Dtype* bottom_label = (*bottom)[1]->cpu_data(); @@ -66,7 +68,7 @@ void InfogainLossLayer::SetUp( CHECK_EQ(infogain_.num(), 1); CHECK_EQ(infogain_.channels(), 1); CHECK_EQ(infogain_.height(), infogain_.width()); -}; +} template @@ -154,10 +156,11 @@ void AccuracyLayer::Forward_cpu(const vector*>& bottom, max_id = j; } } - if (max_id == (int)bottom_label[i]) { + if (max_id == static_cast(bottom_label[i])) { ++accuracy; } - Dtype prob = max(bottom_data[i * dim + (int)bottom_label[i]], kLOG_THRESHOLD); + Dtype prob = max(bottom_data[i * dim + static_cast(bottom_label[i])], + kLOG_THRESHOLD); logprob -= log(prob); } // LOG(INFO) << "Accuracy: " << accuracy; diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 337b77b..36dbe41 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -25,7 +25,7 @@ void LRNLayer::SetUp(const vector*>& bottom, pre_pad_ = (size_ - 1) / 2; alpha_ = this->layer_param_.alpha(); beta_ = this->layer_param_.beta(); -}; +} template void LRNLayer::Forward_cpu(const vector*>& bottom, diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu index 2afbf38..9c82b35 100644 --- a/src/caffe/layers/lrn_layer.cu +++ b/src/caffe/layers/lrn_layer.cu @@ -1,5 +1,7 @@ // Copyright 2013 Yangqing Jia +#include + #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/math_functions.hpp" @@ -74,11 +76,13 @@ void LRNLayer::Forward_gpu(const 
vector*>& bottom, // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. int n_threads = num_ * height_ * width_; + // NOLINT_NEXTLINE(whitespace/operators) LRNFillScale<<>>( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) LRNComputeOutput<<>>( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; @@ -151,6 +155,7 @@ template Dtype LRNLayer::Backward_gpu(const vector*>& top, const bool propagate_down, vector*>* bottom) { int n_threads = num_ * height_ * width_; + // NOLINT_NEXTLINE(whitespace/operators) LRNComputeDiff<<>>( n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp index dd09dca..5def755 100644 --- a/src/caffe/layers/neuron_layer.cpp +++ b/src/caffe/layers/neuron_layer.cpp @@ -18,7 +18,7 @@ void NeuronLayer::SetUp(const vector*>& bottom, (*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width()); } -}; +} INSTANTIATE_CLASS(NeuronLayer); diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 6141642..ce30e84 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -34,7 +34,7 @@ void PoolingLayer::SetUp(const vector*>& bottom, rand_idx_.Reshape(bottom[0]->num(), CHANNELS_, POOLED_HEIGHT_, POOLED_WIDTH_); } -}; +} // TODO(Yangqing): Is there a faster way to do pooling in the channel-first // case? diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu index 4fd326c..75078b3 100644 --- a/src/caffe/layers/pooling_layer.cu +++ b/src/caffe/layers/pooling_layer.cu @@ -2,6 +2,8 @@ #include #include +#include + #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/util/math_functions.hpp" @@ -144,12 +146,14 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, int count = (*top)[0]->count(); switch (this->layer_param_.pool()) { case LayerParameter_PoolMethod_MAX: + // NOLINT_NEXTLINE(whitespace/operators) MaxPoolForward<<>>( count, bottom_data, bottom[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_, top_data); break; case LayerParameter_PoolMethod_AVE: + // NOLINT_NEXTLINE(whitespace/operators) AvePoolForward<<>>( count, bottom_data, bottom[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_, @@ -160,12 +164,16 @@ void PoolingLayer::Forward_gpu(const vector*>& bottom, // We need to create the random index as well. 
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), rand_idx_.mutable_gpu_data(), count)); - StoPoolForwardTrain<<>>( + // NOLINT_NEXTLINE(whitespace/operators) + StoPoolForwardTrain<<>>( count, bottom_data, bottom[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_, rand_idx_.mutable_gpu_data(), top_data); } else { - StoPoolForwardTest<<>>( + // NOLINT_NEXTLINE(whitespace/operators) + StoPoolForwardTest<<>>( count, bottom_data, bottom[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_, top_data); @@ -267,7 +275,7 @@ __global__ void StoPoolBackward(const int nthreads, for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff[ph * pooled_width + pw] * - (index == int(rand_idx[ph * pooled_width + pw])); + (index == static_cast(rand_idx[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; @@ -286,18 +294,21 @@ Dtype PoolingLayer::Backward_gpu(const vector*>& top, int count = (*bottom)[0]->count(); switch (this->layer_param_.pool()) { case LayerParameter_PoolMethod_MAX: + // NOLINT_NEXTLINE(whitespace/operators) MaxPoolBackward<<>>( count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff, top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff); break; case LayerParameter_PoolMethod_AVE: + // NOLINT_NEXTLINE(whitespace/operators) AvePoolBackward<<>>( count, top_diff, top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff); break; case LayerParameter_PoolMethod_STOCHASTIC: + // NOLINT_NEXTLINE(whitespace/operators) StoPoolBackward<<>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_, diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu index b0fc46e..c56d22a 100644 --- a/src/caffe/layers/relu_layer.cu +++ b/src/caffe/layers/relu_layer.cu @@ -1,8 +1,10 @@ // Copyright 2013 Yangqing Jia +#include +#include + #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#include using std::max; @@ -49,11 +51,13 @@ void ReLULayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); const int count = bottom[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) ReLUForward<<>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " - // << (unsigned long)bottom_data << " top_data: " << (unsigned long)top_data + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } @@ -76,6 +80,7 @@ Dtype ReLULayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); const int count = (*bottom)[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) ReLUBackward<<>>( count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu index f112a52..1680aa6 100644 --- a/src/caffe/layers/sigmoid_layer.cu +++ b/src/caffe/layers/sigmoid_layer.cu @@ -1,9 +1,11 @@ // Copyright 2014 Tobias Domhan -#include "caffe/layer.hpp" -#include "caffe/vision_layers.hpp" #include #include +#include + +#include "caffe/layer.hpp" +#include "caffe/vision_layers.hpp" using std::max; @@ -63,11 +65,13 @@ void 
SigmoidLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); const int count = bottom[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) SigmoidForward<<>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " - // << (unsigned long)bottom_data << " top_data: " << (unsigned long)top_data + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } @@ -91,6 +95,7 @@ Dtype SigmoidLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); const int count = (*bottom)[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) SigmoidBackward<<>>( count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu index a765969..f7adab3 100644 --- a/src/caffe/layers/softmax_layer.cu +++ b/src/caffe/layers/softmax_layer.cu @@ -3,7 +3,8 @@ #include #include #include -#include + +#include "thrust/device_vector.h" #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" @@ -27,7 +28,7 @@ void SoftmaxLayer::SetUp(const vector*>& bottom, multiplier_data[i] = 1.; } scale_.Reshape(bottom[0]->num(), 1, 1, 1); -}; +} template void SoftmaxLayer::Forward_cpu(const vector*>& bottom, @@ -104,19 +105,23 @@ void SoftmaxLayer::Forward_gpu(const vector*>& bottom, // we need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. // Compute max + // NOLINT_NEXTLINE(whitespace/operators) kernel_get_max<<>>( num, dim, bottom_data, scale_data); // subtraction caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, num, dim, 1, -1., scale_data, sum_multiplier_.gpu_data(), 1., top_data); // Perform exponentiation + // NOLINT_NEXTLINE(whitespace/operators) kernel_exp<<>>( num * dim, top_data, top_data); // sum after exp caffe_gpu_gemv(CblasNoTrans, num, dim, 1., top_data, sum_multiplier_.gpu_data(), 0., scale_data); // Do division - kernel_softmax_div<<>>( + // NOLINT_NEXTLINE(whitespace/operators) + kernel_softmax_div<<>>( num, dim, scale_data, top_data); } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 9bb2313..3e26586 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -21,19 +21,19 @@ void SoftmaxWithLossLayer::SetUp(const vector*>& bottom, softmax_bottom_vec_.push_back(bottom[0]); softmax_top_vec_.push_back(&prob_); softmax_layer_->SetUp(softmax_bottom_vec_, &softmax_top_vec_); -}; +} template -void SoftmaxWithLossLayer::Forward_cpu(const vector*>& bottom, - vector*>* top) { +void SoftmaxWithLossLayer::Forward_cpu( + const vector*>& bottom, vector*>* top) { // The forward pass computes the softmax prob values. softmax_bottom_vec_[0] = bottom[0]; softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_); } template -void SoftmaxWithLossLayer::Forward_gpu(const vector*>& bottom, - vector*>* top) { +void SoftmaxWithLossLayer::Forward_gpu( + const vector*>& bottom, vector*>* top) { // The forward pass computes the softmax prob values. 
softmax_bottom_vec_[0] = bottom[0]; softmax_layer_->Forward(softmax_bottom_vec_, &softmax_top_vec_); diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 5accdd0..56e9561 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -25,7 +25,7 @@ void SplitLayer::SetUp(const vector*>& bottom, bottom[0]->height(), bottom[0]->width()); CHECK_EQ(count_, (*top)[i]->count()); } -}; +} template void SplitLayer::Forward_cpu(const vector*>& bottom, diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu index 22e0831..2814577 100644 --- a/src/caffe/layers/tanh_layer.cu +++ b/src/caffe/layers/tanh_layer.cu @@ -1,9 +1,12 @@ // Copyright 2014 Aravindh Mahendran -// TanH neuron activation function layer. Adapted from ReLU layer code written by Yangqing Jia +// TanH neuron activation function layer. +// Adapted from ReLU layer code written by Yangqing Jia + +#include +#include #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" -#include namespace caffe { @@ -55,11 +58,13 @@ void TanHLayer::Forward_gpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); const int count = bottom[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) TanHForward<<>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " - // << (unsigned long)bottom_data << " top_data: " << (unsigned long)top_data + // << (unsigned long)bottom_data + // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } @@ -84,6 +89,7 @@ Dtype TanHLayer::Backward_gpu(const vector*>& top, const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); const int count = (*bottom)[0]->count(); + // NOLINT_NEXTLINE(whitespace/operators) TanHBackward<<>>( count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index 340bbe1..eb02485 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -123,8 +123,9 @@ void Solver::Snapshot() { // For intermediate results, we will also dump the gradient values. 
net_->ToProto(&net_param, param_.snapshot_diff()); string filename(param_.snapshot_prefix()); - char iter_str_buffer[20]; - sprintf(iter_str_buffer, "_iter_%d", iter_); + const int kBufferSize = 20; + char iter_str_buffer[kBufferSize]; + snprintf(iter_str_buffer, kBufferSize, "_iter_%d", iter_); filename += iter_str_buffer; LOG(INFO) << "Snapshotting to " << filename; WriteProtoToBinaryFile(net_param, filename.c_str()); diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index 7c3084e..7ce1a38 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -1,8 +1,8 @@ // Copyright 2013 Yangqing Jia #include -#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/common.hpp" #include "caffe/blob.hpp" @@ -17,7 +17,7 @@ class BlobSimpleTest : public ::testing::Test { protected: BlobSimpleTest() : blob_(new Blob()), - blob_preshaped_(new Blob(2, 3, 4, 5)) {}; + blob_preshaped_(new Blob(2, 3, 4, 5)) {} virtual ~BlobSimpleTest() { delete blob_; delete blob_preshaped_; } Blob* const blob_; Blob* const blob_preshaped_; @@ -57,4 +57,4 @@ TYPED_TEST(BlobSimpleTest, TestReshape) { EXPECT_EQ(this->blob_->count(), 120); } -} +} // namespace caffe diff --git a/src/caffe/test/test_caffe_main.hpp b/src/caffe/test/test_caffe_main.hpp index a8c1657..01cb0c8 100644 --- a/src/caffe/test/test_caffe_main.hpp +++ b/src/caffe/test/test_caffe_main.hpp @@ -11,8 +11,9 @@ #include #include -#include +using std::cout; +using std::endl; namespace caffe { @@ -20,8 +21,7 @@ cudaDeviceProp CAFFE_TEST_CUDA_PROP; } // namespace caffe -using namespace caffe; -using namespace std; +using caffe::CAFFE_TEST_CUDA_PROP; int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index 3afd6d0..8b521cc 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -1,8 +1,8 @@ // Copyright 2013 Yangqing Jia #include -#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/common.hpp" #include "caffe/syncedmem.hpp" @@ -40,10 +40,10 @@ TEST_F(CommonTest, TestRandSeedCPU) { SyncedMemory data_b(10 * sizeof(int)); Caffe::set_random_seed(1701); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(), - 10, (int*)data_a.mutable_cpu_data(), 0.5); + 10, reinterpret_cast(data_a.mutable_cpu_data()), 0.5); Caffe::set_random_seed(1701); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, Caffe::vsl_stream(), - 10, (int*)data_b.mutable_cpu_data(), 0.5); + 10, reinterpret_cast(data_b.mutable_cpu_data()), 0.5); for (int i = 0; i < 10; ++i) { EXPECT_EQ(((const int*)(data_a.cpu_data()))[i], ((const int*)(data_b.cpu_data()))[i]); @@ -56,10 +56,10 @@ TEST_F(CommonTest, TestRandSeedGPU) { SyncedMemory data_b(10 * sizeof(unsigned int)); Caffe::set_random_seed(1701); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), - (unsigned int*)data_a.mutable_gpu_data(), 10)); + reinterpret_cast(data_a.mutable_gpu_data()), 10)); Caffe::set_random_seed(1701); CURAND_CHECK(curandGenerate(Caffe::curand_generator(), - (unsigned int*)data_b.mutable_gpu_data(), 10)); + reinterpret_cast(data_b.mutable_gpu_data()), 10)); for (int i = 0; i < 10; ++i) { EXPECT_EQ(((const unsigned int*)(data_a.cpu_data()))[i], ((const unsigned int*)(data_b.cpu_data()))[i]); diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index ebd3cf4..9f47e6d 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp 
@@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -21,7 +22,7 @@ class ConvolutionLayerTest : public ::testing::Test { protected: ConvolutionLayerTest() : blob_bottom_(new Blob()), - blob_top_(new Blob()) {}; + blob_top_(new Blob()) {} virtual void SetUp() { blob_bottom_->Reshape(2, 3, 6, 5); // fill the values @@ -31,7 +32,7 @@ class ConvolutionLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~ConvolutionLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; @@ -174,7 +175,8 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradient) { Caffe::set_mode(Caffe::CPU); ConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) { @@ -188,7 +190,8 @@ TYPED_TEST(ConvolutionLayerTest, TestCPUGradientGroup) { Caffe::set_mode(Caffe::CPU); ConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) { @@ -201,7 +204,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradient) { Caffe::set_mode(Caffe::GPU); ConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) { @@ -215,7 +219,8 @@ TYPED_TEST(ConvolutionLayerTest, TestGPUGradientGroup) { Caffe::set_mode(Caffe::GPU); ConvolutionLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 719c50e..35c3439 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -1,10 +1,10 @@ // Copyright 2013 Yangqing Jia -#include -#include - #include +#include +#include "cuda_runtime.h" +#include "leveldb/db.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -14,6 +14,7 @@ #include "caffe/test/test_caffe_main.hpp" using std::string; +using std::stringstream; namespace caffe { @@ -25,12 +26,12 @@ class DataLayerTest : public ::testing::Test { DataLayerTest() : blob_top_data_(new Blob()), blob_top_label_(new Blob()), - filename(NULL) {}; + filename(NULL) {} virtual void SetUp() { blob_top_vec_.push_back(blob_top_data_); blob_top_vec_.push_back(blob_top_label_); // Create the leveldb - filename = tmpnam(NULL); // get temp name + filename = tmpnam(NULL); // get temp name LOG(INFO) << "Using temporary leveldb " << filename; leveldb::DB* db; leveldb::Options options; @@ -53,7 +54,7 @@ class DataLayerTest : public ::testing::Test { db->Put(leveldb::WriteOptions(), ss.str(), datum.SerializeAsString()); } delete db; - }; + } 
virtual ~DataLayerTest() { delete blob_top_data_; delete blob_top_label_; } @@ -112,4 +113,4 @@ TYPED_TEST(DataLayerTest, TestRead) { } } -} +} // namespace caffe diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp index 82ea682..121929f 100644 --- a/src/caffe/test/test_euclidean_loss_layer.cpp +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -3,8 +3,9 @@ #include #include #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -55,4 +56,4 @@ TYPED_TEST(EuclideanLossLayerTest, TestGradientCPU) { this->blob_top_vec_, 0, -1, -1); } -} +} // namespace caffe diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 7738ce4..c4388c2 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -1,8 +1,8 @@ // Copyright 2013 Yangqing Jia #include -#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/filler.hpp" @@ -21,7 +21,7 @@ class ConstantFillerTest : public ::testing::Test { filler_param_.set_value(10.); filler_.reset(new ConstantFiller(filler_param_)); filler_->Fill(blob_); - }; + } virtual ~ConstantFillerTest() { delete blob_; } Blob* const blob_; FillerParameter filler_param_; @@ -50,7 +50,7 @@ class UniformFillerTest : public ::testing::Test { filler_param_.set_max(2.); filler_.reset(new UniformFiller(filler_param_)); filler_->Fill(blob_); - }; + } virtual ~UniformFillerTest() { delete blob_; } Blob* const blob_; FillerParameter filler_param_; @@ -77,7 +77,7 @@ class PositiveUnitballFillerTest : public ::testing::Test { filler_param_() { filler_.reset(new PositiveUnitballFiller(filler_param_)); filler_->Fill(blob_); - }; + } virtual ~PositiveUnitballFillerTest() { delete blob_; } Blob* const blob_; FillerParameter filler_param_; @@ -116,7 +116,7 @@ class GaussianFillerTest : public ::testing::Test { filler_param_.set_std(0.1); filler_.reset(new GaussianFiller(filler_param_)); filler_->Fill(blob_); - }; + } virtual ~GaussianFillerTest() { delete blob_; } Blob* const blob_; FillerParameter filler_param_; @@ -146,4 +146,4 @@ TYPED_TEST(GaussianFillerTest, TestFill) { EXPECT_LE(var, target_var * 5.); } -} +} // namespace caffe diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index 805fd72..03dff36 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -28,7 +29,7 @@ class FlattenLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~FlattenLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -80,7 +81,8 @@ TYPED_TEST(FlattenLayerTest, TestCPUGradient) { Caffe::set_mode(Caffe::CPU); FlattenLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(FlattenLayerTest, TestGPUGradient) { @@ -88,8 +90,9 @@ TYPED_TEST(FlattenLayerTest, TestGPUGradient) { Caffe::set_mode(Caffe::GPU); FlattenLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - 
checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index dc6445d..842d3a7 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -28,7 +29,7 @@ class Im2colLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~Im2colLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -88,7 +89,8 @@ TYPED_TEST(Im2colLayerTest, TestCPUGradient) { Caffe::set_mode(Caffe::CPU); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(Im2colLayerTest, TestGPUGradient) { @@ -98,8 +100,9 @@ TYPED_TEST(Im2colLayerTest, TestGPUGradient) { Caffe::set_mode(Caffe::GPU); Im2colLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_innerproduct_layer.cpp b/src/caffe/test/test_innerproduct_layer.cpp index 0e2b612..acb4c76 100644 --- a/src/caffe/test/test_innerproduct_layer.cpp +++ b/src/caffe/test/test_innerproduct_layer.cpp @@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -28,7 +29,7 @@ class InnerProductLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~InnerProductLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -43,7 +44,7 @@ TYPED_TEST(InnerProductLayerTest, TestSetUp) { LayerParameter layer_param; layer_param.set_num_output(10); shared_ptr > layer( - new InnerProductLayer(layer_param)); + new InnerProductLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); EXPECT_EQ(this->blob_top_->num(), 2); EXPECT_EQ(this->blob_top_->height(), 1); @@ -60,37 +61,37 @@ TYPED_TEST(InnerProductLayerTest, TestCPU) { layer_param.mutable_bias_filler()->set_min(1); layer_param.mutable_bias_filler()->set_max(2); shared_ptr > layer( - new InnerProductLayer(layer_param)); + new InnerProductLayer(layer_param)); layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); const TypeParam* data = this->blob_top_->cpu_data(); const int count = this->blob_top_->count(); for (int i = 0; i < count; ++i) { - EXPECT_GE(data[i], 1.); + EXPECT_GE(data[i], 1.); } } TYPED_TEST(InnerProductLayerTest, TestGPU) { - if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { - LayerParameter layer_param; - Caffe::set_mode(Caffe::GPU); - layer_param.set_num_output(10); - 
layer_param.mutable_weight_filler()->set_type("uniform"); - layer_param.mutable_bias_filler()->set_type("uniform"); - layer_param.mutable_bias_filler()->set_min(1); - layer_param.mutable_bias_filler()->set_max(2); - shared_ptr > layer( - new InnerProductLayer(layer_param)); - layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); - layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); - const TypeParam* data = this->blob_top_->cpu_data(); - const int count = this->blob_top_->count(); - for (int i = 0; i < count; ++i) { - EXPECT_GE(data[i], 1.); - } - } else { - LOG(ERROR) << "Skipping test due to old architecture."; - } + if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { + LayerParameter layer_param; + Caffe::set_mode(Caffe::GPU); + layer_param.set_num_output(10); + layer_param.mutable_weight_filler()->set_type("uniform"); + layer_param.mutable_bias_filler()->set_type("uniform"); + layer_param.mutable_bias_filler()->set_min(1); + layer_param.mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new InnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_)); + layer->Forward(this->blob_bottom_vec_, &(this->blob_top_vec_)); + const TypeParam* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } + } else { + LOG(ERROR) << "Skipping test due to old architecture."; + } } TYPED_TEST(InnerProductLayerTest, TestCPUGradient) { @@ -103,7 +104,8 @@ TYPED_TEST(InnerProductLayerTest, TestCPUGradient) { layer_param.mutable_bias_filler()->set_max(2); InnerProductLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(InnerProductLayerTest, TestGPUGradient) { @@ -121,4 +123,4 @@ TYPED_TEST(InnerProductLayerTest, TestGPUGradient) { } } -} +} // namespace caffe diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 757bac3..6c778df 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -2,9 +2,9 @@ #include #include -#include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -26,7 +26,7 @@ class LRNLayerTest : public ::testing::Test { protected: LRNLayerTest() : blob_bottom_(new Blob()), - blob_top_(new Blob()) {}; + blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); blob_bottom_->Reshape(2, 7, 3, 3); @@ -36,7 +36,7 @@ class LRNLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~LRNLayerTest() { delete blob_bottom_; delete blob_top_; } void ReferenceLRNForward(const Blob& blob_bottom, const LayerParameter& layer_param, Blob* blob_top); @@ -135,10 +135,12 @@ TYPED_TEST(LRNLayerTest, TestCPUGradient) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); - //for (int i = 0; i < this->blob_bottom_->count(); ++i) { - // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] << std::endl; - //} - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << 
this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(LRNLayerTest, TestGPUGradient) { @@ -152,10 +154,12 @@ TYPED_TEST(LRNLayerTest, TestGPUGradient) { this->blob_top_->mutable_cpu_diff()[i] = 1.; } layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_)); - //for (int i = 0; i < this->blob_bottom_->count(); ++i) { - // std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i] << std::endl; - //} - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "GPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index 5595c84..835d1b2 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -3,8 +3,9 @@ #include #include #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -30,6 +31,7 @@ class MultinomialLogisticLossLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); for (int i = 0; i < blob_bottom_label_->count(); ++i) { + // NOLINT_NEXTLINE(runtime/threadsafe_fn) blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_label_); @@ -58,4 +60,4 @@ TYPED_TEST(MultinomialLogisticLossLayerTest, TestGradientCPU) { this->blob_top_vec_, 0, -1, -1); } -} +} // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 8674519..9a7ff5c 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -28,7 +29,7 @@ class NeuronLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~NeuronLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -60,7 +61,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientCPU) { Caffe::set_mode(Caffe::CPU); ReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -85,7 +87,8 @@ TYPED_TEST(NeuronLayerTest, TestReLUGradientGPU) { Caffe::set_mode(Caffe::GPU); ReLULayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -100,7 +103,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidCPU) { const TypeParam* top_data = this->blob_top_->cpu_data(); for (int i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_FLOAT_EQ(top_data[i], 1. 
/ (1 + exp(-bottom_data[i]))); - //check that we squashed the value between 0 and 1 + // check that we squashed the value between 0 and 1 EXPECT_GE(top_data[i], 0.); EXPECT_LE(top_data[i], 1.); } @@ -112,7 +115,8 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradientCPU) { Caffe::set_mode(Caffe::CPU); SigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) { @@ -126,7 +130,7 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGPU) { const TypeParam* top_data = this->blob_top_->cpu_data(); for (int i = 0; i < this->blob_bottom_->count(); ++i) { EXPECT_FLOAT_EQ(top_data[i], 1. / (1 + exp(-bottom_data[i]))); - //check that we squashed the value between 0 and 1 + // check that we squashed the value between 0 and 1 EXPECT_GE(top_data[i], 0.); EXPECT_LE(top_data[i], 1.); } @@ -138,7 +142,8 @@ TYPED_TEST(NeuronLayerTest, TestSigmoidGradientGPU) { Caffe::set_mode(Caffe::GPU); SigmoidLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -167,7 +172,8 @@ TYPED_TEST(NeuronLayerTest, TestDropoutGradientCPU) { Caffe::set_mode(Caffe::CPU); DropoutLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -264,7 +270,8 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradientCPU) { Caffe::set_mode(Caffe::CPU); BNLLLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -289,10 +296,9 @@ TYPED_TEST(NeuronLayerTest, TestBNLLGradientGPU) { Caffe::set_mode(Caffe::GPU); BNLLLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } - - -} +} // namespace caffe diff --git a/src/caffe/test/test_platform.cpp b/src/caffe/test/test_platform.cpp index ea3cee2..bd2dcd3 100644 --- a/src/caffe/test/test_platform.cpp +++ b/src/caffe/test/test_platform.cpp @@ -2,11 +2,10 @@ #include #include -#include -#include -#include -#include +#include "cuda_runtime.h" +#include "glog/logging.h" +#include "gtest/gtest.h" #include "caffe/test/test_caffe_main.hpp" namespace caffe { @@ -19,22 +18,35 @@ TEST_F(PlatformTest, TestInitialization) { printf("Major revision number: %d\n", CAFFE_TEST_CUDA_PROP.major); printf("Minor revision number: %d\n", CAFFE_TEST_CUDA_PROP.minor); printf("Name: %s\n", CAFFE_TEST_CUDA_PROP.name); - printf("Total global memory: %lu\n", CAFFE_TEST_CUDA_PROP.totalGlobalMem); - printf("Total shared memory per block: %lu\n", CAFFE_TEST_CUDA_PROP.sharedMemPerBlock); - printf("Total registers per block: %d\n", CAFFE_TEST_CUDA_PROP.regsPerBlock); - printf("Warp size: %d\n", CAFFE_TEST_CUDA_PROP.warpSize); - printf("Maximum memory pitch: %lu\n", CAFFE_TEST_CUDA_PROP.memPitch); - printf("Maximum threads per block: %d\n", 
CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock); + printf("Total global memory: %lu\n", + CAFFE_TEST_CUDA_PROP.totalGlobalMem); + printf("Total shared memory per block: %lu\n", + CAFFE_TEST_CUDA_PROP.sharedMemPerBlock); + printf("Total registers per block: %d\n", + CAFFE_TEST_CUDA_PROP.regsPerBlock); + printf("Warp size: %d\n", + CAFFE_TEST_CUDA_PROP.warpSize); + printf("Maximum memory pitch: %lu\n", + CAFFE_TEST_CUDA_PROP.memPitch); + printf("Maximum threads per block: %d\n", + CAFFE_TEST_CUDA_PROP.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) - printf("Maximum dimension %d of block: %d\n", i, CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]); + printf("Maximum dimension %d of block: %d\n", i, + CAFFE_TEST_CUDA_PROP.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) - printf("Maximum dimension %d of grid: %d\n", i, CAFFE_TEST_CUDA_PROP.maxGridSize[i]); - printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate); - printf("Total constant memory: %lu\n", CAFFE_TEST_CUDA_PROP.totalConstMem); - printf("Texture alignment: %lu\n", CAFFE_TEST_CUDA_PROP.textureAlignment); - printf("Concurrent copy and execution: %s\n", (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No")); - printf("Number of multiprocessors: %d\n", CAFFE_TEST_CUDA_PROP.multiProcessorCount); - printf("Kernel execution timeout: %s\n", (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? "Yes" : "No")); + printf("Maximum dimension %d of grid: %d\n", i, + CAFFE_TEST_CUDA_PROP.maxGridSize[i]); + printf("Clock rate: %d\n", CAFFE_TEST_CUDA_PROP.clockRate); + printf("Total constant memory: %lu\n", + CAFFE_TEST_CUDA_PROP.totalConstMem); + printf("Texture alignment: %lu\n", + CAFFE_TEST_CUDA_PROP.textureAlignment); + printf("Concurrent copy and execution: %s\n", + (CAFFE_TEST_CUDA_PROP.deviceOverlap ? "Yes" : "No")); + printf("Number of multiprocessors: %d\n", + CAFFE_TEST_CUDA_PROP.multiProcessorCount); + printf("Kernel execution timeout: %s\n", + (CAFFE_TEST_CUDA_PROP.kernelExecTimeoutEnabled ? 
"Yes" : "No")); EXPECT_TRUE(true); } diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index 67cae13..c08a7c0 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -21,7 +22,7 @@ class PoolingLayerTest : public ::testing::Test { protected: PoolingLayerTest() : blob_bottom_(new Blob()), - blob_top_(new Blob()) {}; + blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); blob_bottom_->Reshape(2, 3, 6, 5); @@ -31,7 +32,7 @@ class PoolingLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~PoolingLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -89,7 +90,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) { Caffe::set_mode(Caffe::CPU); PoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) { @@ -100,7 +102,8 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientMax) { Caffe::set_mode(Caffe::GPU); PoolingLayer layer(layer_param); GradientChecker checker(1e-4, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -112,7 +115,8 @@ TYPED_TEST(PoolingLayerTest, TestCPUGradientAve) { Caffe::set_mode(Caffe::CPU); PoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } @@ -124,8 +128,9 @@ TYPED_TEST(PoolingLayerTest, TestGPUGradientAve) { Caffe::set_mode(Caffe::GPU); PoolingLayer layer(layer_param); GradientChecker checker(1e-2, 1e-2); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_protobuf.cpp b/src/caffe/test/test_protobuf.cpp index 11cdcf6..d8d511d 100644 --- a/src/caffe/test/test_protobuf.cpp +++ b/src/caffe/test/test_protobuf.cpp @@ -4,7 +4,7 @@ // format. Nothing special here and no actual code is being tested. 
#include -#include +#include "google/protobuf/text_format.h" #include "gtest/gtest.h" #include "caffe/test/test_caffe_main.hpp" #include "caffe/proto/caffe.pb.h" @@ -26,4 +26,4 @@ TEST_F(ProtoTest, TestSerialization) { EXPECT_TRUE(true); } -} +} // namespace caffe diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index fc1c1b7..d27f40c 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -2,8 +2,9 @@ #include #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -29,7 +30,7 @@ class SoftmaxLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~SoftmaxLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -77,7 +78,8 @@ TYPED_TEST(SoftmaxLayerTest, TestGradientCPU) { Caffe::set_mode(Caffe::CPU); SoftmaxLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp index 328f64b..bf96d99 100644 --- a/src/caffe/test/test_softmax_with_loss_layer.cpp +++ b/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -3,8 +3,9 @@ #include #include #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -31,6 +32,7 @@ class SoftmaxWithLossLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_data_); blob_bottom_vec_.push_back(blob_bottom_data_); for (int i = 0; i < blob_bottom_label_->count(); ++i) { + // NOLINT_NEXTLINE(runtime/threadsafe_fn) blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5; } blob_bottom_vec_.push_back(blob_bottom_label_); @@ -69,4 +71,4 @@ TYPED_TEST(SoftmaxWithLossLayerTest, TestGradientGPU) { this->blob_top_vec_, 0, -1, -1); } -} +} // namespace caffe diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp index 3311c9a..8c1780c 100644 --- a/src/caffe/test/test_split_layer.cpp +++ b/src/caffe/test/test_split_layer.cpp @@ -1,9 +1,11 @@ // Copyright 2014 Jeff Donahue #include -#include -#include +#include +#include +#include "cuda_runtime.h" +#include "google/protobuf/text_format.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -32,7 +34,7 @@ class SplitLayerTest : public ::testing::Test { blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_a_); blob_top_vec_.push_back(blob_top_b_); - }; + } virtual ~SplitLayerTest() { delete blob_bottom_; delete blob_top_a_; @@ -156,7 +158,6 @@ TYPED_TEST(SplitLayerTest, TestGPUGradientInPlace) { template class SplitLayerInsertionTest : public ::testing::Test { protected: - SplitLayerInsertionTest() { }; void RunInsertionTest( const string& input_param_string, const string& output_param_string) { // Test that insert_splits called on the proto specified by @@ -1125,4 +1126,4 @@ TYPED_TEST(SplitLayerInsertionTest, TestWithInPlace) { this->RunInsertionTest(input_proto, expected_output_proto); } -} +} // namespace caffe diff --git a/src/caffe/test/test_stochastic_pooing.cpp b/src/caffe/test/test_stochastic_pooling.cpp similarity index 
95% rename from src/caffe/test/test_stochastic_pooing.cpp rename to src/caffe/test/test_stochastic_pooling.cpp index e2b60ee..7829b94 100644 --- a/src/caffe/test/test_stochastic_pooing.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -1,8 +1,10 @@ // Copyright 2013 Yangqing Jia +#include #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -12,6 +14,8 @@ #include "caffe/test/test_caffe_main.hpp" +using std::min; + namespace caffe { extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; @@ -21,7 +25,7 @@ class StochasticPoolingLayerTest : public ::testing::Test { protected: StochasticPoolingLayerTest() : blob_bottom_(new Blob()), - blob_top_(new Blob()) {}; + blob_top_(new Blob()) {} virtual void SetUp() { Caffe::set_random_seed(1701); blob_bottom_->Reshape(2, 3, 6, 5); @@ -33,7 +37,7 @@ class StochasticPoolingLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~StochasticPoolingLayerTest() { delete blob_bottom_; delete blob_top_; @@ -89,7 +93,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPU) { bool has_equal = false; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - has_equal |= (pooled == bottom_data[this->blob_bottom_->offset(n, c, h, w)]); + has_equal |= (pooled == bottom_data[this->blob_bottom_-> + offset(n, c, h, w)]); } } EXPECT_TRUE(has_equal); @@ -130,7 +135,8 @@ TYPED_TEST(StochasticPoolingLayerTest, TestStochasticGPUTestPhase) { bool smaller_than_max = false; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { - smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_->offset(n, c, h, w)]); + smaller_than_max |= (pooled <= bottom_data[this->blob_bottom_-> + offset(n, c, h, w)]); } } EXPECT_TRUE(smaller_than_max); @@ -159,4 +165,4 @@ TYPED_TEST(StochasticPoolingLayerTest, TestGradientGPU) { -} +} // namespace caffe diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index b834710..161ca45 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -1,8 +1,9 @@ // Copyright 2013 Yangqing Jia #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/common.hpp" #include "caffe/syncedmem.hpp" @@ -36,29 +37,31 @@ TEST_F(SyncedMemoryTest, TestCPUWrite) { EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); memset(cpu_data, 1, mem.size()); for (int i = 0; i < mem.size(); ++i) { - EXPECT_EQ(((char*)cpu_data)[i], 1); + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 1); } const void* gpu_data = mem.gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); // check if values are the same char* recovered_value = new char[10]; - cudaMemcpy((void*)recovered_value, gpu_data, 10, cudaMemcpyDeviceToHost); + cudaMemcpy(reinterpret_cast(recovered_value), gpu_data, 10, + cudaMemcpyDeviceToHost); for (int i = 0; i < mem.size(); ++i) { - EXPECT_EQ(((char*)recovered_value)[i], 1); + EXPECT_EQ((reinterpret_cast(recovered_value))[i], 1); } // do another round cpu_data = mem.mutable_cpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::HEAD_AT_CPU); memset(cpu_data, 2, mem.size()); for (int i = 0; i < mem.size(); ++i) { - EXPECT_EQ(((char*)cpu_data)[i], 2); + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 2); } gpu_data = mem.gpu_data(); EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); // check if values are the same - cudaMemcpy((void*)recovered_value, gpu_data, 10, 
cudaMemcpyDeviceToHost); + cudaMemcpy(reinterpret_cast(recovered_value), gpu_data, 10, + cudaMemcpyDeviceToHost); for (int i = 0; i < mem.size(); ++i) { - EXPECT_EQ(((char*)recovered_value)[i], 2); + EXPECT_EQ((reinterpret_cast(recovered_value))[i], 2); } delete[] recovered_value; } @@ -70,7 +73,7 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { CUDA_CHECK(cudaMemset(gpu_data, 1, mem.size())); const void* cpu_data = mem.cpu_data(); for (int i = 0; i < mem.size(); ++i) { - EXPECT_EQ(((char*)cpu_data)[i], 1); + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 1); } EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); @@ -79,9 +82,9 @@ TEST_F(SyncedMemoryTest, TestGPUWrite) { CUDA_CHECK(cudaMemset(gpu_data, 2, mem.size())); cpu_data = mem.cpu_data(); for (int i = 0; i < mem.size(); ++i) { - EXPECT_EQ(((char*)cpu_data)[i], 2); + EXPECT_EQ((reinterpret_cast(cpu_data))[i], 2); } EXPECT_EQ(mem.head(), SyncedMemory::SYNCED); } -} +} // namespace caffe diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp index a4226a2..9c80ac2 100644 --- a/src/caffe/test/test_tanh_layer.cpp +++ b/src/caffe/test/test_tanh_layer.cpp @@ -1,10 +1,11 @@ // Copyright 2014 Aravindh Mahendran -// Adapted from other test files +// Adapted from other test files #include #include -#include +#include +#include "cuda_runtime.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" @@ -30,7 +31,7 @@ class TanHLayerTest : public ::testing::Test { filler.Fill(this->blob_bottom_); blob_bottom_vec_.push_back(blob_bottom_); blob_top_vec_.push_back(blob_top_); - }; + } virtual ~TanHLayerTest() { delete blob_bottom_; delete blob_top_; } Blob* const blob_bottom_; Blob* const blob_top_; @@ -52,10 +53,12 @@ TYPED_TEST(TanHLayerTest, TestForwardCPU) { for (int j = 0; j < this->blob_bottom_->channels(); ++j) { for (int k = 0; k < this->blob_bottom_->height(); ++k) { for (int l = 0; l < this->blob_bottom_->width(); ++l) { - EXPECT_GE(this->blob_top_->data_at(i,j,k,l) + 1e-4, - (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1)); - EXPECT_LE(this->blob_top_->data_at(i,j,k,l) - 1e-4, - (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1)); + EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); } } } @@ -67,7 +70,8 @@ TYPED_TEST(TanHLayerTest, TestGradientCPU) { Caffe::set_mode(Caffe::CPU); TanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } TYPED_TEST(TanHLayerTest, TestForwardGPU) { @@ -81,10 +85,12 @@ TYPED_TEST(TanHLayerTest, TestForwardGPU) { for (int j = 0; j < this->blob_bottom_->channels(); ++j) { for (int k = 0; k < this->blob_bottom_->height(); ++k) { for (int l = 0; l < this->blob_bottom_->width(); ++l) { - EXPECT_GE(this->blob_top_->data_at(i,j,k,l) + 1e-4, - (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1)); - EXPECT_LE(this->blob_top_->data_at(i,j,k,l) - 1e-4, - (exp(2*this->blob_bottom_->data_at(i,j,k,l))-1)/(exp(2*this->blob_bottom_->data_at(i,j,k,l))+1)); + 
EXPECT_GE(this->blob_top_->data_at(i, j, k, l) + 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); + EXPECT_LE(this->blob_top_->data_at(i, j, k, l) - 1e-4, + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) - 1) / + (exp(2*this->blob_bottom_->data_at(i, j, k, l)) + 1)); } } } @@ -96,7 +102,8 @@ TYPED_TEST(TanHLayerTest, TestGradientGPU) { Caffe::set_mode(Caffe::GPU); TanHLayer layer(layer_param); GradientChecker checker(1e-2, 1e-3); - checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, this->blob_top_vec_); + checker.CheckGradientExhaustive(layer, this->blob_bottom_vec_, + this->blob_top_vec_); } -} +} // namespace caffe diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 3fed148..3f3ff8b 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -1,9 +1,10 @@ // Copyright 2013 Yangqing Jia #include -#include -#include -#include + +#include "cuda_runtime.h" +#include "mkl.h" +#include "cublas_v2.h" #include "gtest/gtest.h" #include "caffe/blob.hpp" @@ -23,18 +24,18 @@ class GemmTest : public ::testing::Test {}; TYPED_TEST_CASE(GemmTest, Dtypes); TYPED_TEST(GemmTest, TestGemm) { - Blob A(1,1,2,3); - Blob B(1,1,3,4); - Blob C(1,1,2,4); + Blob A(1, 1, 2, 3); + Blob B(1, 1, 3, 4); + Blob C(1, 1, 2, 4); TypeParam data[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; TypeParam A_reshape_data[6] = {1, 4, 2, 5, 3, 6}; - TypeParam B_reshape_data[12] = {1,5,9,2,6,10,3,7,11,4,8,12}; - TypeParam result[8] = {38,44,50,56,83,98,113,128}; + TypeParam B_reshape_data[12] = {1, 5, 9, 2, 6, 10, 3, 7, 11, 4, 8, 12}; + TypeParam result[8] = {38, 44, 50, 56, 83, 98, 113, 128}; memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam)); memcpy(B.mutable_cpu_data(), data, 12 * sizeof(TypeParam)); if (sizeof(TypeParam) == 4 || CAFFE_TEST_CUDA_PROP.major >= 2) { - //[1,2,3; 4 5 6] * [1,2,3,4; 5,6,7,8; 9,10,11,12]; + // [1, 2, 3; 4 5 6] * [1, 2, 3, 4; 5, 6, 7, 8; 9, 10, 11, 12]; caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); for (int i = 0; i < 8; ++i) { @@ -47,7 +48,7 @@ TYPED_TEST(GemmTest, TestGemm) { } // Test when we have a transposed A - A.Reshape(1,1,3,2); + A.Reshape(1, 1, 3, 2); memcpy(A.mutable_cpu_data(), A_reshape_data, 6 * sizeof(TypeParam)); caffe_cpu_gemm(CblasTrans, CblasNoTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -61,7 +62,7 @@ TYPED_TEST(GemmTest, TestGemm) { } // Test when we have a transposed A and a transposed B too - B.Reshape(1,1,4,3); + B.Reshape(1, 1, 4, 3); memcpy(B.mutable_cpu_data(), B_reshape_data, 12 * sizeof(TypeParam)); caffe_cpu_gemm(CblasTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -75,7 +76,7 @@ TYPED_TEST(GemmTest, TestGemm) { } // Test when we have a transposed B - A.Reshape(1,1,2,3); + A.Reshape(1, 1, 2, 3); memcpy(A.mutable_cpu_data(), data, 6 * sizeof(TypeParam)); caffe_cpu_gemm(CblasNoTrans, CblasTrans, 2, 4, 3, 1., A.cpu_data(), B.cpu_data(), 0., C.mutable_cpu_data()); @@ -94,9 +95,9 @@ TYPED_TEST(GemmTest, TestGemm) { TYPED_TEST(GemmTest, TestGemv) { - Blob A(1,1,2,3); - Blob x(1,1,1,3); - Blob y(1,1,1,2); + Blob A(1, 1, 2, 3); + Blob x(1, 1, 1, 3); + Blob y(1, 1, 1, 2); TypeParam data[6] = {1, 2, 3, 4, 5, 6}; TypeParam result_2[2] = {14, 32}; TypeParam result_3[3] = {9, 12, 15}; @@ -132,4 +133,4 @@ TYPED_TEST(GemmTest, TestGemv) { } } -} +} // namespace caffe diff --git 
a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp index b32f6ee..4ed3af8 100644 --- a/src/caffe/util/im2col.cpp +++ b/src/caffe/util/im2col.cpp @@ -10,8 +10,8 @@ namespace caffe { template void im2col_cpu(const Dtype* data_im, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - Dtype* data_col) { + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_col) { int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int channels_col = channels * ksize * ksize; @@ -21,13 +21,13 @@ void im2col_cpu(const Dtype* data_im, const int channels, int c_im = c / ksize / ksize; for (int h = 0; h < height_col; ++h) { for (int w = 0; w < width_col; ++w) { - int h_pad = h * stride - pad + h_offset; - int w_pad = w * stride - pad + w_offset; - if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) - data_col[(c * height_col + h) * width_col + w] = - data_im[(c_im * height + h_pad) * width + w_pad]; - else - data_col[(c * height_col + h) * width_col + w] = 0; + int h_pad = h * stride - pad + h_offset; + int w_pad = w * stride - pad + w_offset; + if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) + data_col[(c * height_col + h) * width_col + w] = + data_im[(c_im * height + h_pad) * width + w_pad]; + else + data_col[(c * height_col + h) * width_col + w] = 0; } } } @@ -35,16 +35,16 @@ void im2col_cpu(const Dtype* data_im, const int channels, // Explicit instantiation template void im2col_cpu(const float* data_im, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - float* data_col); + const int height, const int width, const int ksize, const int pad, + const int stride, float* data_col); template void im2col_cpu(const double* data_im, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - double* data_col); + const int height, const int width, const int ksize, const int pad, + const int stride, double* data_col); template void col2im_cpu(const Dtype* data_col, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - Dtype* data_im) { + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_im) { memset(data_im, 0, sizeof(Dtype) * height * width * channels); int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; @@ -55,10 +55,11 @@ void col2im_cpu(const Dtype* data_col, const int channels, int c_im = c / ksize / ksize; for (int h = 0; h < height_col; ++h) { for (int w = 0; w < width_col; ++w) { - int h_pad = h * stride - pad + h_offset; - int w_pad = w * stride - pad + w_offset; - if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) - data_im[(c_im * height + h_pad) * width + w_pad] += data_col[(c * height_col + h) * width_col + w]; + int h_pad = h * stride - pad + h_offset; + int w_pad = w * stride - pad + w_offset; + if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width) + data_im[(c_im * height + h_pad) * width + w_pad] += + data_col[(c * height_col + h) * width_col + w]; } } } @@ -66,10 +67,10 @@ void col2im_cpu(const Dtype* data_col, const int channels, // Explicit instantiation template void col2im_cpu(const float* data_col, const int channels, - const int height, const int width, const int psize, const int pad, const int stride, - float* 
data_im); + const int height, const int width, const int psize, const int pad, + const int stride, float* data_im); template void col2im_cpu(const double* data_col, const int channels, - const int height, const int width, const int psize, const int pad, const int stride, - double* data_im); + const int height, const int width, const int psize, const int pad, + const int stride, double* data_im); } // namespace caffe diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu index 7f1376d..c3c8736 100644 --- a/src/caffe/util/im2col.cu +++ b/src/caffe/util/im2col.cu @@ -1,5 +1,6 @@ // Copyright 2013 Yangqing Jia +#include #include #include #include @@ -11,8 +12,9 @@ namespace caffe { template __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, - const int height, const int width, const int ksize, const int pad, - const int stride, const int height_col, const int width_col, Dtype* data_col) { + const int height, const int width, const int ksize, const int pad, + const int stride, const int height_col, const int width_col, + Dtype* data_col) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n) { int w_out = index % width_col; @@ -26,10 +28,11 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, data_im += (channel_in * height + h_in) * width + w_in; for (int i = 0; i < ksize; ++i) { for (int j = 0; j < ksize; ++j) { - int h = h_in + i; - int w = w_in + j; - *data_col = (h >= 0 && w >= 0 && h < width && w < height) ? data_im[i * width + j] : 0; - data_col += height_col * width_col; + int h = h_in + i; + int w = w_in + j; + *data_col = (h >= 0 && w >= 0 && h < width && w < height) ? + data_im[i * width + j] : 0; + data_col += height_col * width_col; } } } @@ -37,32 +40,35 @@ __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, template void im2col_gpu(const Dtype* data_im, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - Dtype* data_col) { + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
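As an aside for readers of this hunk, the launch-size arithmetic that the comment above refers to can be made concrete with a small host-side sketch; it is illustrative only (not part of the patch) and the input/filter dimensions are invented.

    // Not part of the patch: host-side sketch of the im2col launch sizing.
    #include <cstdio>

    int main() {
      const int channels = 3, height = 6, width = 5;  // hypothetical input shape
      const int ksize = 3, pad = 0, stride = 2;       // hypothetical filter params
      // Same output-size formulas used by im2col_gpu / col2im_gpu in this patch.
      const int height_col = (height + 2 * pad - ksize) / stride + 1;  // = 2
      const int width_col = (width + 2 * pad - ksize) / stride + 1;    // = 2
      // One GPU thread ("kernel" in the comment's wording) per column element:
      const int num_kernels = channels * height_col * width_col;       // = 12
      printf("height_col=%d width_col=%d num_kernels=%d\n",
          height_col, width_col, num_kernels);
      return 0;
    }

Each of those num_kernels threads copies one ksize-by-ksize patch of a single channel into the column buffer, writing to locations no other thread touches, which is why im2col_gpu_kernel needs no inter-thread coordination.
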
int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height_col * width_col; - im2col_gpu_kernel<<>>( - num_kernels, data_im, height, width, ksize, pad, stride, height_col, width_col, - data_col); + // NOLINT_NEXTLINE(whitespace/operators) + im2col_gpu_kernel<<>>( + num_kernels, data_im, height, width, ksize, pad, stride, height_col, + width_col, data_col); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_gpu(const float* data_im, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - float* data_col); + const int height, const int width, const int ksize, const int pad, + const int stride, float* data_col); template void im2col_gpu(const double* data_im, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - double* data_col); + const int height, const int width, const int ksize, const int pad, + const int stride, double* data_col); template __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, - const int height, const int width, const int channels, const int ksize, const int pad, - const int stride, const int height_col, const int width_col, Dtype* data_im) { + const int height, const int width, const int channels, const int ksize, + const int pad, const int stride, const int height_col, const int width_col, + Dtype* data_im) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < n) { Dtype val = 0; @@ -98,15 +104,18 @@ __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, template void col2im_gpu(const Dtype* data_col, const int channels, - const int height, const int width, const int ksize, const int pad, const int stride, - Dtype* data_im) { - //CUDA_CHECK(cudaMemset(data_im, 0, sizeof(Dtype) * height * width * channels)); + const int height, const int width, const int ksize, const int pad, + const int stride, Dtype* data_im) { + // CUDA_CHECK(cudaMemset(data_im, 0, + // sizeof(Dtype) * height * width * channels)); int height_col = (height + 2 * pad - ksize) / stride + 1; int width_col = (width + 2 * pad - ksize) / stride + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
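The comment above explains why the GPU col2im gathers rather than scatters: if each thread added its patch back into the image (as col2im_cpu does), overlapping patches would require atomic adds. The gather-style CPU sketch below illustrates the same idea; it is not part of the patch, the function name is hypothetical, and it mirrors the indexing conventions of im2col_cpu/col2im_cpu earlier in this patch.

    // Gather-style col2im: each image element sums every column entry that
    // references it, so no two loop iterations write the same output location.
    template <typename Dtype>
    void col2im_gather_cpu(const Dtype* data_col, const int channels,
        const int height, const int width, const int ksize, const int pad,
        const int stride, Dtype* data_im) {
      const int height_col = (height + 2 * pad - ksize) / stride + 1;
      const int width_col = (width + 2 * pad - ksize) / stride + 1;
      for (int c_im = 0; c_im < channels; ++c_im) {
        for (int h = 0; h < height; ++h) {
          for (int w = 0; w < width; ++w) {
            Dtype val = 0;
            for (int i = 0; i < ksize; ++i) {    // kernel row offset
              for (int j = 0; j < ksize; ++j) {  // kernel column offset
                const int h_out_s = h + pad - i;  // equals h_out * stride
                const int w_out_s = w + pad - j;  // equals w_out * stride
                if (h_out_s < 0 || w_out_s < 0) continue;
                if (h_out_s % stride != 0 || w_out_s % stride != 0) continue;
                const int h_out = h_out_s / stride;
                const int w_out = w_out_s / stride;
                if (h_out >= height_col || w_out >= width_col) continue;
                // Column row index, matching im2col_cpu's channel layout.
                const int c = (c_im * ksize + i) * ksize + j;
                val += data_col[(c * height_col + h_out) * width_col + w_out];
              }
            }
            data_im[(c_im * height + h) * width + w] = val;
          }
        }
      }
    }

Because each image element is written exactly once, the outer loops parallelize across (c_im, h, w) with no synchronization, which is exactly what launching num_kernels = channels * height * width threads achieves on the GPU.
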
- col2im_gpu_kernel<<>>( + // NOLINT_NEXTLINE(whitespace/operators) + col2im_gpu_kernel<<>>( num_kernels, data_col, height, width, channels, ksize, pad, stride, height_col, width_col, data_im); CUDA_POST_KERNEL_CHECK; @@ -115,11 +124,11 @@ void col2im_gpu(const Dtype* data_col, const int channels, // Explicit instantiation template void col2im_gpu(const float* data_col, const int channels, - const int height, const int width, const int psize, const int pad, const int stride, - float* data_im); + const int height, const int width, const int psize, const int pad, + const int stride, float* data_im); template void col2im_gpu(const double* data_col, const int channels, - const int height, const int width, const int psize, const int pad, const int stride, - double* data_im); + const int height, const int width, const int psize, const int pad, + const int stride, double* data_im); } // namespace caffe diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index 2ed8127..190b0d3 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -12,7 +12,6 @@ #include #include -#include #include #include "caffe/common.hpp" @@ -82,9 +81,6 @@ bool ReadImageToDatum(const string& filename, const int label, LOG(ERROR) << "Could not open or find file " << filename; return false; } - if (height > 0 && width > 0) { - - } datum->set_channels(3); datum->set_height(cv_img.rows); datum->set_width(cv_img.cols); @@ -95,7 +91,8 @@ bool ReadImageToDatum(const string& filename, const int label, for (int c = 0; c < 3; ++c) { for (int h = 0; h < cv_img.rows; ++h) { for (int w = 0; w < cv_img.cols; ++w) { - datum_string->push_back(static_cast(cv_img.at(h, w)[c])); + datum_string->push_back( + static_cast(cv_img.at(h, w)[c])); } } } diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index e930581..d063504 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -21,6 +21,7 @@ __global__ void mul_kernel(const int n, const Dtype* a, template <> void caffe_gpu_mul(const int N, const float* a, const float* b, float* y) { + // NOLINT_NEXTLINE(whitespace/operators) mul_kernel<<>>( N, a, b, y); } @@ -28,6 +29,7 @@ void caffe_gpu_mul(const int N, const float* a, template <> void caffe_gpu_mul(const int N, const double* a, const double* b, double* y) { + // NOLINT_NEXTLINE(whitespace/operators) mul_kernel<<>>( N, a, b, y); } -- 2.7.4