From: Jeff Donahue
Date: Wed, 26 Feb 2014 05:56:49 +0000 (-0800)
Subject: cpplint.py -> cpp_lint.py and NOLINT_NEXTLINE -> NOLINT_NEXT_LINE
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=218ca1522debd359892d0e0053d08db36da3a48b;p=platform%2Fupstream%2Fcaffe.git

cpplint.py -> cpp_lint.py and NOLINT_NEXTLINE -> NOLINT_NEXT_LINE
---

diff --git a/Makefile b/Makefile
index df96fc2..e6098ca 100644
--- a/Makefile
+++ b/Makefile
@@ -121,7 +121,7 @@ linecount: clean
        cloc --read-lang-def=$(PROJECT).cloc src/$(PROJECT)/

 lint:
-       ./scripts/cpplint.py $(NONGEN_CXX_SRCS)
+       ./scripts/cpp_lint.py $(NONGEN_CXX_SRCS)

 test: init $(TEST_BINS)

diff --git a/scripts/cpplint.py b/scripts/cpp_lint.py
similarity index 99%
rename from scripts/cpplint.py
rename to scripts/cpp_lint.py
index 4143c4b..11bfefa 100755
--- a/scripts/cpplint.py
+++ b/scripts/cpp_lint.py
@@ -437,8 +437,8 @@ _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'

 _regexp_compile_cache = {}

-# Finds occurrences of NOLINT[_NEXTLINE] or NOLINT[_NEXTLINE](...).
-_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXTLINE)?\b(\([^)]*\))?')
+# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
+_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')

 # {str, set(int)}: a map from error categories to sets of linenumbers
 # on which those errors are expected and should be suppressed.
@@ -472,7 +472,7 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error):
   # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
   matched = _RE_SUPPRESSION.search(raw_line)
   if matched:
-    if matched.group(1) == '_NEXTLINE':
+    if matched.group(1) == '_NEXT_LINE':
       linenum += 1
     category = matched.group(2)
     if category in (None, '(*)'):  # => "suppress all"
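For context on the rename: _RE_SUPPRESSION above matches a NOLINT comment on a source line, and ParseNolintSuppressions shifts the suppression to the following line when the _NEXT_LINE form is used. A minimal standalone sketch of that behavior, using the same regex as the hunk above (the helper name nolint_target is made up for illustration and is not part of cpp_lint.py):

    import re

    # Same pattern as _RE_SUPPRESSION in the hunk above.
    _RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')

    def nolint_target(raw_line, linenum):
        """Return the line number this NOLINT comment suppresses, or None."""
        matched = _RE_SUPPRESSION.search(raw_line)
        if not matched:
            return None
        # NOLINT_NEXT_LINE suppresses the following line; plain NOLINT, this one.
        return linenum + 1 if matched.group(1) == '_NEXT_LINE' else linenum

    print(nolint_target('// NOLINT_NEXT_LINE(whitespace/operators)', 58))  # -> 59
    print(nolint_target('int id;  // NOLINT(runtime/int)', 12))            # -> 12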
diff --git a/src/caffe/layers/bnll_layer.cu b/src/caffe/layers/bnll_layer.cu
index c1795bc..f61cffa 100644
--- a/src/caffe/layers/bnll_layer.cu
+++ b/src/caffe/layers/bnll_layer.cu
@@ -59,7 +59,7 @@ void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -84,7 +84,7 @@ Dtype BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp
index 7950313..f973a56 100644
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -50,15 +50,15 @@ void* DataLayerPrefetch(void* layer_pointer) {
       int h_off, w_off;
       // We only do random crop when we do training.
       if (Caffe::phase() == Caffe::TRAIN) {
-        // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+        // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
        h_off = rand() % (height - cropsize);
-        // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+        // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
        w_off = rand() % (width - cropsize);
      } else {
        h_off = (height - cropsize) / 2;
        w_off = (width - cropsize) / 2;
      }
-      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+      // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
      if (mirror && rand() % 2) {
        // Copy mirrored version
        for (int c = 0; c < channels; ++c) {
@@ -143,7 +143,7 @@ void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
     iter_->SeekToFirst();
   // Check if we would need to randomly skip a few data points
   if (this->layer_param_.rand_skip()) {
-    // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+    // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
     unsigned int skip = rand() % this->layer_param_.rand_skip();
     LOG(INFO) << "Skipping first " << skip << " data points.";
     while (skip-- > 0) {
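The rand() calls annotated in DataLayerPrefetch above choose the crop window: at training time the top-left offset is drawn from [0, height - cropsize) x [0, width - cropsize), so a cropsize x cropsize window always stays inside the image, while at test time the center crop is used. A small sketch of the same arithmetic (crop_offsets and the example values are made up for illustration, not Caffe code):

    import random

    def crop_offsets(height, width, cropsize, train=True):
        """Pick the top-left corner of a cropsize x cropsize window."""
        if train:
            # Random crop, like rand() % (dim - cropsize): offset in [0, dim - cropsize).
            h_off = random.randrange(height - cropsize)
            w_off = random.randrange(width - cropsize)
        else:
            # Deterministic center crop at test time.
            h_off = (height - cropsize) // 2
            w_off = (width - cropsize) // 2
        return h_off, w_off

    print(crop_offsets(256, 256, 227, train=False))  # -> (14, 14)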
diff --git a/src/caffe/layers/dropout_layer.cu b/src/caffe/layers/dropout_layer.cu
index 0e15b6f..efba295 100644
--- a/src/caffe/layers/dropout_layer.cu
+++ b/src/caffe/layers/dropout_layer.cu
@@ -82,7 +82,7 @@ void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
         (unsigned int*)(rand_vec_->mutable_gpu_data()), count));
     // set thresholds
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_,
         scale_, top_data);
     CUDA_POST_KERNEL_CHECK;
@@ -113,7 +113,7 @@ Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data();
     const int count = (*bottom)[0]->count();
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, mask, uint_thres_, scale_, bottom_diff);
     CUDA_POST_KERNEL_CHECK;
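On the dropout hunks above: curandGenerate fills rand_vec_ with random unsigned ints, and the kernel compares each one against uint_thres_ to decide whether to zero that activation, rescaling what it keeps by scale_. A rough sketch of that keep-or-zero idea (illustrative only; dropout_forward and the 0.5 ratio are assumptions for the example, not taken from this patch):

    import random

    UINT_MAX = 2**32 - 1

    def dropout_forward(bottom, dropout_ratio=0.5):
        """Zero each value with probability dropout_ratio; rescale the survivors."""
        uint_thres = int(UINT_MAX * dropout_ratio)   # threshold on the random uint
        scale = 1.0 / (1.0 - dropout_ratio)          # keeps the expected value unchanged
        top = []
        for x in bottom:
            r = random.getrandbits(32)               # stand-in for curandGenerate()
            top.append(x * scale if r > uint_thres else 0.0)
        return top

    print(dropout_forward([1.0, 2.0, 3.0, 4.0]))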
diff --git a/src/caffe/layers/lrn_layer.cu b/src/caffe/layers/lrn_layer.cu
index 9c82b35..67c1039 100644
--- a/src/caffe/layers/lrn_layer.cu
+++ b/src/caffe/layers/lrn_layer.cu
@@ -76,13 +76,13 @@ void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   // We will launch one kernel for each pixel location, and have the kernel
   // go through all the channels.
   int n_threads = num_ * height_ * width_;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNFillScale<Dtype><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, bottom_data, num_, channels_, height_, width_, size_,
       alpha_ / size_, scale_data);
   CUDA_POST_KERNEL_CHECK;
   n_threads = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNComputeOutput<Dtype><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, bottom_data, scale_data, -beta_, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -155,7 +155,7 @@ template <typename Dtype>
 Dtype LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   int n_threads = num_ * height_ * width_;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNComputeDiff<Dtype><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
       scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu
index 75078b3..1c8952f 100644
--- a/src/caffe/layers/pooling_layer.cu
+++ b/src/caffe/layers/pooling_layer.cu
@@ -146,14 +146,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   int count = (*top)[0]->count();
   switch (this->layer_param_.pool()) {
   case LayerParameter_PoolMethod_MAX:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
         top_data);
     break;
   case LayerParameter_PoolMethod_AVE:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
@@ -164,14 +164,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       // We need to create the random index as well.
       CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(),
           rand_idx_.mutable_gpu_data(), count));
-      // NOLINT_NEXTLINE(whitespace/operators)
+      // NOLINT_NEXT_LINE(whitespace/operators)
       StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), CHANNELS_,
           HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
           rand_idx_.mutable_gpu_data(), top_data);
     } else {
-      // NOLINT_NEXTLINE(whitespace/operators)
+      // NOLINT_NEXT_LINE(whitespace/operators)
       StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), CHANNELS_,
@@ -294,21 +294,21 @@ Dtype PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   int count = (*bottom)[0]->count();
   switch (this->layer_param_.pool()) {
   case LayerParameter_PoolMethod_MAX:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
         top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
         POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff);
     break;
   case LayerParameter_PoolMethod_AVE:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, top[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
         bottom_diff);
     break;
   case LayerParameter_PoolMethod_STOCHASTIC:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, rand_idx_.gpu_data(), top_diff,
         top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
diff --git a/src/caffe/layers/relu_layer.cu b/src/caffe/layers/relu_layer.cu
index c56d22a..ed1aab4 100644
--- a/src/caffe/layers/relu_layer.cu
+++ b/src/caffe/layers/relu_layer.cu
@@ -51,7 +51,7 @@ void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -80,7 +80,7 @@ Dtype ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/layers/sigmoid_layer.cu b/src/caffe/layers/sigmoid_layer.cu
index 1680aa6..e50260d 100644
--- a/src/caffe/layers/sigmoid_layer.cu
+++ b/src/caffe/layers/sigmoid_layer.cu
@@ -65,7 +65,7 @@ void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -95,7 +95,7 @@ Dtype SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;
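On the stochastic pooling hunks in pooling_layer.cu above: curandGenerateUniform fills rand_idx_ with one uniform draw per output, which StoPoolForwardTrain uses to select an activation in each pooling window with probability proportional to its value. A rough sketch of that sampling idea (illustrative only; stochastic_pool_window is a made-up helper, not Caffe code):

    import random

    def stochastic_pool_window(acts):
        """Pick one activation with probability proportional to its magnitude."""
        total = sum(acts)
        if total == 0:
            return 0.0
        r = random.uniform(0.0, total)   # one uniform draw per pooling window
        cumulative = 0.0
        for a in acts:
            cumulative += a
            if r <= cumulative:
                return a
        return acts[-1]

    print(stochastic_pool_window([0.1, 0.4, 0.5]))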
diff --git a/src/caffe/layers/softmax_layer.cu b/src/caffe/layers/softmax_layer.cu
index f7adab3..af73260 100644
--- a/src/caffe/layers/softmax_layer.cu
+++ b/src/caffe/layers/softmax_layer.cu
@@ -105,21 +105,21 @@ void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   // we need to subtract the max to avoid numerical issues, compute the exp,
   // and then normalize.
   // Compute max
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_get_max<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
       num, dim, bottom_data, scale_data);
   // subtraction
   caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
       scale_data, sum_multiplier_.gpu_data(), 1., top_data);
   // Perform exponentiation
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
       num * dim, top_data, top_data);
   // sum after exp
   caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
       sum_multiplier_.gpu_data(), 0., scale_data);
   // Do division
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
       num, dim, scale_data, top_data);
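The comments in this hunk spell out the numerically stable softmax: subtract the per-row max, exponentiate, then normalize by the sum. The same four steps on a single row, as a small illustrative sketch (softmax_row is a made-up name, not a Caffe function):

    import math

    def softmax_row(row):
        """Numerically stable softmax over one row, mirroring the steps above."""
        m = max(row)                           # Compute max
        shifted = [x - m for x in row]         # subtraction
        exps = [math.exp(x) for x in shifted]  # Perform exponentiation
        s = sum(exps)                          # sum after exp
        return [e / s for e in exps]           # Do division

    # Works even where a naive exp() would overflow, e.g. exp(1002).
    print(softmax_row([1000.0, 1001.0, 1002.0]))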
diff --git a/src/caffe/layers/tanh_layer.cu b/src/caffe/layers/tanh_layer.cu
index 2814577..a309a60 100644
--- a/src/caffe/layers/tanh_layer.cu
+++ b/src/caffe/layers/tanh_layer.cu
@@ -58,7 +58,7 @@ void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -89,7 +89,7 @@ Dtype TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   const Dtype* top_diff = top[0]->gpu_diff();
   Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
   const int count = (*bottom)[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, top_diff, bottom_data, bottom_diff);
   CUDA_POST_KERNEL_CHECK;
diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
index 85285d0..5169b70 100644
--- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
+++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
@@ -31,7 +31,7 @@ class MultinomialLogisticLossLayerTest : public ::testing::Test {
     filler.Fill(this->blob_bottom_data_);
     blob_bottom_vec_.push_back(blob_bottom_data_);
     for (int i = 0; i < blob_bottom_label_->count(); ++i) {
-      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+      // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
       blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
     }
     blob_bottom_vec_.push_back(blob_bottom_label_);
diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp
index c8c417a..77668e5 100644
--- a/src/caffe/test/test_softmax_with_loss_layer.cpp
+++ b/src/caffe/test/test_softmax_with_loss_layer.cpp
@@ -32,7 +32,7 @@ class SoftmaxWithLossLayerTest : public ::testing::Test {
     filler.Fill(this->blob_bottom_data_);
     blob_bottom_vec_.push_back(blob_bottom_data_);
     for (int i = 0; i < blob_bottom_label_->count(); ++i) {
-      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+      // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
       blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
     }
     blob_bottom_vec_.push_back(blob_bottom_label_);
diff --git a/src/caffe/util/im2col.cu b/src/caffe/util/im2col.cu
index c3c8736..3156fa3 100644
--- a/src/caffe/util/im2col.cu
+++ b/src/caffe/util/im2col.cu
@@ -47,7 +47,7 @@ void im2col_gpu(const Dtype* data_im, const int channels,
   int height_col = (height + 2 * pad - ksize) / stride + 1;
   int width_col = (width + 2 * pad - ksize) / stride + 1;
   int num_kernels = channels * height_col * width_col;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                              CAFFE_CUDA_NUM_THREADS>>>(
       num_kernels, data_im, height, width, ksize, pad, stride, height_col,
@@ -113,7 +113,7 @@ void col2im_gpu(const Dtype* data_col, const int channels,
   int num_kernels = channels * height * width;
   // To avoid involving atomic operations, we will launch one kernel per
   // bottom dimension, and then in the kernel add up the top dimensions.
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                              CAFFE_CUDA_NUM_THREADS>>>(
       num_kernels, data_col, height, width, channels, ksize, pad, stride,
diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu
index d063504..b29a58a 100644
--- a/src/caffe/util/math_functions.cu
+++ b/src/caffe/util/math_functions.cu
@@ -21,7 +21,7 @@ __global__ void mul_kernel(const int n, const Dtype* a,
 template <>
 void caffe_gpu_mul<float>(const int N, const float* a,
     const float* b, float* y) {
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
       N, a, b, y);
 }
@@ -29,7 +29,7 @@ void caffe_gpu_mul<float>(const int N, const float* a,
 template <>
 void caffe_gpu_mul<double>(const int N, const double* a,
     const double* b, double* y) {
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
       N, a, b, y);
 }
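The im2col.cu hunks above size the column buffer with (dim + 2 * pad - ksize) / stride + 1 per spatial axis. A quick worked check of that integer formula (conv_output_dim and the example values are chosen only for illustration):

    def conv_output_dim(dim, ksize, pad, stride):
        """Output size along one axis: same arithmetic as height_col/width_col above."""
        return (dim + 2 * pad - ksize) // stride + 1

    # e.g. a 227-pixel axis, 11x11 kernel, no padding, stride 4 -> 55 columns
    print(conv_output_dim(227, ksize=11, pad=0, stride=4))   # 55
    print(conv_output_dim(32, ksize=5, pad=2, stride=1))     # 32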