cpplint.py -> cpp_lint.py and NOLINT_NEXTLINE -> NOLINT_NEXT_LINE
author Jeff Donahue <jeff.donahue@gmail.com>
Wed, 26 Feb 2014 05:56:49 +0000 (21:56 -0800)
committer Evan Shelhamer <shelhamer@imaginarynumber.net>
Wed, 26 Feb 2014 23:42:39 +0000 (15:42 -0800)
15 files changed:
Makefile
scripts/cpp_lint.py [moved from scripts/cpplint.py with 99% similarity]
src/caffe/layers/bnll_layer.cu
src/caffe/layers/data_layer.cpp
src/caffe/layers/dropout_layer.cu
src/caffe/layers/lrn_layer.cu
src/caffe/layers/pooling_layer.cu
src/caffe/layers/relu_layer.cu
src/caffe/layers/sigmoid_layer.cu
src/caffe/layers/softmax_layer.cu
src/caffe/layers/tanh_layer.cu
src/caffe/test/test_multinomial_logistic_loss_layer.cpp
src/caffe/test/test_softmax_with_loss_layer.cpp
src/caffe/util/im2col.cu
src/caffe/util/math_functions.cu

index df96fc2..e6098ca 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -121,7 +121,7 @@ linecount: clean
        cloc --read-lang-def=$(PROJECT).cloc src/$(PROJECT)/
 
 lint:
-       ./scripts/cpplint.py $(NONGEN_CXX_SRCS)
+       ./scripts/cpp_lint.py $(NONGEN_CXX_SRCS)
 
 test: init $(TEST_BINS)
 
similarity index 99%
rename from scripts/cpplint.py
rename to scripts/cpp_lint.py
index 4143c4b..11bfefa 100755 (executable)
--- a/scripts/cpplint.py
+++ b/scripts/cpp_lint.py
@@ -437,8 +437,8 @@ _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
 
 _regexp_compile_cache = {}
 
-# Finds occurrences of NOLINT[_NEXTLINE] or NOLINT[_NEXTLINE](...).
-_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXTLINE)?\b(\([^)]*\))?')
+# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
+_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')
 
 # {str, set(int)}: a map from error categories to sets of linenumbers
 # on which those errors are expected and should be suppressed.
@@ -472,7 +472,7 @@ def ParseNolintSuppressions(filename, raw_line, linenum, error):
   # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
   matched = _RE_SUPPRESSION.search(raw_line)
   if matched:
-    if matched.group(1) == '_NEXTLINE':
+    if matched.group(1) == '_NEXT_LINE':
       linenum += 1
     category = matched.group(2)
     if category in (None, '(*)'):  # => "suppress all"
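For context, the renamed suppression behaves as before: a plain NOLINT comment suppresses lint errors on its own line, while NOLINT_NEXT_LINE applies to the following line (ParseNolintSuppressions above bumps linenum by one), and an optional "(category)" argument limits the suppression to that category. Below is a minimal standalone sketch of the matching behavior; it reuses the same pattern as _RE_SUPPRESSION, but the variable name suppression_re, the loop, and the demo strings are made up for illustration and are not part of this commit.

  import re

  # group(1): optional "_NEXT_LINE" suffix; group(2): optional "(category)".
  suppression_re = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')

  for line in ('x = 1;  // NOLINT',
               '// NOLINT_NEXT_LINE(whitespace/operators)',
               '// NOLINT_NEXT_LINE(runtime/threadsafe_fn)'):
      m = suppression_re.search(line)
      target = 'next line' if m.group(1) == '_NEXT_LINE' else 'this line'
      category = m.group(2) or '(*)'  # no argument => suppress all categories
      print(target, category)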
index c1795bc..f61cffa 100644 (file)
--- a/src/caffe/layers/bnll_layer.cu
+++ b/src/caffe/layers/bnll_layer.cu
@@ -59,7 +59,7 @@ void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -84,7 +84,7 @@ Dtype BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const Dtype* top_diff = top[0]->gpu_diff();
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     const int count = (*bottom)[0]->count();
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, bottom_data, bottom_diff);
     CUDA_POST_KERNEL_CHECK;
index 7950313..f973a56 100644 (file)
--- a/src/caffe/layers/data_layer.cpp
+++ b/src/caffe/layers/data_layer.cpp
@@ -50,15 +50,15 @@ void* DataLayerPrefetch(void* layer_pointer) {
       int h_off, w_off;
       // We only do random crop when we do training.
       if (Caffe::phase() == Caffe::TRAIN) {
-        // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+        // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
         h_off = rand() % (height - cropsize);
-        // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+        // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
         w_off = rand() % (width - cropsize);
       } else {
         h_off = (height - cropsize) / 2;
         w_off = (width - cropsize) / 2;
       }
-      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+      // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
       if (mirror && rand() % 2) {
         // Copy mirrored version
         for (int c = 0; c < channels; ++c) {
@@ -143,7 +143,7 @@ void DataLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
   iter_->SeekToFirst();
   // Check if we would need to randomly skip a few data points
   if (this->layer_param_.rand_skip()) {
-    // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+    // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
     unsigned int skip = rand() % this->layer_param_.rand_skip();
     LOG(INFO) << "Skipping first " << skip << " data points.";
     while (skip-- > 0) {
index 0e15b6f..efba295 100644 (file)
--- a/src/caffe/layers/dropout_layer.cu
+++ b/src/caffe/layers/dropout_layer.cu
@@ -82,7 +82,7 @@ void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     CURAND_CHECK(curandGenerate(Caffe::curand_generator(),
         (unsigned int*)(rand_vec_->mutable_gpu_data()), count));
     // set thresholds
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, (unsigned int*)rand_vec_->gpu_data(), uint_thres_,
         scale_, top_data);
@@ -113,7 +113,7 @@ Dtype DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     const unsigned int* mask = (unsigned int*)rand_vec_->gpu_data();
     const int count = (*bottom)[0]->count();
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, mask, uint_thres_, scale_, bottom_diff);
     CUDA_POST_KERNEL_CHECK;
index 9c82b35..67c1039 100644 (file)
--- a/src/caffe/layers/lrn_layer.cu
+++ b/src/caffe/layers/lrn_layer.cu
@@ -76,13 +76,13 @@ void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   // We will launch one kernel for each pixel location, and have the kernel
   // go through all the channels.
   int n_threads = num_ * height_ * width_;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, bottom_data, num_, channels_, height_, width_, size_,
       alpha_ / size_, scale_data);
   CUDA_POST_KERNEL_CHECK;
   n_threads = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, bottom_data, scale_data, -beta_, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -155,7 +155,7 @@ template <typename Dtype>
 Dtype LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   int n_threads = num_ * height_ * width_;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
       n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
       scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
index 75078b3..1c8952f 100644 (file)
--- a/src/caffe/layers/pooling_layer.cu
+++ b/src/caffe/layers/pooling_layer.cu
@@ -146,14 +146,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   int count = (*top)[0]->count();
   switch (this->layer_param_.pool()) {
   case LayerParameter_PoolMethod_MAX:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
         top_data);
     break;
   case LayerParameter_PoolMethod_AVE:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
@@ -164,14 +164,14 @@ void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       // We need to create the random index as well.
       CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(),
           rand_idx_.mutable_gpu_data(), count));
-      // NOLINT_NEXTLINE(whitespace/operators)
+      // NOLINT_NEXT_LINE(whitespace/operators)
       StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
                                    CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), CHANNELS_,
           HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
           rand_idx_.mutable_gpu_data(), top_data);
     } else {
-      // NOLINT_NEXTLINE(whitespace/operators)
+      // NOLINT_NEXT_LINE(whitespace/operators)
       StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
                                   CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), CHANNELS_,
@@ -294,21 +294,21 @@ Dtype PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   int count = (*bottom)[0]->count();
   switch (this->layer_param_.pool()) {
   case LayerParameter_PoolMethod_MAX:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, (*bottom)[0]->gpu_data(), top[0]->gpu_data(), top_diff,
         top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
         POOLED_WIDTH_, KSIZE_, STRIDE_, bottom_diff);
     break;
   case LayerParameter_PoolMethod_AVE:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, top[0]->num(), CHANNELS_,
         HEIGHT_, WIDTH_, POOLED_HEIGHT_, POOLED_WIDTH_, KSIZE_, STRIDE_,
         bottom_diff);
     break;
   case LayerParameter_PoolMethod_STOCHASTIC:
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, rand_idx_.gpu_data(), top_diff,
         top[0]->num(), CHANNELS_, HEIGHT_, WIDTH_, POOLED_HEIGHT_,
index c56d22a..ed1aab4 100644 (file)
--- a/src/caffe/layers/relu_layer.cu
+++ b/src/caffe/layers/relu_layer.cu
@@ -51,7 +51,7 @@ void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -80,7 +80,7 @@ Dtype ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const Dtype* top_diff = top[0]->gpu_diff();
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     const int count = (*bottom)[0]->count();
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, bottom_data, bottom_diff);
     CUDA_POST_KERNEL_CHECK;
index 1680aa6..e50260d 100644 (file)
--- a/src/caffe/layers/sigmoid_layer.cu
+++ b/src/caffe/layers/sigmoid_layer.cu
@@ -65,7 +65,7 @@ void SigmoidLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   SigmoidForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -95,7 +95,7 @@ Dtype SigmoidLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const Dtype* top_diff = top[0]->gpu_diff();
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     const int count = (*bottom)[0]->count();
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     SigmoidBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, bottom_data, bottom_diff);
     CUDA_POST_KERNEL_CHECK;
index f7adab3..af73260 100644 (file)
--- a/src/caffe/layers/softmax_layer.cu
+++ b/src/caffe/layers/softmax_layer.cu
@@ -105,21 +105,21 @@ void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   // we need to subtract the max to avoid numerical issues, compute the exp,
   // and then normalize.
   // Compute max
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_get_max<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
       num, dim, bottom_data, scale_data);
   // subtraction
   caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
       scale_data, sum_multiplier_.gpu_data(), 1., top_data);
   // Perform exponentiation
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
       num * dim, top_data, top_data);
   // sum after exp
   caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
       sum_multiplier_.gpu_data(), 0., scale_data);
   // Do division
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim),
                               CAFFE_CUDA_NUM_THREADS>>>(
       num, dim, scale_data, top_data);
index 2814577..a309a60 100644 (file)
--- a/src/caffe/layers/tanh_layer.cu
+++ b/src/caffe/layers/tanh_layer.cu
@@ -58,7 +58,7 @@ void TanHLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->gpu_data();
   Dtype* top_data = (*top)[0]->mutable_gpu_data();
   const int count = bottom[0]->count();
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   TanHForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
       count, bottom_data, top_data);
   CUDA_POST_KERNEL_CHECK;
@@ -89,7 +89,7 @@ Dtype TanHLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const Dtype* top_diff = top[0]->gpu_diff();
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     const int count = (*bottom)[0]->count();
-    // NOLINT_NEXTLINE(whitespace/operators)
+    // NOLINT_NEXT_LINE(whitespace/operators)
     TanHBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, bottom_data, bottom_diff);
     CUDA_POST_KERNEL_CHECK;
index 85285d0..5169b70 100644 (file)
--- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
+++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp
@@ -31,7 +31,7 @@ class MultinomialLogisticLossLayerTest : public ::testing::Test {
     filler.Fill(this->blob_bottom_data_);
     blob_bottom_vec_.push_back(blob_bottom_data_);
     for (int i = 0; i < blob_bottom_label_->count(); ++i) {
-      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+      // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
       blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
     }
     blob_bottom_vec_.push_back(blob_bottom_label_);
index c8c417a..77668e5 100644 (file)
--- a/src/caffe/test/test_softmax_with_loss_layer.cpp
+++ b/src/caffe/test/test_softmax_with_loss_layer.cpp
@@ -32,7 +32,7 @@ class SoftmaxWithLossLayerTest : public ::testing::Test {
     filler.Fill(this->blob_bottom_data_);
     blob_bottom_vec_.push_back(blob_bottom_data_);
     for (int i = 0; i < blob_bottom_label_->count(); ++i) {
-      // NOLINT_NEXTLINE(runtime/threadsafe_fn)
+      // NOLINT_NEXT_LINE(runtime/threadsafe_fn)
       blob_bottom_label_->mutable_cpu_data()[i] = rand() % 5;
     }
     blob_bottom_vec_.push_back(blob_bottom_label_);
index c3c8736..3156fa3 100644 (file)
--- a/src/caffe/util/im2col.cu
+++ b/src/caffe/util/im2col.cu
@@ -47,7 +47,7 @@ void im2col_gpu(const Dtype* data_im, const int channels,
   int height_col = (height + 2 * pad - ksize) / stride + 1;
   int width_col = (width + 2 * pad - ksize) / stride + 1;
   int num_kernels = channels * height_col * width_col;
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                              CAFFE_CUDA_NUM_THREADS>>>(
       num_kernels, data_im, height, width, ksize, pad, stride, height_col,
@@ -113,7 +113,7 @@ void col2im_gpu(const Dtype* data_col, const int channels,
   int num_kernels = channels * height * width;
   // To avoid involving atomic operations, we will launch one kernel per
   // bottom dimension, and then in the kernel add up the top dimensions.
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
                              CAFFE_CUDA_NUM_THREADS>>>(
       num_kernels, data_col, height, width, channels, ksize, pad, stride,
index d063504..b29a58a 100644 (file)
--- a/src/caffe/util/math_functions.cu
+++ b/src/caffe/util/math_functions.cu
@@ -21,7 +21,7 @@ __global__ void mul_kernel(const int n, const Dtype* a,
 template <>
 void caffe_gpu_mul<float>(const int N, const float* a,
     const float* b, float* y) {
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
       N, a, b, y);
 }
@@ -29,7 +29,7 @@ void caffe_gpu_mul<float>(const int N, const float* a,
 template <>
 void caffe_gpu_mul<double>(const int N, const double* a,
     const double* b, double* y) {
-  // NOLINT_NEXTLINE(whitespace/operators)
+  // NOLINT_NEXT_LINE(whitespace/operators)
   mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
       N, a, b, y);
 }