more cpplint
author Yangqing Jia <jiayq84@gmail.com>
Fri, 27 Sep 2013 18:53:12 +0000 (11:53 -0700)
committer Yangqing Jia <jiayq84@gmail.com>
Fri, 27 Sep 2013 18:53:12 +0000 (11:53 -0700)
src/caffe/test/lenet.hpp
src/caffe/test/test_caffe_main.hpp
src/caffe/test/test_gradient_check_util.hpp

diff --git a/src/caffe/test/lenet.hpp b/src/caffe/test/lenet.hpp
index 266f0b2..017463a 100644
@@ -1,3 +1,9 @@
+// Copyright Yangqing Jia 2013
+
+// This file is merely here so we can easily get a string of the lenet.
+// It is actually not the very original LeNet, but with the sigmoid layers
+// replaced by ReLU layers.
+
 #ifndef CAFFE_TEST_LENET_HPP_
 #define CAFFE_TEST_LENET_HPP_
 
 
 namespace caffe {
 
-const char* kLENET = "name: \"LeNet\"\n\
-bottom: \"data\"\n\
-bottom: \"label\"\n\
-layers {\n\
-  layer {\n\
-    name: \"conv1\"\n\
-    type: \"conv\"\n\
-    num_output: 20\n\
-    kernelsize: 5\n\
-    stride: 1\n\
-    weight_filler {\n\
-      type: \"xavier\"\n\
-    }\n\
-    bias_filler {\n\
-      type: \"constant\"\n\
-    }\n\
-  }\n\
-  bottom: \"data\"\n\
-  top: \"conv1\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"pool1\"\n\
-    type: \"pool\"\n\
-    kernelsize: 2\n\
-    stride: 2\n\
-    pool: MAX\n\
-  }\n\
-  bottom: \"conv1\"\n\
-  top: \"pool1\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"conv2\"\n\
-    type: \"conv\"\n\
-    num_output: 50\n\
-    kernelsize: 5\n\
-    stride: 1\n\
-    weight_filler {\n\
-      type: \"xavier\"\n\
-    }\n\
-    bias_filler {\n\
-      type: \"constant\"\n\
-    }\n\
-  }\n\
-  bottom: \"pool1\"\n\
-  top: \"conv2\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"pool2\"\n\
-    type: \"pool\"\n\
-    kernelsize: 2\n\
-    stride: 2\n\
-    pool: MAX\n\
-  }\n\
-  bottom: \"conv2\"\n\
-  top: \"pool2\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"ip1\"\n\
-    type: \"innerproduct\"\n\
-    num_output: 500\n\
-    weight_filler {\n\
-      type: \"xavier\"\n\
-    }\n\
-    bias_filler {\n\
-      type: \"constant\"\n\
-    }\n\
-  }\n\
-  bottom: \"pool2\"\n\
-  top: \"ip1\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"relu1\"\n\
-    type: \"relu\"\n\
-  }\n\
-  bottom: \"ip1\"\n\
-  top: \"relu1\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"ip2\"\n\
-    type: \"innerproduct\"\n\
-    num_output: 10\n\
-    weight_filler {\n\
-      type: \"xavier\"\n\
-    }\n\
-    bias_filler {\n\
-      type: \"constant\"\n\
-    }\n\
-  }\n\
-  bottom: \"relu1\"\n\
-  top: \"ip2\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"prob\"\n\
-    type: \"softmax\"\n\
-  }\n\
-  bottom: \"ip2\"\n\
-  top: \"prob\"\n\
-}\n\
-layers {\n\
-  layer {\n\
-    name: \"loss\"\n\
-    type: \"multinomial_logistic_loss\"\n\
-  }\n\
-  bottom: \"prob\"\n\
-  bottom: \"label\"\n\
-}";
+const char* kLENET = "name: \"LeNet\"\n"
+"bottom: \"data\"\n"
+"bottom: \"label\"\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"conv1\"\n"
+"    type: \"conv\"\n"
+"    num_output: 20\n"
+"    kernelsize: 5\n"
+"    stride: 1\n"
+"    weight_filler {\n"
+"      type: \"xavier\"\n"
+"    }\n"
+"    bias_filler {\n"
+"      type: \"constant\"\n"
+"    }\n"
+"  }\n"
+"  bottom: \"data\"\n"
+"  top: \"conv1\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"pool1\"\n"
+"    type: \"pool\"\n"
+"    kernelsize: 2\n"
+"    stride: 2\n"
+"    pool: MAX\n"
+"  }\n"
+"  bottom: \"conv1\"\n"
+"  top: \"pool1\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"conv2\"\n"
+"    type: \"conv\"\n"
+"    num_output: 50\n"
+"    kernelsize: 5\n"
+"    stride: 1\n"
+"    weight_filler {\n"
+"      type: \"xavier\"\n"
+"    }\n"
+"    bias_filler {\n"
+"      type: \"constant\"\n"
+"    }\n"
+"  }\n"
+"  bottom: \"pool1\"\n"
+"  top: \"conv2\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"pool2\"\n"
+"    type: \"pool\"\n"
+"    kernelsize: 2\n"
+"    stride: 2\n"
+"    pool: MAX\n"
+"  }\n"
+"  bottom: \"conv2\"\n"
+"  top: \"pool2\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"ip1\"\n"
+"    type: \"innerproduct\"\n"
+"    num_output: 500\n"
+"    weight_filler {\n"
+"      type: \"xavier\"\n"
+"    }\n"
+"    bias_filler {\n"
+"      type: \"constant\"\n"
+"    }\n"
+"  }\n"
+"  bottom: \"pool2\"\n"
+"  top: \"ip1\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"relu1\"\n"
+"    type: \"relu\"\n"
+"  }\n"
+"  bottom: \"ip1\"\n"
+"  top: \"relu1\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"ip2\"\n"
+"    type: \"innerproduct\"\n"
+"    num_output: 10\n"
+"    weight_filler {\n"
+"      type: \"xavier\"\n"
+"    }\n"
+"    bias_filler {\n"
+"      type: \"constant\"\n"
+"    }\n"
+"  }\n"
+"  bottom: \"relu1\"\n"
+"  top: \"ip2\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"prob\"\n"
+"    type: \"softmax\"\n"
+"  }\n"
+"  bottom: \"ip2\"\n"
+"  top: \"prob\"\n"
+"}\n"
+"layers {\n"
+"  layer {\n"
+"    name: \"loss\"\n"
+"    type: \"multinomial_logistic_loss\"\n"
+"  }\n"
+"  bottom: \"prob\"\n"
+"  bottom: \"label\"\n"
+"}";
 
 }  // namespace caffe
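
The kLENET constant above is the network definition in protobuf text format. For orientation only (this snippet is not part of the commit, and the include paths for the generated proto header and for lenet.hpp are assumptions about the build layout), a test could materialize it into a NetParameter roughly like this:

// Sketch: parse the text-format LeNet definition into a NetParameter.
// Not from this commit; the include paths are assumptions.
#include <google/protobuf/text_format.h>
#include <glog/logging.h>
#include <string>
#include "caffe/proto/caffe.pb.h"
#include "caffe/test/lenet.hpp"

caffe::NetParameter LoadLeNetParam() {
  caffe::NetParameter net_param;
  // kLENET is plain protobuf text format, so the stock parser reads it as-is.
  CHECK(google::protobuf::TextFormat::ParseFromString(
      std::string(caffe::kLENET), &net_param));
  return net_param;
}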
 
diff --git a/src/caffe/test/test_caffe_main.hpp b/src/caffe/test/test_caffe_main.hpp
index 9ee11a3..a8c1657 100644
@@ -5,13 +5,14 @@
 #ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
 #define CAFFE_TEST_TEST_CAFFE_MAIN_HPP_
 
+#include <cuda_runtime.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+
 #include <cstdlib>
 #include <cstdio>
 #include <iostream>
 
-#include <cuda_runtime.h>
-#include <glog/logging.h>
-#include <gtest/gtest.h>
 
 namespace caffe {
 
diff --git a/src/caffe/test/test_gradient_check_util.hpp b/src/caffe/test/test_gradient_check_util.hpp
index 0c34861..c540549 100644
@@ -3,10 +3,13 @@
 #ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_
 #define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_
 
-#include <algorithm>
-#include <cmath>
 #include <glog/logging.h>
 #include <gtest/gtest.h>
+
+#include <algorithm>
+#include <cmath>
+#include <vector>
+
 #include "caffe/layer.hpp"
 
 using std::max;
@@ -22,7 +25,7 @@ class GradientChecker {
       const unsigned int seed = 1701, const Dtype kink = 0.,
       const Dtype kink_range = -1)
       : stepsize_(stepsize), threshold_(threshold), seed_(seed),
-        kink_(kink), kink_range_(kink_range) {};
+        kink_(kink), kink_range_(kink_range) {}
   // Checks the gradient of a layer, with provided bottom layers and top
   // layers. The gradient checker will check the gradient with respect to
   // the parameters of the layer, as well as the input blobs if check_through
@@ -41,6 +44,7 @@ class GradientChecker {
   void CheckGradientSingle(Layer<Dtype>& layer, vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>& top, int check_bottom, int top_id,
       int top_data_id);
+
  protected:
   Dtype GetObjAndGradient(vector<Blob<Dtype>*>& top, int top_id = -1,
       int top_data_id = -1);
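
For orientation (not part of the commit), a hypothetical use of the checker declared above might look as follows; the layer type, blob shape, filler setup, and header locations are assumptions, since none of them appear in this diff:

// Hypothetical usage sketch of GradientChecker; everything about the layer
// under test here is assumed for illustration, not taken from the commit.
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

void CheckReluGradient() {
  caffe::Blob<float> bottom_blob(2, 3, 4, 5);   // assumed input shape
  caffe::Blob<float> top_blob;
  caffe::FillerParameter filler_param;
  caffe::GaussianFiller<float> filler(filler_param);
  filler.Fill(&bottom_blob);                    // random input data
  std::vector<caffe::Blob<float>*> bottom(1, &bottom_blob);
  std::vector<caffe::Blob<float>*> top(1, &top_blob);
  caffe::LayerParameter layer_param;
  caffe::ReLULayer<float> layer(layer_param);
  // stepsize 1e-2, threshold 1e-3; seed, kink and kink_range keep defaults.
  caffe::GradientChecker<float> checker(1e-2, 1e-3);
  // check_bottom = 0: also check the gradient w.r.t. bottom blob 0.
  checker.CheckGradientExhaustive(layer, bottom, top, 0);
}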
@@ -73,11 +77,11 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
     blobs_to_check.push_back(bottom[check_bottom]);
   }
   // go through the bottom and parameter blobs
-  //LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
+  // LOG(ERROR) << "Checking " << blobs_to_check.size() << " blobs.";
   for (int blobid = 0; blobid < blobs_to_check.size(); ++blobid) {
     Blob<Dtype>* current_blob = blobs_to_check[blobid];
-    //LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
-    //    << " parameters.";
+    // LOG(ERROR) << "Blob " << blobid << ": checking " << current_blob->count()
+    //     << " parameters.";
     // go through the values
     for (int feat_id = 0; feat_id < current_blob->count(); ++feat_id) {
       // First, obtain the original data
@@ -104,13 +108,13 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
       Dtype estimated_gradient = (positive_objective - negative_objective) /
           stepsize_ / 2.;
       Dtype feature = current_blob->cpu_data()[feat_id];
-      //LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
-      //    << current_blob->cpu_diff()[feat_id];
+      // LOG(ERROR) << "debug: " << current_blob->cpu_data()[feat_id] << " "
+      //     << current_blob->cpu_diff()[feat_id];
       if (kink_ - kink_range_ > feature || feature > kink_ + kink_range_) {
         // We check relative accuracy, but for too small values, we threshold
         // the scale factor by 1.
-        Dtype scale = max(max(fabs(computed_gradient), fabs(estimated_gradient)),
-            1.);
+        Dtype scale = max(
+            max(fabs(computed_gradient), fabs(estimated_gradient)), 1.);
         EXPECT_GT(computed_gradient, estimated_gradient - threshold_ * scale)
           << "debug: (top_id, top_data_id, blob_id, feat_id)="
           << top_id << "," << top_data_id << "," << blobid << "," << feat_id;
@@ -118,22 +122,23 @@ void GradientChecker<Dtype>::CheckGradientSingle(Layer<Dtype>& layer,
           << "debug: (top_id, top_data_id, blob_id, feat_id)="
           << top_id << "," << top_data_id << "," << blobid << "," << feat_id;
       }
-      //LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
-      //LOG(ERROR) << "computed gradient: " << computed_gradient
-      //    << " estimated_gradient: " << estimated_gradient;
+      // LOG(ERROR) << "Feature: " << current_blob->cpu_data()[feat_id];
+      // LOG(ERROR) << "computed gradient: " << computed_gradient
+      //     << " estimated_gradient: " << estimated_gradient;
     }
   }
 }
 
 template <typename Dtype>
 void GradientChecker<Dtype>::CheckGradientExhaustive(Layer<Dtype>& layer,
-    vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top, int check_bottom) {
+    vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>& top,
+    int check_bottom) {
   layer.SetUp(bottom, &top);
-  //LOG(ERROR) << "Exhaustive Mode.";
+  // LOG(ERROR) << "Exhaustive Mode.";
   for (int i = 0; i < top.size(); ++i) {
-    //LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
+    // LOG(ERROR) << "Exhaustive: blob " << i << " size " << top[i]->count();
     for (int j = 0; j < top[i]->count(); ++j) {
-      //LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
+      // LOG(ERROR) << "Exhaustive: blob " << i << " data " << j;
       CheckGradientSingle(layer, bottom, top, check_bottom, i, j);
     }
   }
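
As background for the comparison above (a standalone illustration, not code from the commit): CheckGradientSingle perturbs one value by +/- stepsize_, takes the central difference of the objective, and requires the analytic gradient to agree within threshold_ times a scale that is clamped at 1 for very small gradients. The same recipe on a scalar function:

// Standalone sketch of the central-difference check used by GradientChecker,
// applied to f(x) = x^3 with analytic gradient 3x^2.
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const double stepsize = 1e-2;
  const double threshold = 1e-3;
  const double x = 0.7;

  // Perturb by +/- stepsize and take the central difference of the objective.
  const double positive_objective = (x + stepsize) * (x + stepsize) * (x + stepsize);
  const double negative_objective = (x - stepsize) * (x - stepsize) * (x - stepsize);
  const double estimated_gradient =
      (positive_objective - negative_objective) / stepsize / 2.;
  const double computed_gradient = 3. * x * x;

  // Relative comparison; the scale factor is clamped at 1 for tiny gradients.
  const double scale = std::max(
      std::max(std::fabs(computed_gradient), std::fabs(estimated_gradient)), 1.);
  const bool ok =
      computed_gradient > estimated_gradient - threshold * scale &&
      computed_gradient < estimated_gradient + threshold * scale;
  std::printf("computed=%f estimated=%f ok=%d\n",
              computed_gradient, estimated_gradient, ok ? 1 : 0);
  return ok ? 0 : 1;
}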