From e10977f8a598f6753c28d1388e0900e7805280ac Mon Sep 17 00:00:00 2001
From: Ronghang Hu
Date: Mon, 7 Jul 2014 11:32:58 -0700
Subject: [PATCH] fixed style errors

---
 src/caffe/layers/pooling_layer.cpp    |  4 ++--
 src/caffe/layers/pooling_layer.cu     | 34 +++++++++++++++++-----------------
 src/caffe/test/test_pooling_layer.cpp |  6 ++++--
 3 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index d8cd2e2..9151ff2 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -35,11 +35,11 @@ void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
   CHECK(pool_param.has_kernel_size() ||
       (pool_param.has_kernel_h() && pool_param.has_kernel_w()))
       << "For non-square filters both kernel_h and kernel_w are required.";
-  CHECK((!pool_param.has_pad() && pool_param.has_pad_h() 
+  CHECK((!pool_param.has_pad() && pool_param.has_pad_h()
       && pool_param.has_pad_w())
       || (!pool_param.has_pad_h() && !pool_param.has_pad_w()))
       << "pad is pad OR pad_h and pad_w are required.";
-  CHECK((!pool_param.has_stride() && pool_param.has_stride_h() 
+  CHECK((!pool_param.has_stride() && pool_param.has_stride_h()
       && pool_param.has_stride_w())
       || (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
       << "Stride is stride OR stride_h and stride_w are required.";
diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu
index e38028d..a44b177 100644
--- a/src/caffe/layers/pooling_layer.cu
+++ b/src/caffe/layers/pooling_layer.cu
@@ -17,7 +17,7 @@ template <typename Dtype>
 __global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
     const int num, const int channels, const int height,
     const int width, const int pooled_height, const int pooled_width,
-    const int kernel_h, const int kernel_w, const int stride_h, 
+    const int kernel_h, const int kernel_w, const int stride_h,
     const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
     int* mask, Dtype* top_mask) {
   CUDA_KERNEL_LOOP(index, nthreads) {
@@ -55,7 +55,7 @@ template <typename Dtype>
 __global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
     const int num, const int channels, const int height,
     const int width, const int pooled_height, const int pooled_width,
-    const int kernel_h, const int kernel_w, const int stride_h, 
+    const int kernel_h, const int kernel_w, const int stride_h,
     const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
   CUDA_KERNEL_LOOP(index, nthreads) {
     int pw = index % pooled_width;
@@ -87,7 +87,7 @@ __global__ void StoPoolForwardTrain(const int nthreads,
     const Dtype* bottom_data,
     const int num, const int channels, const int height,
     const int width, const int pooled_height, const int pooled_width,
-    const int kernel_h, const int kernel_w, const int stride_h, 
+    const int kernel_h, const int kernel_w, const int stride_h,
     const int stride_w, Dtype* rand_idx, Dtype* top_data) {
   CUDA_KERNEL_LOOP(index, nthreads) {
     int pw = index % pooled_width;
@@ -128,7 +128,7 @@ __global__ void StoPoolForwardTest(const int nthreads,
     const Dtype* bottom_data,
     const int num, const int channels, const int height,
     const int width, const int pooled_height, const int pooled_width,
-    const int kernel_h, const int kernel_w, const int stride_h, 
+    const int kernel_h, const int kernel_w, const int stride_h,
     const int stride_w, Dtype* top_data) {
   CUDA_KERNEL_LOOP(index, nthreads) {
     int pw = index % pooled_width;
@@ -175,15 +175,15 @@ Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     // NOLINT_NEXT_LINE(whitespace/operators)
     MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), channels_,
-        height_, width_, pooled_height_, pooled_width_, kernel_h_, 
-        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, 
+        height_, width_, pooled_height_, pooled_width_, kernel_h_,
+        kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
         mask, top_mask);
     break;
   case PoolingParameter_PoolMethod_AVE:
     // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, bottom_data, bottom[0]->num(), channels_,
-        height_, width_, pooled_height_, pooled_width_, kernel_h_, 
+        height_, width_, pooled_height_, pooled_width_, kernel_h_,
         kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
     break;
   case PoolingParameter_PoolMethod_STOCHASTIC:
@@ -195,7 +195,7 @@ Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
                                    CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), channels_,
-          height_, width_, pooled_height_, pooled_width_, kernel_h_, 
+          height_, width_, pooled_height_, pooled_width_, kernel_h_,
           kernel_w_, stride_h_, stride_w_,
           rand_idx_.mutable_gpu_data(), top_data);
     } else {
@@ -203,7 +203,7 @@ Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
       StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
                                   CAFFE_CUDA_NUM_THREADS>>>(
           count, bottom_data, bottom[0]->num(), channels_,
-          height_, width_, pooled_height_, pooled_width_, kernel_h_, 
+          height_, width_, pooled_height_, pooled_width_, kernel_h_,
           kernel_w_, stride_h_, stride_w_, top_data);
     }
     break;
@@ -219,8 +219,8 @@ template <typename Dtype>
 __global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
     const int* mask, const Dtype* top_mask, const int num, const int channels,
     const int height, const int width, const int pooled_height,
-    const int pooled_width, const int kernel_h, const int kernel_w, 
-    const int stride_h, const int stride_w, const int pad_h, const int pad_w, 
+    const int pooled_width, const int kernel_h, const int kernel_w,
+    const int stride_h, const int stride_w, const int pad_h, const int pad_w,
     Dtype* bottom_diff) {
   CUDA_KERNEL_LOOP(index, nthreads) {
     // find out the local index
@@ -265,8 +265,8 @@ template <typename Dtype>
 __global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
     const int num, const int channels, const int height,
     const int width, const int pooled_height, const int pooled_width,
-    const int kernel_h, const int kernel_w, const int stride_h, 
-    const int stride_w, const int pad_h, const int pad_w, 
+    const int kernel_h, const int kernel_w, const int stride_h,
+    const int stride_w, const int pad_h, const int pad_w,
     Dtype* bottom_diff) {
   CUDA_KERNEL_LOOP(index, nthreads) {
     // find out the local index
@@ -302,7 +302,7 @@ __global__ void StoPoolBackward(const int nthreads,
     const Dtype* rand_idx, const Dtype* top_diff,
     const int num, const int channels, const int height,
     const int width, const int pooled_height, const int pooled_width,
-    const int kernel_h, const int kernel_w, const int stride_h, 
+    const int kernel_h, const int kernel_w, const int stride_h,
     const int stride_w, Dtype* bottom_diff) {
   CUDA_KERNEL_LOOP(index, nthreads) {
     // find out the local index
@@ -354,14 +354,14 @@ void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, mask, top_mask, top[0]->num(), channels_,
         height_, width_, pooled_height_, pooled_width_,
-        kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, 
+        kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
         bottom_diff);
     break;
   case PoolingParameter_PoolMethod_AVE:
     // NOLINT_NEXT_LINE(whitespace/operators)
     AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, top_diff, top[0]->num(), channels_,
-        height_, width_, pooled_height_, pooled_width_, kernel_h_, 
+        height_, width_, pooled_height_, pooled_width_, kernel_h_,
         kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
     break;
   case PoolingParameter_PoolMethod_STOCHASTIC:
@@ -369,7 +369,7 @@ void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
         count, rand_idx_.gpu_data(), top_diff,
         top[0]->num(), channels_, height_, width_, pooled_height_,
-        pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, 
+        pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
         bottom_diff);
     break;
   default:
diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp
index 651d203..bb21113 100644
--- a/src/caffe/test/test_pooling_layer.cpp
+++ b/src/caffe/test/test_pooling_layer.cpp
@@ -132,7 +132,8 @@ class PoolingLayerTest : public ::testing::Test {
     // [31 9 2 22 27 20]
     // [ 8 28 33 17 10 15]
     // [30 5 34 12 14 16]
-    // [ 4 36 29 13 18 11] (this is generated by magic(6) in MATLAB)
+    // [ 4 36 29 13 18 11]
+    // (this is generated by magic(6) in MATLAB)
     for (int i = 0; i < 36 * num * channels; i += 36) {
       blob_bottom_->mutable_cpu_data()[i + 0] = 35;
       blob_bottom_->mutable_cpu_data()[i + 1] = 1;
@@ -256,7 +257,8 @@ class PoolingLayerTest : public ::testing::Test {
     // [31 9 2 22 27 20]
     // [ 8 28 33 17 10 15]
     // [30 5 34 12 14 16]
-    // [ 4 36 29 13 18 11] (this is generated by magic(6) in MATLAB)
+    // [ 4 36 29 13 18 11]
+    // (this is generated by magic(6) in MATLAB)
     for (int i = 0; i < 36 * num * channels; i += 36) {
       blob_bottom_->mutable_cpu_data()[i + 0] = 35;
       blob_bottom_->mutable_cpu_data()[i + 1] = 1;
-- 
2.7.4
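
Reviewer note, not part of the patch: every kernel touched above shares the
same output-to-input window arithmetic (stride, padding, clipping). The
minimal host-side C++ below restates that arithmetic for one max-pooled
output element. The function name MaxPoolOne and the single-channel,
row-major float layout are illustrative assumptions; only the hstart/wstart
computation and min/max clipping mirror what MaxPoolForward does per thread.

    #include <algorithm>
    #include <cfloat>

    // Computes the max over the input window that pooled output element
    // (ph, pw) covers, for a single row-major channel of size height x width.
    float MaxPoolOne(const float* bottom, int height, int width,
                     int ph, int pw, int kernel_h, int kernel_w,
                     int stride_h, int stride_w, int pad_h, int pad_w) {
      int hstart = ph * stride_h - pad_h;  // window may begin in the padding
      int wstart = pw * stride_w - pad_w;
      int hend = std::min(hstart + kernel_h, height);  // clip to valid input
      int wend = std::min(wstart + kernel_w, width);
      hstart = std::max(hstart, 0);
      wstart = std::max(wstart, 0);
      float maxval = -FLT_MAX;
      for (int h = hstart; h < hend; ++h) {
        for (int w = wstart; w < wend; ++w) {
          maxval = std::max(maxval, bottom[h * width + w]);
        }
      }
      return maxval;
    }

Because hend/wend are clipped before hstart/wstart are clamped to zero, a
window that overlaps the padded border simply shrinks; padding never
contributes a value to the max.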