From dd82d97190a2c14ebe3799a7ed01ff2c38578f96 Mon Sep 17 00:00:00 2001
From: Ronghang Hu
Date: Thu, 3 Jul 2014 20:42:58 -0700
Subject: [PATCH] Update pooling_layer.cpp

Replace pad_, kernel_size_, stride_ with pad_h_, pad_w_, kernel_h_,
kernel_w_, stride_h_, stride_w_ to support pooling on rectangle regions.
---
 src/caffe/layers/pooling_layer.cpp | 81 +++++++++++++++++++++++++++-----------
 1 file changed, 57 insertions(+), 24 deletions(-)

diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp
index bc00207..3b64741 100644
--- a/src/caffe/layers/pooling_layer.cpp
+++ b/src/caffe/layers/pooling_layer.cpp
@@ -28,35 +28,68 @@ void PoolingLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
     max_top_blobs_ = 1;
   }
   Layer<Dtype>::SetUp(bottom, top);
-  kernel_size_ = this->layer_param_.pooling_param().kernel_size();
-  stride_ = this->layer_param_.pooling_param().stride();
-  pad_ = this->layer_param_.pooling_param().pad();
-  if (pad_ != 0) {
+  PoolingParameter pool_param = this->layer_param_.pooling_param();
+  CHECK(!pool_param.has_kernel_size() !=
+      !(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
+      << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
+  CHECK(pool_param.has_kernel_size() ||
+      (pool_param.has_kernel_h() && pool_param.has_kernel_w()))
+      << "For non-square filters both kernel_h and kernel_w are required.";
+  CHECK((pool_param.has_pad()
+      && !(pool_param.has_pad_h() || pool_param.has_pad_w()))
+      || (!pool_param.has_pad()
+      && (pool_param.has_pad_h() && pool_param.has_pad_w())
+      || (!pool_param.has_pad_h() && !pool_param.has_pad_w())))
+      << "Padding size is pad OR pad_h and pad_w; not both";
+  CHECK(!pool_param.has_stride() !=
+      !(pool_param.has_stride_h() && pool_param.has_stride_w()))
+      << "Stride is stride OR stride_h and stride_w are required.";
+  if (pool_param.has_kernel_size()) {
+    kernel_h_ = kernel_w_ = pool_param.kernel_size();
+  } else {
+    kernel_h_ = pool_param.kernel_h();
+    kernel_w_ = pool_param.kernel_w();
+  }
+  CHECK_GT(kernel_h_ * kernel_w_, 0) << "Filter dimensions cannot be zero.";
+  if (pool_param.has_pad()) {
+    pad_h_ = pad_w_ = pool_param.pad();
+  } else {
+    pad_h_ = pool_param.pad_h();
+    pad_w_ = pool_param.pad_w();
+  }
+  if (pool_param.has_stride()) {
+    stride_h_ = stride_w_ = pool_param.stride();
+  } else {
+    stride_h_ = pool_param.stride_h();
+    stride_w_ = pool_param.stride_w();
+  }
+  if (pad_h_ != 0 || pad_w_ != 0) {
     CHECK(this->layer_param_.pooling_param().pool()
         == PoolingParameter_PoolMethod_AVE
         || this->layer_param_.pooling_param().pool()
         == PoolingParameter_PoolMethod_MAX)
         << "Padding implemented only for average and max pooling.";
-    CHECK_LT(pad_, kernel_size_);
+    CHECK_LT(pad_h_, kernel_h_);
+    CHECK_LT(pad_w_, kernel_w_);
   }
   channels_ = bottom[0]->channels();
   height_ = bottom[0]->height();
   width_ = bottom[0]->width();
   pooled_height_ = static_cast<int>(ceil(static_cast<float>(
-      height_ + 2 * pad_ - kernel_size_) / stride_)) + 1;
+      height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
   pooled_width_ = static_cast<int>(ceil(static_cast<float>(
-      width_ + 2 * pad_ - kernel_size_) / stride_)) + 1;
-  if (pad_) {
+      width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
+  if (pad_h_ || pad_w_) {
     // If we have padding, ensure that the last pooling starts strictly
     // inside the image (instead of at the padding); otherwise clip the last.
-    if ((pooled_height_ - 1) * stride_ >= height_ + pad_) {
+    if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) {
       --pooled_height_;
     }
-    if ((pooled_width_ - 1) * stride_ >= width_ + pad_) {
+    if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) {
       --pooled_width_;
     }
-    CHECK_LT((pooled_height_ - 1) * stride_, height_ + pad_);
-    CHECK_LT((pooled_width_ - 1) * stride_, width_ + pad_);
+    CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_);
+    CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_);
   }
   (*top)[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
       pooled_width_);
@@ -107,10 +140,10 @@ Dtype PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     for (int c = 0; c < channels_; ++c) {
       for (int ph = 0; ph < pooled_height_; ++ph) {
         for (int pw = 0; pw < pooled_width_; ++pw) {
-          int hstart = ph * stride_ - pad_;
-          int wstart = pw * stride_ - pad_;
-          int hend = min(hstart + kernel_size_, height_);
-          int wend = min(wstart + kernel_size_, width_);
+          int hstart = ph * stride_h_ - pad_h_;
+          int wstart = pw * stride_w_ - pad_w_;
+          int hend = min(hstart + kernel_h_, height_);
+          int wend = min(wstart + kernel_w_, width_);
           hstart = max(hstart, 0);
           wstart = max(wstart, 0);
           const int pool_index = ph * pooled_width_ + pw;
@@ -149,10 +182,10 @@ Dtype PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     for (int c = 0; c < channels_; ++c) {
      for (int ph = 0; ph < pooled_height_; ++ph) {
         for (int pw = 0; pw < pooled_width_; ++pw) {
-          int hstart = ph * stride_ - pad_;
-          int wstart = pw * stride_ - pad_;
-          int hend = min(hstart + kernel_size_, height_ + pad_);
-          int wend = min(wstart + kernel_size_, width_ + pad_);
+          int hstart = ph * stride_h_ - pad_h_;
+          int wstart = pw * stride_w_ - pad_w_;
+          int hend = min(hstart + kernel_h_, height_ + pad_h_);
+          int wend = min(wstart + kernel_w_, width_ + pad_w_);
           int pool_size = (hend - hstart) * (wend - wstart);
           hstart = max(hstart, 0);
           wstart = max(wstart, 0);
@@ -231,10 +264,10 @@ void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     for (int c = 0; c < channels_; ++c) {
       for (int ph = 0; ph < pooled_height_; ++ph) {
         for (int pw = 0; pw < pooled_width_; ++pw) {
-          int hstart = ph * stride_ - pad_;
-          int wstart = pw * stride_ - pad_;
-          int hend = min(hstart + kernel_size_, height_ + pad_);
-          int wend = min(wstart + kernel_size_, width_ + pad_);
+          int hstart = ph * stride_h_ - pad_h_;
+          int wstart = pw * stride_w_ - pad_w_;
+          int hend = min(hstart + kernel_h_, height_ + pad_h_);
+          int wend = min(wstart + kernel_w_, width_ + pad_w_);
           int pool_size = (hend - hstart) * (wend - wstart);
           hstart = max(hstart, 0);
           wstart = max(wstart, 0);
-- 
2.7.4