From: Ronghang Hu
Date: Mon, 24 Aug 2015 21:46:23 +0000 (-0700)
Subject: Fix previous mistake on unimplemented top and address pyramid_height_==1 in SPPLayer
X-Git-Tag: submit/tizen/20180823.020014~382^2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=292dbc5866c0b0b2ad56278591dce8b519166b20;p=platform%2Fupstream%2Fcaffeonacl.git

Fix previous mistake on unimplemented top and address pyramid_height_==1 in SPPLayer

also, do nothing in SPPLayer Reshape if already reshaped once and bottom size unchanged
---

diff --git a/include/caffe/vision_layers.hpp b/include/caffe/vision_layers.hpp
index a6bd86a..211e3d9 100644
--- a/include/caffe/vision_layers.hpp
+++ b/include/caffe/vision_layers.hpp
@@ -471,13 +471,7 @@ class SPPLayer : public Layer<Dtype> {
   virtual inline const char* type() const { return "SPP"; }
   virtual inline int ExactNumBottomBlobs() const { return 1; }
-  virtual inline int MinTopBlobs() const { return 1; }
-  // MAX POOL layers can output an extra top blob for the mask;
-  // others can only output the pooled inputs.
-  virtual inline int MaxTopBlobs() const {
-    return (this->layer_param_.pooling_param().pool() ==
-            PoolingParameter_PoolMethod_MAX) ? 2 : 1;
-  }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
 
  protected:
   virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
@@ -491,9 +485,11 @@ class SPPLayer : public Layer<Dtype> {
   int pyramid_height_;
   int bottom_h_, bottom_w_;
+  int num_;
   int channels_;
   int kernel_h_, kernel_w_;
   int pad_h_, pad_w_;
+  bool reshaped_first_time_;
 
   /// the internal Split layer that feeds the pooling layers
   shared_ptr<SplitLayer<Dtype> > split_layer_;
diff --git a/src/caffe/layers/spp_layer.cpp b/src/caffe/layers/spp_layer.cpp
index 795dd71..d762291 100644
--- a/src/caffe/layers/spp_layer.cpp
+++ b/src/caffe/layers/spp_layer.cpp
@@ -66,8 +66,11 @@ void SPPLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top) {
   SPPParameter spp_param = this->layer_param_.spp_param();
 
+  num_ = bottom[0]->num();
+  channels_ = bottom[0]->channels();
   bottom_h_ = bottom[0]->height();
   bottom_w_ = bottom[0]->width();
+  reshaped_first_time_ = false;
   CHECK_GT(bottom_h_, 0) << "Input dimensions cannot be zero.";
   CHECK_GT(bottom_w_, 0) << "Input dimensions cannot be zero.";
 
@@ -82,6 +85,15 @@ void SPPLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
   flatten_outputs_.clear();
   concat_bottom_vec_.clear();
 
+  if (pyramid_height_ == 1) {
+    // pooling layer setup
+    LayerParameter pooling_param = GetPoolingParam(0, bottom_h_, bottom_w_,
+        spp_param);
+    pooling_layers_.push_back(shared_ptr<PoolingLayer<Dtype> > (
+        new PoolingLayer<Dtype>(pooling_param)));
+    pooling_layers_[0]->SetUp(bottom, top);
+    return;
+  }
   // split layer output holders setup
   for (int i = 0; i < pyramid_height_; i++) {
     split_top_vec_.push_back(new Blob<Dtype>());
@@ -135,10 +147,26 @@ void SPPLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top) {
   CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
       << "corresponding to (num, channels, height, width)";
+  // Do nothing if bottom shape is unchanged since last Reshape
+  if (num_ == bottom[0]->num() && channels_ == bottom[0]->channels() &&
+      bottom_h_ == bottom[0]->height() && bottom_w_ == bottom[0]->width() &&
+      reshaped_first_time_) {
+    return;
+  }
+  num_ = bottom[0]->num();
   channels_ = bottom[0]->channels();
   bottom_h_ = bottom[0]->height();
   bottom_w_ = bottom[0]->width();
+  reshaped_first_time_ = true;
   SPPParameter spp_param = this->layer_param_.spp_param();
+  if (pyramid_height_ == 1) {
+    LayerParameter pooling_param = GetPoolingParam(0, bottom_h_, bottom_w_,
+        spp_param);
+    pooling_layers_[0].reset(new PoolingLayer<Dtype>(pooling_param));
+    pooling_layers_[0]->SetUp(bottom, top);
+    pooling_layers_[0]->Reshape(bottom, top);
+    return;
+  }
   split_layer_->Reshape(bottom, split_top_vec_);
   for (int i = 0; i < pyramid_height_; i++) {
     LayerParameter pooling_param = GetPoolingParam(
@@ -159,6 +187,10 @@ void SPPLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
 template <typename Dtype>
 void SPPLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
       const vector<Blob<Dtype>*>& top) {
+  if (pyramid_height_ == 1) {
+    pooling_layers_[0]->Forward(bottom, top);
+    return;
+  }
   split_layer_->Forward(bottom, split_top_vec_);
   for (int i = 0; i < pyramid_height_; i++) {
     pooling_layers_[i]->Forward(
@@ -175,6 +207,10 @@ void SPPLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
   if (!propagate_down[0]) {
     return;
   }
+  if (pyramid_height_ == 1) {
+    pooling_layers_[0]->Backward(top, propagate_down, bottom);
+    return;
+  }
   vector<bool> concat_propagate_down(pyramid_height_, true);
   concat_layer_->Backward(top, concat_propagate_down, concat_bottom_vec_);
   for (int i = 0; i < pyramid_height_; i++) {
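
Note for readers of the patch: the two behavioral changes above can be illustrated
outside of Caffe. The following is a minimal standalone C++ sketch, not Caffe code;
the ToySPP and ToyPooling classes and the Shape struct are hypothetical stand-ins.
It shows the reshaped_first_time_ early-return guard and the pyramid_height_ == 1
fast path that bypasses the split/pool/flatten/concat plumbing.

#include <iostream>

// Hypothetical stand-in for a blob's (num, channels, height, width) shape.
struct Shape {
  int num, channels, height, width;
  bool operator==(const Shape& o) const {
    return num == o.num && channels == o.channels &&
           height == o.height && width == o.width;
  }
};

// Hypothetical stand-in for PoolingLayer<Dtype>.
class ToyPooling {
 public:
  void Reshape(const Shape& bottom) {
    std::cout << "pooling reshaped to " << bottom.height << "x"
              << bottom.width << std::endl;
  }
};

class ToySPP {
 public:
  explicit ToySPP(int pyramid_height)
      : pyramid_height_(pyramid_height), reshaped_first_time_(false) {}

  void Reshape(const Shape& bottom) {
    // Do nothing if the bottom shape is unchanged since the last Reshape;
    // this mirrors the reshaped_first_time_ guard added by the patch.
    if (reshaped_first_time_ && bottom == last_shape_) {
      return;
    }
    last_shape_ = bottom;
    reshaped_first_time_ = true;
    if (pyramid_height_ == 1) {
      // Degenerate one-level pyramid: delegate directly to the single
      // pooling layer and skip the split/flatten/concat plumbing.
      pooling_.Reshape(bottom);
      return;
    }
    // ... the full split -> pool -> flatten -> concat reshape would go here.
  }

 private:
  int pyramid_height_;
  bool reshaped_first_time_;
  Shape last_shape_{};
  ToyPooling pooling_;
};

int main() {
  ToySPP spp(1);
  Shape s = {2, 3, 8, 8};
  spp.Reshape(s);  // first call: pooling layer is reshaped
  spp.Reshape(s);  // same shape: early return, prints nothing
  s.height = 16;
  spp.Reshape(s);  // changed shape: pooling layer is reshaped again
  return 0;
}

The guard matters because Caffe calls Reshape on every forward pass; rebuilding
the internal layers each time is wasted work when the bottom shape has not
changed. And with a one-level pyramid the split, flatten, and concat layers
contribute nothing, so the layer reduces to a single pooling layer, which also
explains the switch from MinTopBlobs/MaxTopBlobs to ExactNumTopBlobs: the SPP
layer always produces exactly one concatenated (or directly pooled) top blob.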