#include <cfloat>
#include <vector>
+#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
+#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
width_ + 2 * pad_ - kernel_size_) / stride_)) + 1;
(*top)[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
pooled_width_);
+  // If max pooling, we will initialize the vector index part.
+  // NOTE: use the nested PoolingParameter accessor and enum, consistent with
+  // the STOCHASTIC check below and the switch in Forward/Backward; the old
+  // LayerParameter-level accessor is the deprecated API.
+  if (this->layer_param_.pooling_param().pool() ==
+      PoolingParameter_PoolMethod_MAX) {
+    max_idx_.reset(new SyncedMemory((*top)[0]->count() * sizeof(int)));
+  }
// If stochastic pooling, we will initialize the random index part.
if (this->layer_param_.pooling_param().pool() ==
PoolingParameter_PoolMethod_STOCHASTIC) {
  // Different pooling methods. We explicitly do the switch outside the for
  // loop to save time, although this results in more code.
int top_count = (*top)[0]->count();
+ int* mask;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
- // Initialize
+ // Initialize
+ mask = (int*)max_idx_->mutable_cpu_data();
for (int i = 0; i < top_count; ++i) {
top_data[i] = -FLT_MAX;
+ mask[i] = 0;
}
// The main loop
for (int n = 0; n < bottom[0]->num(); ++n) {
int wend = min(wstart + kernel_size_, width_);
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
- top_data[ph * pooled_width_ + pw] =
- max(top_data[ph * pooled_width_ + pw],
- bottom_data[h * width_ + w]);
+ if (bottom_data[h * width_ + w] > top_data[ph * pooled_width_ + pw]) {
+ top_data[ph * pooled_width_ + pw] = bottom_data[h * width_ + w];
+ mask[ph * pooled_width_ + pw] = h * width_ + w;
+ }
}
}
}
// compute offset
bottom_data += bottom[0]->offset(0, 1);
top_data += (*top)[0]->offset(0, 1);
+ mask += (*top)[0]->offset(0, 1);
}
}
break;
  // Different pooling methods. We explicitly do the switch outside the for
  // loop to save time, although this results in more code.
memset(bottom_diff, 0, (*bottom)[0]->count() * sizeof(Dtype));
+  // Winning-input indices recorded by Forward; Backward only reads them
+  // (L78 below), so keep the pointer const. cpu_data() is the const
+  // accessor — the old (int*) C-style cast silently stripped const.
+  const int* mask;
  switch (this->layer_param_.pooling_param().pool()) {
  case PoolingParameter_PoolMethod_MAX:
    // The main loop
+    mask = static_cast<const int*>(max_idx_->cpu_data());
for (int n = 0; n < top[0]->num(); ++n) {
for (int c = 0; c < channels_; ++c) {
for (int ph = 0; ph < pooled_height_; ++ph) {
for (int pw = 0; pw < pooled_width_; ++pw) {
- int hstart = ph * stride_;
- int wstart = pw * stride_;
- int hend = min(hstart + kernel_size_, height_);
- int wend = min(wstart + kernel_size_, width_);
- for (int h = hstart; h < hend; ++h) {
- for (int w = wstart; w < wend; ++w) {
- bottom_diff[h * width_ + w] +=
- top_diff[ph * pooled_width_ + pw] *
- (bottom_data[h * width_ + w] ==
- top_data[ph * pooled_width_ + pw]);
- }
- }
+ bottom_diff[mask[ph * pooled_width_ + pw]]+=top_diff[ph * pooled_width_ + pw];
}
}
// offset
top_data += top[0]->offset(0, 1);
bottom_diff += (*bottom)[0]->offset(0, 1);
top_diff += top[0]->offset(0, 1);
+ mask += top[0]->offset(0, 1);
}
}
break;
}
*/
+/*
+TYPED_TEST(PoolingLayerTest, PrintCPUBackward) {
+ LayerParameter layer_param;
+ layer_param.set_kernelsize(3);
+ layer_param.set_stride(2);
+ layer_param.set_pool(LayerParameter_PoolMethod_MAX);
+ Caffe::set_mode(Caffe::CPU);
+ PoolingLayer<TypeParam> layer(layer_param);
+ layer.SetUp(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ layer.Forward(this->blob_bottom_vec_, &(this->blob_top_vec_));
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl;
+ }
+ for (int i = 0; i < this->blob_top_->count(); ++i) {
+ cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl;
+ }
+
+ for (int i = 0; i < this->blob_top_->count(); ++i) {
+ this->blob_top_->mutable_cpu_diff()[i] = i;
+ }
+ layer.Backward(this->blob_top_vec_, true, &(this->blob_bottom_vec_));
+ for (int i = 0; i < this->blob_bottom_->count(); ++i) {
+ cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl;
+ }
+}
+*/
+
TYPED_TEST(PoolingLayerTest, TestCPUGradientMax) {
LayerParameter layer_param;
PoolingParameter* pooling_param = layer_param.mutable_pooling_param();