int width_;
};
-template <typename Dtype>
-class PoolingLayer;
+template <typename Dtype> class PoolingLayer;
+template <typename Dtype> class SplitLayer;
template <typename Dtype>
class LRNMapLayer : public Layer<Dtype> {
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom);
+ shared_ptr<SplitLayer<Dtype> > split_layer_;
+ vector<Blob<Dtype>*> split_top_vec_;
shared_ptr<PowerLayer<Dtype> > square_layer_;
Blob<Dtype> square_input_;
Blob<Dtype> square_output_;
shared_ptr<EltwiseProductLayer<Dtype> > product_layer_;
Blob<Dtype> product_data_input_;
vector<Blob<Dtype>*> product_bottom_vec_;
- vector<Blob<Dtype>*> product_top_vec_;
};
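For orientation, the sub-layers held by this class compose to out = in .* (1 + alpha * avg_pool(in^2))^(-beta). Below is a minimal standalone sketch of that per-element math in plain C++ on a 1-D signal with no Caffe types; the zero-padded window, the average pooling mode, and the power-layer parameters (shift 1, scale alpha, power -beta) are assumptions inferred from the staging in SetUp, not the layer's actual code.

#include <cmath>
#include <vector>

// Per-element LRN-map math, assuming the pooling stage averages the
// squared inputs over a zero-padded local window:
//   out[i] = in[i] * (1 + alpha * mean(in^2 over window))^(-beta)
std::vector<float> lrn_map_1d(const std::vector<float>& in,
                              int size, float alpha, float beta) {
  const int pre_pad = (size - 1) / 2;
  const int n = static_cast<int>(in.size());
  std::vector<float> out(n);
  for (int i = 0; i < n; ++i) {
    float sum_sq = 0.0f;
    for (int j = i - pre_pad; j < i - pre_pad + size; ++j) {
      if (j >= 0 && j < n) sum_sq += in[j] * in[j];
    }
    const float scale = 1.0f + alpha * (sum_sq / size);  // power layer: shift + scale * x
    out[i] = in[i] * std::pow(scale, -beta);             // numerator * denominator^(-beta)
  }
  return out;
}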
template <typename Dtype>
const Dtype pre_pad = (size_ - 1) / 2;
const Dtype alpha = this->layer_param_.lrn_map_param().alpha();
const Dtype beta = this->layer_param_.lrn_map_param().beta();
+ // Set up a split layer to feed the input to both the numerator and the
+ // denominator paths.
+ split_top_vec_.clear();
+ split_top_vec_.push_back(bottom[0]);
+ split_top_vec_.push_back(&square_input_);
+ LayerParameter split_param;
+ split_layer_.reset(new SplitLayer<Dtype>(split_param));
+ split_layer_->SetUp(bottom, &split_top_vec_);
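The split layer is relied on for two things here: forward, it fans the single input out to every consumer; backward, it sums the gradients coming back along each branch. A hedged plain-array sketch of the forward fan-out (std::vector instead of Blobs, not the actual SplitLayer code):

#include <vector>

// Forward fan-out: every top of the split sees the same data as the
// bottom. Plain std::vector sketch of the semantics only.
void split_forward(const std::vector<float>& bottom,
                   std::vector<std::vector<float> >& tops) {
  for (size_t t = 0; t < tops.size(); ++t) {
    tops[t] = bottom;
  }
}

In the layer itself the first top is bottom[0] and the second is square_input_, so the numerator path reads the input unchanged while the denominator path squares its own copy.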
// Set up square layer to square the inputs.
square_input_.Reshape(num, channels, height, width);
square_bottom_vec_.clear();
CHECK_EQ(power_output_.channels(), channels);
CHECK_EQ(power_output_.height(), height);
CHECK_EQ(power_output_.width(), width);
- // Set up a product layer to compute outputs by multiplying inputs by scale.
+ // Set up a product layer to compute the outputs by multiplying the inputs
+ // by the denominator computed by the power layer.
product_bottom_vec_.clear();
product_bottom_vec_.push_back(bottom[0]);
product_bottom_vec_.push_back(&power_output_);
- product_top_vec_.clear();
- product_top_vec_.push_back((*top)[0]);
LayerParameter product_param;
product_layer_.reset(new EltwiseProductLayer<Dtype>(product_param));
- product_layer_->SetUp(product_bottom_vec_, &product_top_vec_);
+ product_layer_->SetUp(product_bottom_vec_, top);
CHECK_EQ((*top)[0]->num(), num);
CHECK_EQ((*top)[0]->channels(), channels);
CHECK_EQ((*top)[0]->height(), height);
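For the backward passes below, the product layer supplies the usual elementwise-product gradients: for top = a .* b, the diffs are da = dtop .* b and db = dtop .* a. A plain-array sketch of that rule (hypothetical names, not the EltwiseProductLayer implementation):

#include <vector>

// Elementwise-product gradients: for top = a .* b,
// da = dtop .* b and db = dtop .* a.
void eltwise_product_backward(const std::vector<float>& a,
                              const std::vector<float>& b,
                              const std::vector<float>& top_diff,
                              std::vector<float>* a_diff,
                              std::vector<float>* b_diff) {
  a_diff->resize(a.size());
  b_diff->resize(b.size());
  for (size_t i = 0; i < a.size(); ++i) {
    (*a_diff)[i] = top_diff[i] * b[i];
    (*b_diff)[i] = top_diff[i] * a[i];
  }
}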
template <typename Dtype>
Dtype LRNMapLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
- const int count = bottom[0]->count();
- const Dtype* bottom_data = bottom[0]->cpu_data();
- Dtype* square_bottom_data = square_input_.mutable_cpu_data();
- caffe_copy(count, bottom_data, square_bottom_data);
+ split_layer_->Forward(bottom, &split_top_vec_);
square_layer_->Forward(square_bottom_vec_, &square_top_vec_);
pool_layer_->Forward(square_top_vec_, &pool_top_vec_);
power_layer_->Forward(pool_top_vec_, &power_top_vec_);
- product_layer_->Forward(product_bottom_vec_, &product_top_vec_);
+ product_layer_->Forward(product_bottom_vec_, top);
return Dtype(0.);
}
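Each stage's top vector feeds the next stage's bottom, so the Forward calls read top to bottom as a pipeline. Sketched in terms of the member vectors above (the pool stage's intermediate blob is not shown in this excerpt, so its name is assumed):

// Dataflow through the staged forward:
//   bottom --------------split----> {bottom[0], square_input_}
//   square_bottom_vec_ --square---> square_top_vec_ (square_output_)
//   square_top_vec_ ------pool----> pool_top_vec_
//   pool_top_vec_ --------power---> power_top_vec_ (power_output_)
//   {bottom[0], power_output_} --product--> (*top)[0]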
void LRNMapLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (propagate_down) {
- product_layer_->Backward(product_top_vec_, true, &product_bottom_vec_);
+ product_layer_->Backward(top, true, &product_bottom_vec_);
power_layer_->Backward(power_top_vec_, true, &pool_top_vec_);
pool_layer_->Backward(pool_top_vec_, true, &square_top_vec_);
square_layer_->Backward(square_top_vec_, true, &square_bottom_vec_);
- const int count = (*bottom)[0]->count();
- const Dtype* scale_diff = square_input_.cpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
- caffe_axpy(count, Dtype(1), scale_diff, bottom_diff);
+ split_layer_->Backward(split_top_vec_, true, bottom);
}
}
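Backward replays the stages in reverse. Because split_top_vec_[0] aliases bottom[0], the product layer's Backward has already left the numerator-path gradient in bottom[0]'s diff by the time split_layer_->Backward runs, so the split only has to add square_input_'s diff, which is exactly what the removed caffe_axpy call did by hand. A plain-array sketch of that final accumulation (hypothetical names):

#include <vector>

// Total gradient w.r.t. the input: numerator-branch diff (via the product
// layer) plus denominator-branch diff (via square -> pool -> power).
std::vector<float> combine_branch_diffs(
    const std::vector<float>& numerator_diff,
    const std::vector<float>& denominator_diff) {
  std::vector<float> bottom_diff(numerator_diff.size());
  for (size_t i = 0; i < numerator_diff.size(); ++i) {
    bottom_diff[i] = numerator_diff[i] + denominator_diff[i];
  }
  return bottom_diff;
}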
template <typename Dtype>
Dtype LRNMapLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
- const int count = bottom[0]->count();
- const Dtype* bottom_data = bottom[0]->gpu_data();
- Dtype* square_bottom_data = square_input_.mutable_gpu_data();
- caffe_gpu_copy(count, bottom_data, square_bottom_data);
+ split_layer_->Forward(bottom, &split_top_vec_);
square_layer_->Forward(square_bottom_vec_, &square_top_vec_);
pool_layer_->Forward(square_top_vec_, &pool_top_vec_);
power_layer_->Forward(pool_top_vec_, &power_top_vec_);
- product_layer_->Forward(product_bottom_vec_, &product_top_vec_);
+ product_layer_->Forward(product_bottom_vec_, top);
return Dtype(0.);
}
void LRNMapLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
if (propagate_down) {
- product_layer_->Backward(product_top_vec_, true, &product_bottom_vec_);
+ product_layer_->Backward(top, true, &product_bottom_vec_);
power_layer_->Backward(power_top_vec_, true, &pool_top_vec_);
pool_layer_->Backward(pool_top_vec_, true, &square_top_vec_);
square_layer_->Backward(square_top_vec_, true, &square_bottom_vec_);
- const int count = (*bottom)[0]->count();
- const Dtype* scale_diff = square_input_.gpu_diff();
- Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
- caffe_gpu_axpy(count, Dtype(1), scale_diff, bottom_diff);
+ split_layer_->Backward(split_top_vec_, true, bottom);
}
}