From: Jonathan L Long
Date: Thu, 11 Sep 2014 04:48:51 +0000 (-0700)
Subject: split off Reshape for neuron layers
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=256209da4def9f54a6401c19802501bde5cbf1ba;p=platform%2Fupstream%2Fcaffe.git

split off Reshape for neuron layers
---

diff --git a/include/caffe/neuron_layers.hpp b/include/caffe/neuron_layers.hpp
index 36acf96..0968a20 100644
--- a/include/caffe/neuron_layers.hpp
+++ b/include/caffe/neuron_layers.hpp
@@ -26,7 +26,7 @@ class NeuronLayer : public Layer<Dtype> {
  public:
   explicit NeuronLayer(const LayerParameter& param)
      : Layer<Dtype>(param) {}
-  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
 
   virtual inline LayerParameter_LayerType type() const {
@@ -170,6 +170,8 @@ class DropoutLayer : public NeuronLayer<Dtype> {
       : NeuronLayer<Dtype>(param) {}
   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
 
   virtual inline LayerParameter_LayerType type() const {
     return LayerParameter_LayerType_DROPOUT;
@@ -367,6 +369,8 @@ class CuDNNReLULayer : public ReLULayer<Dtype> {
       : ReLULayer<Dtype>(param) {}
   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
   virtual ~CuDNNReLULayer();
 
  protected:
@@ -449,6 +453,8 @@ class CuDNNSigmoidLayer : public SigmoidLayer<Dtype> {
       : SigmoidLayer<Dtype>(param) {}
   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
   virtual ~CuDNNSigmoidLayer();
 
  protected:
@@ -533,6 +539,8 @@ class CuDNNTanHLayer : public TanHLayer<Dtype> {
       : TanHLayer<Dtype>(param) {}
   virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top);
   virtual ~CuDNNTanHLayer();
 
  protected:
diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp
index f8bf77f..083868f 100644
--- a/src/caffe/layers/cudnn_relu_layer.cpp
+++ b/src/caffe/layers/cudnn_relu_layer.cpp
@@ -13,12 +13,20 @@ void CuDNNReLULayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
   ReLULayer<Dtype>::LayerSetUp(bottom, top);
   // initialize cuDNN
   CUDNN_CHECK(cudnnCreate(&handle_));
+  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
+  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
+}
+
+template <typename Dtype>
+void CuDNNReLULayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
+    vector<Blob<Dtype>*>* top) {
+  ReLULayer<Dtype>::Reshape(bottom, top);
   const int N = bottom[0]->num();
   const int K = bottom[0]->channels();
   const int H = bottom[0]->height();
   const int W = bottom[0]->width();
-  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
-  cudnn::createTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
+  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
+  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
 }
 
 template <typename Dtype>
diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp
index 488c754..3fe800d 100644
--- a/src/caffe/layers/cudnn_sigmoid_layer.cpp
+++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp
@@ -13,12 +13,20 @@ void CuDNNSigmoidLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
   SigmoidLayer<Dtype>::LayerSetUp(bottom, top);
   // initialize cuDNN
   CUDNN_CHECK(cudnnCreate(&handle_));
+  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
+  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
+}
+
+template <typename Dtype>
+void CuDNNSigmoidLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
+    vector<Blob<Dtype>*>* top) {
+  SigmoidLayer<Dtype>::Reshape(bottom, top);
   const int N = bottom[0]->num();
   const int K = bottom[0]->channels();
   const int H = bottom[0]->height();
   const int W = bottom[0]->width();
-  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
-  cudnn::createTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
+  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
+  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
 }
 
 template <typename Dtype>
diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp
index 32b6611..7a5c06f 100644
--- a/src/caffe/layers/cudnn_tanh_layer.cpp
+++ b/src/caffe/layers/cudnn_tanh_layer.cpp
@@ -13,12 +13,20 @@ void CuDNNTanHLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
   TanHLayer<Dtype>::LayerSetUp(bottom, top);
   // initialize cuDNN
   CUDNN_CHECK(cudnnCreate(&handle_));
+  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_);
+  cudnn::createTensor4dDesc<Dtype>(&top_desc_);
+}
+
+template <typename Dtype>
+void CuDNNTanHLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
+    vector<Blob<Dtype>*>* top) {
+  TanHLayer<Dtype>::Reshape(bottom, top);
   const int N = bottom[0]->num();
   const int K = bottom[0]->channels();
   const int H = bottom[0]->height();
   const int W = bottom[0]->width();
-  cudnn::createTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
-  cudnn::createTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
+  cudnn::setTensor4dDesc<Dtype>(&bottom_desc_, N, K, H, W);
+  cudnn::setTensor4dDesc<Dtype>(&top_desc_, N, K, H, W);
 }
 
 template <typename Dtype>
diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp
index 52537d1..47feb1d 100644
--- a/src/caffe/layers/dropout_layer.cpp
+++ b/src/caffe/layers/dropout_layer.cpp
@@ -14,9 +14,6 @@ template <typename Dtype>
 void DropoutLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   NeuronLayer<Dtype>::LayerSetUp(bottom, top);
-  // Set up the cache for random number generation
-  rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(),
-      bottom[0]->height(), bottom[0]->width());
   threshold_ = this->layer_param_.dropout_param().dropout_ratio();
   DCHECK(threshold_ > 0.);
   DCHECK(threshold_ < 1.);
@@ -25,6 +22,15 @@ void DropoutLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
 }
 
 template <typename Dtype>
+void DropoutLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
+    vector<Blob<Dtype>*>* top) {
+  NeuronLayer<Dtype>::Reshape(bottom, top);
+  // Set up the cache for random number generation
+  rand_vec_.Reshape(bottom[0]->num(), bottom[0]->channels(),
+      bottom[0]->height(), bottom[0]->width());
+}
+
+template <typename Dtype>
 void DropoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   const Dtype* bottom_data = bottom[0]->cpu_data();
diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp
index eff7948..c28e36e 100644
--- a/src/caffe/layers/neuron_layer.cpp
+++ b/src/caffe/layers/neuron_layer.cpp
@@ -6,13 +6,9 @@
 namespace caffe {
 
 template <typename Dtype>
-void NeuronLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+void NeuronLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  // NeuronLayer allows in-place computations. If the computation is not
-  // in-place, we will need to initialize the top blob.
-  if ((*top)[0] != bottom[0]) {
-    (*top)[0]->ReshapeLike(*bottom[0]);
-  }
+  (*top)[0]->ReshapeLike(*bottom[0]);
 }
 
 INSTANTIATE_CLASS(NeuronLayer);
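The pattern this patch establishes: LayerSetUp keeps only one-time initialization (creating the cuDNN handle and empty tensor descriptors), while Reshape carries all shape-dependent work (sizing the descriptors and buffers from the bottom blob), so input dimensions can change between passes without redoing setup. Below is a minimal standalone sketch of that control flow, not the patch itself; the Layer, Blob, and ToyReLU types are simplified stand-ins for Caffe's real classes, and the SetUp sequence is reduced to its essentials.

// Illustration of the LayerSetUp/Reshape split; all types here are
// simplified stand-ins, not Caffe's actual Layer/Blob API.
#include <cstdio>
#include <vector>

struct Blob { int n, c, h, w; };  // stand-in for a shaped data blob

class Layer {
 public:
  virtual ~Layer() {}
  // One-time configuration: runs exactly once, before any Reshape.
  virtual void LayerSetUp(const std::vector<Blob*>& bottom,
      std::vector<Blob*>* top) {}
  // Shape-dependent work: runs at setup and again whenever input
  // dimensions change, so it must be safe to call repeatedly.
  virtual void Reshape(const std::vector<Blob*>& bottom,
      std::vector<Blob*>* top) = 0;
  // Simplified setup flow: one-time LayerSetUp, then an initial Reshape.
  void SetUp(const std::vector<Blob*>& bottom, std::vector<Blob*>* top) {
    LayerSetUp(bottom, top);
    Reshape(bottom, top);
  }
};

class ToyReLU : public Layer {
 public:
  void LayerSetUp(const std::vector<Blob*>& bottom,
      std::vector<Blob*>* top) override {
    std::printf("LayerSetUp: create handles/descriptors once\n");
  }
  void Reshape(const std::vector<Blob*>& bottom,
      std::vector<Blob*>* top) override {
    *(*top)[0] = *bottom[0];  // top mirrors bottom's shape
    std::printf("Reshape: %dx%dx%dx%d\n", bottom[0]->n, bottom[0]->c,
        bottom[0]->h, bottom[0]->w);
  }
};

int main() {
  Blob in{2, 3, 4, 4}, out{};
  std::vector<Blob*> bottom{&in}, top{&out};
  ToyReLU relu;
  relu.SetUp(bottom, &top);    // LayerSetUp once, then initial Reshape
  in.h = in.w = 8;             // input dims change at run time...
  relu.Reshape(bottom, &top);  // ...only shape-dependent work reruns
  return 0;
}

With this split, resizing a net's input costs one Reshape call per layer rather than a full re-setup, which is what lets the cuDNN layers above keep their descriptors alive and merely call setTensor4dDesc with the new dimensions.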