From 93afc1585c147257bec1a46eb32cbafc85d2c58f Mon Sep 17 00:00:00 2001
From: Sergio Guadarrama
Date: Thu, 27 Feb 2014 19:11:52 -0800
Subject: [PATCH] Split concat_layer into .cpp and .cu, cleaned lint errors

---
 src/caffe/layers/concat_layer.cpp    | 138 +++++++++++------------------------
 src/caffe/layers/concat_layer.cu     |  75 +++++++++++++++++++
 src/caffe/test/test_concat_layer.cpp |  31 ++++----
 3 files changed, 134 insertions(+), 110 deletions(-)
 create mode 100644 src/caffe/layers/concat_layer.cu

diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp
index 5c3d348..dc949c1 100644
--- a/src/caffe/layers/concat_layer.cpp
+++ b/src/caffe/layers/concat_layer.cpp
@@ -11,147 +11,95 @@ namespace caffe {
 template <typename Dtype>
 void ConcatLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
       vector<Blob<Dtype>*>* top) {
-  CHECK_GT(bottom.size(), 1) << "Concat Layer takes at least two blobs as input.";
-  CHECK_EQ(top->size(), 1) << "Concat Layer takes a single blob as output.";
+  CHECK_GT(bottom.size(), 1) <<
+    "Concat Layer takes at least two blobs as input.";
+  CHECK_EQ(top->size(), 1) <<
+    "Concat Layer takes a single blob as output.";
   concat_dim_ = this->layer_param_.concat_dim();
-  CHECK_GE(concat_dim_,0) << "concat_dim should be >= 0";
-  CHECK_LE(concat_dim_,1) <<
+  CHECK_GE(concat_dim_, 0) << "concat_dim should be >= 0";
+  CHECK_LE(concat_dim_, 1) <<
     "For now concat_dim <=1, it can only concat num and channels";
   // Intialize with the first blob
   COUNT_ = bottom[0]->count();
   NUM_ = bottom[0]->num();
   CHANNELS_ = bottom[0]->channels();
   HEIGHT_ = bottom[0]->height();
-  WIDTH_ = bottom[0]->width();
-  for (int i=1; i<bottom.size(); ++i) {
+  WIDTH_ = bottom[0]->width();
+  for (int i = 1; i < bottom.size(); ++i) {
     COUNT_ += bottom[i]->count();
-    if (concat_dim_==0) {
-      NUM_ += bottom[i]->num();
-    } else if (concat_dim_ == 1){
+    if (concat_dim_== 0) {
+      NUM_ += bottom[i]->num();
+    } else if (concat_dim_ == 1) {
       CHANNELS_ += bottom[i]->channels();
     } else if (concat_dim_ == 2) {
       HEIGHT_ += bottom[i]->height();
     } else if (concat_dim_ == 3) {
-      WIDTH_ += bottom[i]->width();
+      WIDTH_ += bottom[i]->width();
     }
   }
-  (*top)[0]->Reshape(NUM_, CHANNELS_, HEIGHT_, WIDTH_);
+  (*top)[0]->Reshape(NUM_, CHANNELS_, HEIGHT_, WIDTH_);
   CHECK_EQ(COUNT_, (*top)[0]->count());
-};
+}
 
 template <typename Dtype>
 void ConcatLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
-    vector<Blob<Dtype>*>* top) {
+      vector<Blob<Dtype>*>* top) {
   Dtype* top_data = (*top)[0]->mutable_cpu_data();
-  if (concat_dim_==0) {
-    int offset_num = 0;
-    for (int i=0; i<bottom.size(); ++i) {
+  if (concat_dim_ == 0) {
+    int offset_num = 0;
+    for (int i = 0; i < bottom.size(); ++i) {
       const Dtype* bottom_data = bottom[i]->cpu_data();
       int num_elem = bottom[i]->count();
       caffe_copy(num_elem, bottom_data, top_data+(*top)[0]->offset(offset_num));
       offset_num += bottom[i]->num();
     }
   } else if (concat_dim_ == 1) {
-    int offset_channel = 0;
-    for (int i=0; i<bottom.size(); ++i) {
+    int offset_channel = 0;
+    for (int i = 0; i < bottom.size(); ++i) {
       const Dtype* bottom_data = bottom[i]->cpu_data();
-      int num_elem = bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
-      for (int n=0; n<NUM_; ++n) {
+      int num_elem =
+        bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
+      for (int n = 0; n < NUM_; ++n) {
         caffe_copy(num_elem, bottom_data+bottom[i]->offset(n),
-          top_data+(*top)[0]->offset(n,offset_channel));
-      }
+          top_data+(*top)[0]->offset(n, offset_channel));
+      }
       offset_channel += bottom[i]->channels();
     }
   } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim_ << " not implemented yet";
-  }
-}
-
-template <typename Dtype>
-void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
-      vector<Blob<Dtype>*>* top) {
-  Dtype* top_data = (*top)[0]->mutable_gpu_data();
-  if (concat_dim_==0) {
-    int offset_num = 0;
-    for (int i=0; i<bottom.size(); ++i) {
-      const Dtype* bottom_data = bottom[i]->gpu_data();
-      caffe_gpu_copy(bottom[i]->count(), bottom_data, top_data+(*top)[0]->offset(offset_num));
-      offset_num += bottom[i]->num();
-    }
-  } else if (concat_dim_ == 1) {
-    int offset_channel = 0;
-    for (int i=0; i<bottom.size(); ++i) {
-      const Dtype* bottom_data = bottom[i]->gpu_data();
-      int num_elem = bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
-      for (int n=0; n<NUM_; ++n) {
-        caffe_gpu_copy(num_elem, bottom_data+bottom[i]->offset(n),
-          top_data+(*top)[0]->offset(n,offset_channel));
-      }
-      offset_channel += bottom[i]->channels();
-    }
-  } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim_ << " not implemented yet";
-  }
+    LOG(FATAL) << "concat_dim along dim" << concat_dim_ <<
+      " not implemented yet";
+  }
 }
 
 template <typename Dtype>
 Dtype ConcatLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
       const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-  const Dtype* top_diff = top[0]->cpu_diff();
-  if (concat_dim_==0) {
-    int offset_num = 0;
-    for (int i=0; i < bottom->size(); ++i) {
+  const Dtype* top_diff = top[0]->cpu_diff();
+  if (concat_dim_ == 0) {
+    int offset_num = 0;
+    for (int i = 0; i < bottom->size(); ++i) {
       Blob<Dtype>* blob = (*bottom)[i];
       Dtype* bottom_diff = blob->mutable_cpu_diff();
-      caffe_copy(blob->count(), top_diff+top[0]->offset(offset_num),bottom_diff);
+      caffe_copy(blob->count(),
+        top_diff+top[0]->offset(offset_num), bottom_diff);
       offset_num += blob->num();
     }
   } else if (concat_dim_ == 1) {
-    int offset_channel = 0;
-    for (int i=0; i < bottom->size(); ++i) {
+    int offset_channel = 0;
+    for (int i = 0; i < bottom->size(); ++i) {
       Blob<Dtype>* blob = (*bottom)[i];
       Dtype* bottom_diff = blob->mutable_cpu_diff();
       int num_elem = blob->channels()*blob->height()*blob->width();
-      for (int n=0; n<NUM_; ++n) {
-        caffe_copy(num_elem, top_diff+top[0]->offset(n,offset_channel),
-          bottom_diff+blob->offset(n));
-      }
+      for (int n = 0; n < NUM_; ++n) {
+        caffe_copy(num_elem, top_diff+top[0]->offset(n, offset_channel),
+          bottom_diff+blob->offset(n));
+      }
       offset_channel += blob->channels();
     }
   } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim_ << " not implemented yet";
-  }
-  return Dtype(0.);
-}
-
-
-template <typename Dtype>
-Dtype ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
-      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-  const Dtype* top_diff = top[0]->gpu_diff();
-  if (concat_dim_==0) {
-    int offset_num = 0;
-    for (int i=0; i < bottom->size(); ++i) {
-      Blob<Dtype>* blob = (*bottom)[i];
-      Dtype* bottom_diff = blob->mutable_gpu_diff();
-      caffe_gpu_copy(blob->count(), top_diff+top[0]->offset(offset_num),bottom_diff);
-      offset_num += blob->num();
-    }
-  } else if (concat_dim_ == 1) {
-    int offset_channel = 0;
-    for (int i=0; i < bottom->size(); ++i) {
-      Blob<Dtype>* blob = (*bottom)[i];
-      Dtype* bottom_diff = blob->mutable_gpu_diff();
-      int num_elem = blob->channels()*blob->height()*blob->width();
-      for (int n=0; n<NUM_; ++n) {
-        caffe_gpu_copy(num_elem, top_diff+top[0]->offset(n,offset_channel),
-          bottom_diff+blob->offset(n));
-      }
-      offset_channel += blob->channels();
-    }
-  } else {
-    LOG(FATAL) << "concat_dim along dim" << concat_dim_ << " not implemented yet";
-  }
+    LOG(FATAL) << "concat_dim along dim" << concat_dim_ <<
+      " not implemented yet";
+  }
   return Dtype(0.);
 }
 
diff --git a/src/caffe/layers/concat_layer.cu b/src/caffe/layers/concat_layer.cu
new file mode 100644
index 0000000..616a5e6
--- /dev/null
+++ b/src/caffe/layers/concat_layer.cu
@@ -0,0 +1,75 @@
+// Copyright 2014 Sergio Guadarrama
+
+#include <vector>
+
+#include "caffe/layer.hpp"
+#include "caffe/vision_layers.hpp"
+#include "caffe/util/math_functions.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void ConcatLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
+      vector<Blob<Dtype>*>* top) {
+  Dtype* top_data = (*top)[0]->mutable_gpu_data();
+  if (concat_dim_ == 0) {
+    int offset_num = 0;
+    for (int i = 0; i < bottom.size(); ++i) {
+      const Dtype* bottom_data = bottom[i]->gpu_data();
+      caffe_gpu_copy(bottom[i]->count(), bottom_data,
+        top_data+(*top)[0]->offset(offset_num));
+      offset_num += bottom[i]->num();
+    }
+  } else if (concat_dim_ == 1) {
+    int offset_channel = 0;
+    for (int i = 0; i < bottom.size(); ++i) {
+      const Dtype* bottom_data = bottom[i]->gpu_data();
+      int num_elem =
+        bottom[i]->channels()*bottom[i]->height()*bottom[i]->width();
+      for (int n = 0; n < NUM_; ++n) {
+        caffe_gpu_copy(num_elem, bottom_data+bottom[i]->offset(n),
+          top_data+(*top)[0]->offset(n, offset_channel));
+      }
+      offset_channel += bottom[i]->channels();
+    }
+  } else {
+    LOG(FATAL) << "concat_dim along dim" << concat_dim_ <<
+      " not implemented yet";
+  }
+}
+
+template <typename Dtype>
+Dtype ConcatLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
+      const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
+  const Dtype* top_diff = top[0]->gpu_diff();
+  if (concat_dim_ == 0) {
+    int offset_num = 0;
+    for (int i = 0; i < bottom->size(); ++i) {
+      Blob<Dtype>* blob = (*bottom)[i];
+      Dtype* bottom_diff = blob->mutable_gpu_diff();
+      caffe_gpu_copy(blob->count(),
+        top_diff+top[0]->offset(offset_num), bottom_diff);
+      offset_num += blob->num();
+    }
+  } else if (concat_dim_ == 1) {
+    int offset_channel = 0;
+    for (int i = 0; i < bottom->size(); ++i) {
+      Blob<Dtype>* blob = (*bottom)[i];
+      Dtype* bottom_diff = blob->mutable_gpu_diff();
+      int num_elem = blob->channels()*blob->height()*blob->width();
+      for (int n = 0; n < NUM_; ++n) {
+        caffe_gpu_copy(num_elem, top_diff+top[0]->offset(n, offset_channel),
+          bottom_diff+blob->offset(n));
+      }
+      offset_channel += blob->channels();
+    }
+  } else {
+    LOG(FATAL) << "concat_dim along dim" << concat_dim_ <<
+      " not implemented yet";
+  }
+  return Dtype(0.);
+}
+
+INSTANTIATE_CLASS(ConcatLayer);
+
+}  // namespace caffe
diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp
index aeee81d..3515ef9 100644
--- a/src/caffe/test/test_concat_layer.cpp
+++ b/src/caffe/test/test_concat_layer.cpp
@@ -1,8 +1,9 @@
 // Copyright 2014 Sergio Guadarrama
 
 #include <cstring>
-#include <cuda_runtime.h>
+#include <vector>
+#include "cuda_runtime.h"
 
 #include "gtest/gtest.h"
 #include "caffe/blob.hpp"
 #include "caffe/common.hpp"
@@ -23,7 +24,7 @@ class ConcatLayerTest : public ::testing::Test {
       : blob_bottom_0(new Blob<Dtype>(2, 3, 6, 5)),
         blob_bottom_1(new Blob<Dtype>(2, 5, 6, 5)),
         blob_bottom_2(new Blob<Dtype>(5, 3, 6, 5)),
-        blob_top_(new Blob<Dtype>()) {};
+        blob_top_(new Blob<Dtype>()) {}
   virtual void SetUp() {
     // fill the values
     FillerParameter filler_param;
@@ -39,17 +40,18 @@ class ConcatLayerTest : public ::testing::Test {
     blob_bottom_vec_1.push_back(blob_bottom_0);
     blob_bottom_vec_1.push_back(blob_bottom_2);
     blob_top_vec_.push_back(blob_top_);
-  };
+  }
   virtual ~ConcatLayerTest() {
-    delete blob_bottom_0; delete blob_bottom_1; delete blob_bottom_2; delete blob_top_;
+    delete blob_bottom_0; delete blob_bottom_1;
+    delete blob_bottom_2; delete blob_top_;
   }
 
   Blob<Dtype>* const blob_bottom_0;
   Blob<Dtype>* const blob_bottom_1;
   Blob<Dtype>* const blob_bottom_2;
   Blob<Dtype>* const blob_top_;
-  vector<Blob<Dtype>*> blob_bottom_vec_0,blob_bottom_vec_1;
+  vector<Blob<Dtype>*> blob_bottom_vec_0, blob_bottom_vec_1;
   vector<Blob<Dtype>*> blob_top_vec_;
 };
 
@@ -61,7 +63,8 @@ TYPED_TEST(ConcatLayerTest, TestSetupNum) {
   layer_param.set_concat_dim(0);
   ConcatLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_1, &(this->blob_top_vec_));
-  EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num()+this->blob_bottom_2->num());
+  EXPECT_EQ(this->blob_top_->num(),
+    this->blob_bottom_0->num() + this->blob_bottom_2->num());
   EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels());
   EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height());
   EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width());
 }
@@ -72,7 +75,8 @@ TYPED_TEST(ConcatLayerTest, TestSetupChannels) {
   ConcatLayer<TypeParam> layer(layer_param);
   layer.SetUp(this->blob_bottom_vec_0, &(this->blob_top_vec_));
   EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0->num());
-  EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_0->channels()+this->blob_bottom_1->channels());
+  EXPECT_EQ(this->blob_top_->channels(),
+    this->blob_bottom_0->channels()+this->blob_bottom_1->channels());
   EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0->height());
   EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0->width());
 }
@@ -88,14 +92,16 @@ TYPED_TEST(ConcatLayerTest, TestCPUNum) {
   for (int c = 0; c < this->blob_bottom_0->channels(); ++c) {
     for (int h = 0; h < this->blob_top_->height(); ++h) {
       for (int w = 0; w < this->blob_top_->width(); ++w) {
-        EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), this->blob_bottom_vec_0[0]->data_at(n, c, h, w));
+        EXPECT_EQ(this->blob_top_->data_at(n, c, h, w),
+          this->blob_bottom_vec_0[0]->data_at(n, c, h, w));
       }
     }
   }
   for (int c = 0; c < this->blob_bottom_1->channels(); ++c) {
     for (int h = 0; h < this->blob_top_->height(); ++h) {
       for (int w = 0; w < this->blob_top_->width(); ++w) {
-        EXPECT_EQ(this->blob_top_->data_at(n, c+3, h, w), this->blob_bottom_vec_0[1]->data_at(n, c, h, w));
+        EXPECT_EQ(this->blob_top_->data_at(n, c+3, h, w),
+          this->blob_bottom_vec_0[1]->data_at(n, c, h, w));
       }
     }
   }
@@ -108,8 +114,6 @@ TYPED_TEST(ConcatLayerTest, TestCPUGradient) {
   Caffe::set_mode(Caffe::CPU);
   ConcatLayer<TypeParam> layer(layer_param);
   GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  // it is too expensive to call curand multiple times, so we don't do an
-  // exhaustive gradient check.
   checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
     &(this->blob_top_vec_));
 }
@@ -119,11 +123,8 @@ TYPED_TEST(ConcatLayerTest, TestGPUGradient) {
   Caffe::set_mode(Caffe::GPU);
   ConcatLayer<TypeParam> layer(layer_param);
   GradientChecker<TypeParam> checker(1e-2, 1e-3);
-  // it is too expensive to call curand multiple times, so we don't do an
-  // exhaustive gradient check.
   checker.CheckGradient(&layer, &(this->blob_bottom_vec_0),
     &(this->blob_top_vec_));
 }
 
-
-}
+}  // namespace caffe
-- 
2.7.4
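
Usage note (not part of the patch): the snippet below is a minimal sketch of how the concatenation layer is exercised, modeled on the TestSetupChannels/TestCPUNum tests above. It assumes the 2014-era Caffe API used throughout this patch (SetUp/Forward taking a bottom vector and a pointer to the top vector); the blob shapes, the filler choice, and the function name ConcatChannelsSketch are illustrative only.

// Minimal sketch: concatenate two blobs along the channel dimension.
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/vision_layers.hpp"

using namespace caffe;  // NOLINT(build/namespaces)

void ConcatChannelsSketch() {
  Blob<float> bottom_a(2, 3, 6, 5);   // num=2, channels=3, height=6, width=5
  Blob<float> bottom_b(2, 5, 6, 5);   // same num/height/width, channels=5
  Blob<float> top;
  std::vector<Blob<float>*> bottom_vec, top_vec;
  bottom_vec.push_back(&bottom_a);
  bottom_vec.push_back(&bottom_b);
  top_vec.push_back(&top);

  // Fill the inputs so Forward copies something meaningful.
  FillerParameter filler_param;
  GaussianFiller<float> filler(filler_param);
  filler.Fill(&bottom_a);
  filler.Fill(&bottom_b);

  LayerParameter layer_param;
  layer_param.set_concat_dim(1);      // 1 = concatenate along channels
  ConcatLayer<float> layer(layer_param);
  layer.SetUp(bottom_vec, &top_vec);  // top reshaped to 2 x (3+5) x 6 x 5
  layer.Forward(bottom_vec, &top_vec);
}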