From: Jeff Donahue
Date: Sat, 12 Apr 2014 01:07:08 +0000 (-0700)
Subject: add Adopt{Data,Diff} methods to blobs to enable "virtual copying"
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7fb71deeb80bb538134e445d298517522823b90c;p=platform%2Fupstream%2Fcaffe.git

add Adopt{Data,Diff} methods to blobs to enable "virtual copying"
---

diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp
index 3185263..7b8989b 100644
--- a/include/caffe/blob.hpp
+++ b/include/caffe/blob.hpp
@@ -17,7 +17,6 @@ class Blob {
       diff_() {}
   explicit Blob(const int num, const int channels, const int height,
       const int width);
-  virtual ~Blob() {}
   void Reshape(const int num, const int channels, const int height,
       const int width);
   inline int num() const { return num_; }
@@ -52,6 +51,16 @@ class Blob {
     return *(cpu_diff() + offset(n, c, h, w));
   }
 
+  inline const shared_ptr<SyncedMemory>& data() const {
+    CHECK(data_);
+    return data_;
+  }
+
+  inline const shared_ptr<SyncedMemory>& diff() const {
+    CHECK(diff_);
+    return diff_;
+  }
+
   const Dtype* cpu_data() const;
   const Dtype* gpu_data() const;
   const Dtype* cpu_diff() const;
@@ -64,6 +73,9 @@ class Blob {
   void FromProto(const BlobProto& proto);
   void ToProto(BlobProto* proto, bool write_diff = false) const;
 
+  void AdoptData(const Blob& other);
+  void AdoptDiff(const Blob& other);
+
 protected:
   shared_ptr<SyncedMemory> data_;
   shared_ptr<SyncedMemory> diff_;

diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp
index 5356d05..e17b8bf 100644
--- a/src/caffe/blob.cpp
+++ b/src/caffe/blob.cpp
@@ -86,6 +86,18 @@ Dtype* Blob<Dtype>::mutable_gpu_diff() {
 }
 
 template <typename Dtype>
+void Blob<Dtype>::AdoptData(const Blob& other) {
+  CHECK_EQ(count_, other.count());
+  data_ = other.data();
+}
+
+template <typename Dtype>
+void Blob<Dtype>::AdoptDiff(const Blob& other) {
+  CHECK_EQ(count_, other.count());
+  diff_ = other.diff();
+}
+
+template <typename Dtype>
 void Blob<Dtype>::Update() {
   // We will perform update based on where the data is located.
   switch (data_->head()) {
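Note on the Adopt{Data,Diff} methods above: adopting replaces a blob's data_ (or diff_) shared_ptr<SyncedMemory> with the other blob's, so the two blobs alias a single allocation and no bytes are copied; the only requirement, enforced by the CHECK_EQ, is that the element counts match. A minimal sketch of the resulting semantics (the shapes and the float instantiation here are illustrative, not part of the patch):

    #include "caffe/blob.hpp"

    // After AdoptData, both blobs point at one SyncedMemory, so a write
    // through either blob is visible through the other -- a "virtual copy".
    void adopt_sketch() {
      caffe::Blob<float> bottom(1, 2, 3, 4);  // count() == 24
      caffe::Blob<float> top(1, 24, 1, 1);    // same count, different shape
      top.AdoptData(bottom);                  // shares bottom's data, no copy
      bottom.mutable_cpu_data()[0] = 42.0f;
      // top.cpu_data()[0] now reads 42.0f through the shared memory.
    }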
diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp
index 78e4d0c..83d0502 100644
--- a/src/caffe/layers/flatten_layer.cpp
+++ b/src/caffe/layers/flatten_layer.cpp
@@ -24,18 +24,14 @@ void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
 
 template <typename Dtype>
 Dtype FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  const Dtype* bottom_data = bottom[0]->cpu_data();
-  Dtype* top_data = (*top)[0]->mutable_cpu_data();
-  caffe_copy(count_, bottom_data, top_data);
+  (*top)[0]->AdoptData(*bottom[0]);
   return Dtype(0.);
 }
 
 template <typename Dtype>
 void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-  const Dtype* top_diff = top[0]->cpu_diff();
-  Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
-  caffe_copy(count_, top_diff, bottom_diff);
+  (*bottom)[0]->AdoptDiff(*top[0]);
 }
 
 INSTANTIATE_CLASS(FlattenLayer);

diff --git a/src/caffe/layers/flatten_layer.cu b/src/caffe/layers/flatten_layer.cu
index c1f5e48..a28018d 100644
--- a/src/caffe/layers/flatten_layer.cu
+++ b/src/caffe/layers/flatten_layer.cu
@@ -11,18 +11,14 @@ namespace caffe {
 
 template <typename Dtype>
 Dtype FlattenLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  const Dtype* bottom_data = bottom[0]->gpu_data();
-  Dtype* top_data = (*top)[0]->mutable_gpu_data();
-  caffe_gpu_copy(count_, bottom_data, top_data);
+  (*top)[0]->AdoptData(*bottom[0]);
   return Dtype(0.);
 }
 
 template <typename Dtype>
 void FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-  const Dtype* top_diff = top[0]->gpu_diff();
-  Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
-  caffe_gpu_copy(count_, top_diff, bottom_diff);
+  (*bottom)[0]->AdoptDiff(*top[0]);
 }
 
 INSTANTIATE_CLASS(FlattenLayer);
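With the changes above, FlattenLayer no longer copies: Forward adopts the bottom blob's data into the differently-shaped top blob, and Backward adopts the top blob's diff, making flattening a zero-copy reshape of the same memory. For reference, element (n, c, h, w) of the bottom blob appears at channel index (c * height + h) * width + w of the flattened (num, channels*height*width, 1, 1) top. A small helper expressing that mapping (illustrative only, not part of the patch):

    // Illustrative only: the flattened channel index that bottom element
    // (c, h, w) of a (num, channels, height, width) blob maps to when the
    // blob is viewed as (num, channels*height*width, 1, 1).
    inline int flattened_index(int c, int h, int w, int height, int width) {
      return (c * height + h) * width + w;
    }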
diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp
index 5ca95f3..59a6604 100644
--- a/src/caffe/layers/split_layer.cpp
+++ b/src/caffe/layers/split_layer.cpp
@@ -30,13 +30,8 @@ void SplitLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
 template <typename Dtype>
 Dtype SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  const Dtype* bottom_data = bottom[0]->cpu_data();
   for (int i = 0; i < top->size(); ++i) {
-    if (i == 0 && (*top)[i] == bottom[0]) {
-      continue;
-    }
-    Dtype* top_data = (*top)[i]->mutable_cpu_data();
-    caffe_copy(count_, bottom_data, top_data);
+    (*top)[i]->AdoptData(*bottom[0]);
   }
   return Dtype(0.);
 }
@@ -45,17 +40,11 @@ template <typename Dtype>
 void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   if (propagate_down) {
-    const Dtype* top_diff = top[0]->cpu_diff();
-    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
-    // Initialize by copying first top blob diff to our diff, unless we're
-    // doing in-place computation for the first blob, in which case the diff is
-    // already initialized.
-    if (top[0] != (*bottom)[0]) {
-      caffe_copy(count_, top_diff, bottom_diff);
-    }
+    (*bottom)[0]->AdoptDiff(*top[0]);
     // Add remaining top blob diffs.
+    Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
     for (int i = 1; i < top.size(); ++i) {
-      top_diff = top[i]->cpu_diff();
+      const Dtype* top_diff = top[i]->cpu_diff();
       caffe_axpy(count_, Dtype(1.), top_diff, bottom_diff);
     }
   }

diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu
index 5b2814b..a50e3e8 100644
--- a/src/caffe/layers/split_layer.cu
+++ b/src/caffe/layers/split_layer.cu
@@ -11,13 +11,8 @@ namespace caffe {
 template <typename Dtype>
 Dtype SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  const Dtype* bottom_data = bottom[0]->gpu_data();
   for (int i = 0; i < top->size(); ++i) {
-    if (i == 0 && (*top)[i] == bottom[0]) {
-      continue;
-    }
-    Dtype* top_data = (*top)[i]->mutable_gpu_data();
-    caffe_gpu_copy(count_, bottom_data, top_data);
+    (*top)[i]->AdoptData(*bottom[0]);
   }
   return Dtype(0.);
 }
@@ -26,17 +21,11 @@ template <typename Dtype>
 void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   if (propagate_down) {
-    const Dtype* top_diff = top[0]->gpu_diff();
-    Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
-    // Initialize by copying first top blob diff to our diff, unless we're
-    // doing in-place computation for the first blob, in which case the diff is
-    // already initialized.
-    if (top[0] != (*bottom)[0]) {
-      caffe_gpu_copy(count_, top_diff, bottom_diff);
-    }
+    (*bottom)[0]->AdoptDiff(*top[0]);
     // Add remaining top blob diffs.
+    Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     for (int i = 1; i < top.size(); ++i) {
-      top_diff = top[i]->gpu_diff();
+      const Dtype* top_diff = top[i]->gpu_diff();
       caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
     }
   }
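In both the CPU and GPU backward passes above, the bottom diff first adopts top[0]'s diff and each remaining top diff is then accumulated in place with caffe_axpy / caffe_gpu_axpy (y := alpha * x + y), so the result is the elementwise sum of all top diffs; because the memory is now shared, that accumulation is also visible through top[0]'s diff. A plain-loop reference for the CPU computation (illustrative only; the names here are assumptions, not part of the patch):

    #include <cstddef>
    #include <vector>

    // Reference semantics for the split backward pass: the bottom gradient
    // is the elementwise sum of all top gradients.
    void split_backward_reference(const std::vector<const float*>& top_diffs,
                                  float* bottom_diff, int count) {
      for (int j = 0; j < count; ++j) {
        bottom_diff[j] = top_diffs[0][j];        // AdoptDiff stands in for this copy
        for (std::size_t i = 1; i < top_diffs.size(); ++i) {
          bottom_diff[j] += top_diffs[i][j];     // caffe_axpy with alpha == 1
        }
      }
    }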