From 0d1a8ab55a75d31ef12166cba470c23f58e349d8 Mon Sep 17 00:00:00 2001
From: Jeff Donahue
Date: Sat, 12 Apr 2014 00:52:15 -0700
Subject: [PATCH] change Adopt -> Share as suggested by kloudkl

---
 include/caffe/blob.hpp             | 9 +++++++--
 src/caffe/blob.cpp                 | 4 ++--
 src/caffe/layers/flatten_layer.cpp | 4 ++--
 src/caffe/layers/flatten_layer.cu  | 4 ++--
 src/caffe/layers/split_layer.cpp   | 4 ++--
 src/caffe/layers/split_layer.cu    | 4 ++--
 6 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp
index 7b8989b..712fc05 100644
--- a/include/caffe/blob.hpp
+++ b/include/caffe/blob.hpp
@@ -73,8 +73,13 @@ class Blob {
   void FromProto(const BlobProto& proto);
   void ToProto(BlobProto* proto, bool write_diff = false) const;
 
-  void AdoptData(const Blob& other);
-  void AdoptDiff(const Blob& other);
+  // Set the data_/diff_ shared_ptr to point to the SyncedMemory holding the
+  // data_/diff_ of Blob other -- useful in layers which simply perform a copy
+  // in their forward or backward pass.
+  // This deallocates the SyncedMemory holding this blob's data/diff, as
+  // shared_ptr calls its destructor when reset with the = operator.
+  void ShareData(const Blob& other);
+  void ShareDiff(const Blob& other);
 
  protected:
   shared_ptr<SyncedMemory> data_;
diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp
index e17b8bf..54b6992 100644
--- a/src/caffe/blob.cpp
+++ b/src/caffe/blob.cpp
@@ -86,13 +86,13 @@ Dtype* Blob<Dtype>::mutable_gpu_diff() {
 }
 
 template <typename Dtype>
-void Blob<Dtype>::AdoptData(const Blob& other) {
+void Blob<Dtype>::ShareData(const Blob& other) {
   CHECK_EQ(count_, other.count());
   data_ = other.data();
 }
 
 template <typename Dtype>
-void Blob<Dtype>::AdoptDiff(const Blob& other) {
+void Blob<Dtype>::ShareDiff(const Blob& other) {
   CHECK_EQ(count_, other.count());
   diff_ = other.diff();
 }
diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp
index 83d0502..e954030 100644
--- a/src/caffe/layers/flatten_layer.cpp
+++ b/src/caffe/layers/flatten_layer.cpp
@@ -24,14 +24,14 @@ void FlattenLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
 template <typename Dtype>
 Dtype FlattenLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  (*top)[0]->AdoptData(*bottom[0]);
+  (*top)[0]->ShareData(*bottom[0]);
   return Dtype(0.);
 }
 
 template <typename Dtype>
 void FlattenLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-  (*bottom)[0]->AdoptDiff(*top[0]);
+  (*bottom)[0]->ShareDiff(*top[0]);
 }
 
 INSTANTIATE_CLASS(FlattenLayer);
diff --git a/src/caffe/layers/flatten_layer.cu b/src/caffe/layers/flatten_layer.cu
index a28018d..157eeb1 100644
--- a/src/caffe/layers/flatten_layer.cu
+++ b/src/caffe/layers/flatten_layer.cu
@@ -11,14 +11,14 @@ namespace caffe {
 template <typename Dtype>
 Dtype FlattenLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
-  (*top)[0]->AdoptData(*bottom[0]);
+  (*top)[0]->ShareData(*bottom[0]);
   return Dtype(0.);
 }
 
 template <typename Dtype>
 void FlattenLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
-  (*bottom)[0]->AdoptDiff(*top[0]);
+  (*bottom)[0]->ShareDiff(*top[0]);
 }
 
 INSTANTIATE_CLASS(FlattenLayer);
diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp
index 59a6604..aa2b6f6 100644
--- a/src/caffe/layers/split_layer.cpp
+++ b/src/caffe/layers/split_layer.cpp
@@ -31,7 +31,7 @@ template <typename Dtype>
 Dtype SplitLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   for (int i = 0; i < top->size(); ++i) {
-    (*top)[i]->AdoptData(*bottom[0]);
+    (*top)[i]->ShareData(*bottom[0]);
   }
   return Dtype(0.);
 }
@@ -40,7 +40,7 @@ template <typename Dtype>
 void SplitLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   if (propagate_down) {
-    (*bottom)[0]->AdoptDiff(*top[0]);
+    (*bottom)[0]->ShareDiff(*top[0]);
     // Add remaining top blob diffs.
     Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
     for (int i = 1; i < top.size(); ++i) {
diff --git a/src/caffe/layers/split_layer.cu b/src/caffe/layers/split_layer.cu
index a50e3e8..e2269b8 100644
--- a/src/caffe/layers/split_layer.cu
+++ b/src/caffe/layers/split_layer.cu
@@ -12,7 +12,7 @@ template <typename Dtype>
 Dtype SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
     vector<Blob<Dtype>*>* top) {
   for (int i = 0; i < top->size(); ++i) {
-    (*top)[i]->AdoptData(*bottom[0]);
+    (*top)[i]->ShareData(*bottom[0]);
   }
   return Dtype(0.);
 }
@@ -21,7 +21,7 @@ template <typename Dtype>
 void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
   if (propagate_down) {
-    (*bottom)[0]->AdoptDiff(*top[0]);
+    (*bottom)[0]->ShareDiff(*top[0]);
     // Add remaining top blob diffs.
     Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
     for (int i = 1; i < top.size(); ++i) {
-- 
2.7.4
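
The renamed ShareData/ShareDiff calls only re-point a shared_ptr at another blob's storage; no element-wise copy is made, and the previously held buffer is freed once its last owner lets go. What follows is a minimal, hypothetical sketch of that semantics using a simplified ToyBlob class backed by std::shared_ptr<std::vector<float>>; it is an illustrative stand-in, not Caffe's actual Blob/SyncedMemory implementation.

// toy_share.cpp -- illustrative only; ToyBlob is a made-up stand-in for Blob.
#include <cassert>
#include <memory>
#include <vector>

class ToyBlob {
 public:
  explicit ToyBlob(int count)
      : data_(std::make_shared<std::vector<float> >(count)) {}

  // Analogous to Blob<Dtype>::ShareData: adopt the other blob's storage.
  void ShareData(const ToyBlob& other) {
    assert(data_->size() == other.data_->size());  // counts must match
    data_ = other.data_;  // old buffer is destroyed here if no one else holds it
  }

  float* mutable_data() { return data_->data(); }

 private:
  std::shared_ptr<std::vector<float> > data_;
};

int main() {
  ToyBlob bottom(4), top(4);
  top.ShareData(bottom);          // top now aliases bottom's storage
  bottom.mutable_data()[0] = 3.14f;
  assert(top.mutable_data()[0] == 3.14f);  // same memory, no copy was made
  return 0;
}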