#ifndef CAFFE_LAYER_H_
#define CAFFE_LAYER_H_
-#include <boost/thread.hpp>
#include <algorithm>
#include <string>
#include <vector>
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/device_alternate.hpp"
+/**
+ Forward declare boost::mutex instead of including boost/thread.hpp
+ to avoid boost/NVCC issues (#1009, #1010) on OSX.
+ */
+namespace boost { class mutex; }
+
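This compiles because the header only ever holds the mutex through a shared_ptr (see forward_mutex_ below); shared_ptr tolerates an incomplete type, and only layer.cpp, which does include boost/thread.hpp, actually constructs the mutex. A minimal sketch of the same idiom, with hypothetical Widget/Heavy names:

// widget.hpp (hypothetical): compiles without ever seeing heavy.hpp.
#include <boost/shared_ptr.hpp>
namespace impl { class Heavy; }  // forward declaration only

class Widget {
 public:
  void Init();  // defined in widget.cpp, where impl::Heavy is complete
 private:
  boost::shared_ptr<impl::Heavy> p_;  // OK with an incomplete type
};

// widget.cpp (hypothetical)
#include "widget.hpp"
#include "heavy.hpp"  // full definition of impl::Heavy
void Widget::Init() { p_.reset(new impl::Heavy()); }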
namespace caffe {
/**
* layer.
*/
explicit Layer(const LayerParameter& param)
- : layer_param_(param) {
+ : layer_param_(param), is_shared_(false) {
// Set phase and copy blobs (if there are any).
phase_ = param.phase();
if (layer_param_.blobs_size() > 0) {
*/
void SetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
+ InitMutex();
CheckBlobCounts(bottom, top);
LayerSetUp(bottom, top);
Reshape(bottom, top);
*/
virtual inline bool ShareInParallel() const { return false; }
+  /** @brief Return whether this layer is actually shared by other nets.
+   * If ShareInParallel() is true, multiple GPUs are in use, and the
+   * net is in the TRAIN phase, this function is expected to return true.
+   */
+ inline bool IsShared() const { return is_shared_; }
+
+  /** @brief Set whether this layer is actually shared by other nets.
+   * If ShareInParallel() is true, multiple GPUs are in use, and the
+   * net is in the TRAIN phase, is_shared should be set to true.
+   */
+ inline void SetShared(bool is_shared) {
+ CHECK(ShareInParallel() || !is_shared)
+ << type() << "Layer does not support sharing.";
+ is_shared_ = is_shared;
+ }
+
/**
* @brief Adjust the shapes of top blobs and internal buffers to accommodate
* the shapes of the bottom blobs.
}
private:
- // mutex to lock layer to ensure sequential forward
- boost::mutex forward_mutex_;
+  /** Whether this layer is actually shared by other nets. */
+ bool is_shared_;
+
+ /** The mutex for sequential forward if this layer is shared */
+ shared_ptr<boost::mutex> forward_mutex_;
+
+ /** Initialize forward_mutex_ */
+ void InitMutex();
+ /** Lock forward_mutex_ if this layer is shared */
+ void Lock();
+ /** Unlock forward_mutex_ if this layer is shared */
+ void Unlock();
DISABLE_COPY_AND_ASSIGN(Layer);
}; // class Layer
inline Dtype Layer<Dtype>::Forward(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Lock during forward to ensure sequential forward
- boost::mutex::scoped_lock lock(forward_mutex_);
+ Lock();
Dtype loss = 0;
Reshape(bottom, top);
switch (Caffe::mode()) {
default:
LOG(FATAL) << "Unknown caffe mode.";
}
+ Unlock();
return loss;
}
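Lock() and Unlock() take the mutex only when IsShared() is true, so layers in a single-net setup pay no synchronization cost. A standalone sketch of this conditional-locking pattern (hypothetical SharedWork class, not part of the patch):

#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>

class SharedWork {
 public:
  explicit SharedWork(bool is_shared)
      : is_shared_(is_shared), mutex_(new boost::mutex()) {}
  void Run() {
    if (is_shared_) { mutex_->lock(); }  // serialize only when shared
    // ... critical section: the forward pass would go here ...
    if (is_shared_) { mutex_->unlock(); }
  }
 private:
  bool is_shared_;
  boost::shared_ptr<boost::mutex> mutex_;
};

Note that, unlike the scoped_lock it replaces, this pattern does not release the mutex if the forward pass throws; an RAII guard that locks conditionally would keep both properties.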
--- /dev/null
+++ b/src/caffe/layer.cpp
+#include <boost/thread.hpp>
+#include "caffe/layer.hpp"
+
+namespace caffe {
+
+template <typename Dtype>
+void Layer<Dtype>::InitMutex() {
+ forward_mutex_.reset(new boost::mutex());
+}
+
+template <typename Dtype>
+void Layer<Dtype>::Lock() {
+ if (IsShared()) {
+ forward_mutex_->lock();
+ }
+}
+
+template <typename Dtype>
+void Layer<Dtype>::Unlock() {
+ if (IsShared()) {
+ forward_mutex_->unlock();
+ }
+}
+
+INSTANTIATE_CLASS(Layer);
+
+} // namespace caffe
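Because the definitions of InitMutex, Lock, and Unlock live in this .cpp rather than in the header, the translation unit must explicitly instantiate Layer for every Dtype clients use; Caffe's INSTANTIATE_CLASS macro expands to instantiations for float and double. A minimal sketch of the idiom with a hypothetical Foo:

// foo.hpp (hypothetical)
template <typename Dtype>
class Foo {
 public:
  Dtype Twice(Dtype x);  // declared here, defined in foo.cpp
};

// foo.cpp (hypothetical)
#include "foo.hpp"
template <typename Dtype>
Dtype Foo<Dtype>::Twice(Dtype x) { return x + x; }

// Explicit instantiations, as INSTANTIATE_CLASS(Foo) would emit; without
// them, a caller using Foo<float>::Twice elsewhere fails to link.
template class Foo<float>;
template class Foo<double>;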
bottom_need_backward_.resize(param.layer_size());
for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) {
// For non-root solvers, whether this layer is shared from root_net_.
- bool is_shared_layer = !Caffe::root_solver()
+ bool share_from_root = !Caffe::root_solver()
&& root_net_->layers_[layer_id]->ShareInParallel();
// Inherit phase from net if unset.
if (!param.layer(layer_id).has_phase()) {
<< "propagate_down param must be specified "
<< "either 0 or bottom_size times ";
}
- if (is_shared_layer) {
+ if (share_from_root) {
LOG(INFO) << "Sharing layer " << layer_param.name() << " from root net";
layers_.push_back(root_net_->layers_[layer_id]);
+ layers_[layer_id]->SetShared(true);
} else {
layers_.push_back(LayerRegistry<Dtype>::CreateLayer(layer_param));
}
}
}
// After this layer is connected, set it up.
- if (is_shared_layer) {
+ if (share_from_root) {
// Set up size of top blobs using root_net_
const vector<Blob<Dtype>*>& base_top = root_net_->top_vecs_[layer_id];
const vector<Blob<Dtype>*>& this_top = this->top_vecs_[layer_id];
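The loop that follows is elided from this hunk; presumably it shapes each of this net's top blobs like the root net's, so a shared layer's outputs line up across nets. A minimal sketch, assuming the continuation uses Blob::ReshapeLike (which Caffe's Blob provides):

for (int top_id = 0; top_id < base_top.size(); ++top_id) {
  // Match this net's top blob shape to the root net's corresponding blob.
  this_top[top_id]->ReshapeLike(*base_top[top_id]);
}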