# This is done by adding PythonLayer as a boost::python HeldType.
# PY$(PROJECT)_SRC is the python wrapper for $(PROJECT)
PY$(PROJECT)_SRC := python/$(PROJECT)/_$(PROJECT).cpp
PY$(PROJECT)_SO := python/$(PROJECT)/_$(PROJECT).so
+PY$(PROJECT)_HXX := include/$(PROJECT)/python_layer.hpp
# MAT$(PROJECT)_SRC is the matlab wrapper for $(PROJECT)
MAT$(PROJECT)_SRC := matlab/$(PROJECT)/mat$(PROJECT).cpp
ifneq ($(MATLAB_DIR),)
py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY)
-$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) | $(DYNAMIC_NAME)
+$(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME)
@ echo CXX/LD -o $@ $<
$(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \
	$(LINKFLAGS) $(PYTHON_LDFLAGS) -l$(PROJECT) \
--- /dev/null
+#ifndef CAFFE_PYTHON_LAYER_HPP_
+#define CAFFE_PYTHON_LAYER_HPP_
+
+#include <boost/python.hpp>
+#include <vector>
+
+#include "caffe/layer.hpp"
+
+namespace bp = boost::python;
+
+namespace caffe {
+
+// A Layer that delegates setup, reshape, forward, and backward to methods
+// ("setup", "reshape", "forward", "backward") of an attached Python object.
+// Intended for use as the boost::python HeldType of the exported Layer class,
+// so that Python subclasses can implement layers.
+template <typename Dtype>
+class PythonLayer : public Layer<Dtype> {
+ public:
+  // self is the wrapping Python object supplied by boost::python.
+  PythonLayer(PyObject* self, const LayerParameter& param)
+      : Layer<Dtype>(param), self_(self) { }
+
+  // NOTE(review): top changed from `vector<Blob<Dtype>*>*` to const ref to
+  // match Reshape/Forward_cpu/Backward_cpu below; the pointer form did not
+  // agree with the sibling methods and would not override the same-style
+  // base virtual.
+  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top) {
+    try {
+      bp::call_method<bp::object>(self_, "setup", bottom, top);
+    } catch (const bp::error_already_set&) {
+      // Print the Python traceback before rethrowing to the C++ caller.
+      PyErr_Print();
+      throw;
+    }
+  }
+
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top) {
+    try {
+      bp::call_method<bp::object>(self_, "reshape", bottom, top);
+    } catch (const bp::error_already_set&) {
+      PyErr_Print();
+      throw;
+    }
+  }
+
+  virtual inline const char* type() const { return "Python"; }
+
+ protected:
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top) {
+    try {
+      bp::call_method<bp::object>(self_, "forward", bottom, top);
+    } catch (const bp::error_already_set&) {
+      PyErr_Print();
+      throw;
+    }
+  }
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+    try {
+      bp::call_method<bp::object>(self_, "backward", top, propagate_down,
+          bottom);
+    } catch (const bp::error_already_set&) {
+      PyErr_Print();
+      throw;
+    }
+  }
+
+ private:
+  // NOTE(review): raw borrowed pointer — no refcount is taken here; this
+  // assumes the owning Python object outlives the layer. Confirm against
+  // the held-type wiring in the wrapper.
+  PyObject* self_;
+};
+
+}  // namespace caffe
+
+#endif
#include <fstream> // NOLINT
#include "caffe/caffe.hpp"
+#include "caffe/python_layer.hpp"
// Temporary solution for numpy < 1.7 versions: old macro, no promises.
// You're strongly advised to upgrade to >= 1.7.
.add_property("diff", bp::make_function(&Blob<Dtype>::mutable_cpu_diff,
NdarrayCallPolicies()));
- bp::class_<Layer<Dtype>, shared_ptr<Layer<Dtype> >, boost::noncopyable>(
- "Layer", bp::no_init)
+ bp::class_<Layer<Dtype>, shared_ptr<PythonLayer<Dtype> >,
+ boost::noncopyable>("Layer", bp::init<const LayerParameter&>())
.add_property("blobs", bp::make_function(&Layer<Dtype>::blobs,
bp::return_internal_reference<>()))
.def("setup", &Layer<Dtype>::LayerSetUp)
.def("reshape", &Layer<Dtype>::Reshape)
- .add_property("type_name", bp::make_function(&Layer<Dtype>::type_name,
- bp::return_value_policy<bp::copy_const_reference>()));
+ .add_property("type", bp::make_function(&Layer<Dtype>::type));
+ bp::register_ptr_to_python<shared_ptr<Layer<Dtype> > >();
+
+ bp::class_<LayerParameter>("LayerParameter", bp::no_init);
bp::class_<Solver<Dtype>, shared_ptr<Solver<Dtype> >, boost::noncopyable>(
"Solver", bp::no_init)
// vector wrappers for all the vector types we use
bp::class_<vector<shared_ptr<Blob<Dtype> > > >("BlobVec")
.def(bp::vector_indexing_suite<vector<shared_ptr<Blob<Dtype> > >, true>());
+ bp::class_<vector<Blob<Dtype>*> >("RawBlobVec")
+ .def(bp::vector_indexing_suite<vector<Blob<Dtype>*>, true>());
bp::class_<vector<shared_ptr<Layer<Dtype> > > >("LayerVec")
.def(bp::vector_indexing_suite<vector<shared_ptr<Layer<Dtype> > >, true>());
bp::class_<vector<string> >("StringVec")
.def(bp::vector_indexing_suite<vector<string> >());
bp::class_<vector<shared_ptr<Net<Dtype> > > >("NetVec")
.def(bp::vector_indexing_suite<vector<shared_ptr<Net<Dtype> > >, true>());
+ bp::class_<vector<bool> >("BoolVec")
+ .def(bp::vector_indexing_suite<vector<bool> >());
import_array();
}
#include "caffe/proto/caffe.pb.h"
#include "caffe/vision_layers.hpp"
+#ifdef WITH_PYTHON_LAYER
+#include "caffe/python_layer.hpp"
+#endif
+
namespace caffe {
// Get convolution layer according to engine.
REGISTER_LAYER_CREATOR(TanH, GetTanHLayer);
+#ifdef WITH_PYTHON_LAYER
+// Creator for "Python" layers: imports the module named by
+// param.python_param().module(), instantiates the class named by
+// param.python_param().layer() with param, and extracts the wrapped
+// PythonLayer held inside the resulting Python object.
+template <typename Dtype>
+shared_ptr<Layer<Dtype> > GetPythonLayer(const LayerParameter& param) {
+  // No-op if the interpreter is already running, so repeated creation is safe.
+  Py_Initialize();
+  try {
+    bp::object module = bp::import(param.python_param().module().c_str());
+    bp::object layer = module.attr(param.python_param().layer().c_str())(param);
+    return bp::extract<shared_ptr<PythonLayer<Dtype> > >(layer)();
+  } catch (const bp::error_already_set&) {
+    // Catch by reference (Boost.Python idiom); print the Python traceback,
+    // then rethrow so the caller still sees the failure.
+    PyErr_Print();
+    throw;
+  }
+}
+
+REGISTER_LAYER_CREATOR(Python, GetPythonLayer);
+#endif
+
// Layers that use their constructor as their default creator should be
// registered in their corresponding cpp files. Do not register them here.
} // namespace caffe
// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
-// LayerParameter next available layer-specific ID: 129 (last added: window_data_param)
+// LayerParameter next available layer-specific ID: 131 (last added: python_param)
message LayerParameter {
optional string name = 1; // the layer name
optional string type = 2; // the layer type
optional MVNParameter mvn_param = 120;
optional PoolingParameter pooling_param = 121;
optional PowerParameter power_param = 122;
+ optional PythonParameter python_param = 130;
optional ReLUParameter relu_param = 123;
optional SigmoidParameter sigmoid_param = 124;
optional SoftmaxParameter softmax_param = 125;
optional float shift = 3 [default = 0.0];
}
+// Message that stores parameters used by PythonLayer
+message PythonParameter {
+  // Name of the Python module to import (used by the Python layer creator
+  // in the layer factory to locate the implementation).
+  optional string module = 1;
+  // Name of the layer class inside that module to instantiate.
+  optional string layer = 2;
+}
+
// Message that stores parameters used by ReLULayer
message ReLUParameter {
// Allow non-zero slope for negative inputs to speed up optimization