Merge pull request #9305 from dkurt:public_dnn_importer_is_deprecated
authorVadim Pisarevsky <vadim.pisarevsky@gmail.com>
Mon, 18 Sep 2017 09:35:35 +0000 (09:35 +0000)
committerVadim Pisarevsky <vadim.pisarevsky@gmail.com>
Mon, 18 Sep 2017 09:35:35 +0000 (09:35 +0000)
18 files changed:
modules/core/include/opencv2/core/mat.hpp
modules/core/src/copy.cpp
modules/core/src/stat.cpp
modules/core/test/test_arithm.cpp
modules/dnn/include/opencv2/dnn/all_layers.hpp
modules/dnn/include/opencv2/dnn/dnn.hpp
modules/dnn/misc/caffe/caffe.pb.cc
modules/dnn/misc/caffe/caffe.pb.h
modules/dnn/src/caffe/caffe.proto
modules/dnn/src/caffe/caffe_importer.cpp
modules/dnn/src/caffe/caffe_shrinker.cpp [new file with mode: 0644]
modules/dnn/src/init.cpp
modules/dnn/src/layers/elementwise_layers.cpp
modules/dnn/src/tensorflow/tf_importer.cpp
modules/dnn/test/test_caffe_importer.cpp
modules/dnn/test/test_tf_importer.cpp
modules/ml/src/svm.cpp
modules/ts/src/ts_func.cpp

index 31d182c..2e927c4 100644 (file)
@@ -1192,8 +1192,8 @@ public:
     /** @overload
     @param m Destination matrix. If it does not have a proper size or type before the operation, it is
     reallocated.
-    @param mask Operation mask. Its non-zero elements indicate which matrix elements need to be copied.
-    The mask has to be of type CV_8U and can have 1 or multiple channels.
+    @param mask Operation mask of the same size as \*this. Its non-zero elements indicate which matrix
+    elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels.
     */
     void copyTo( OutputArray m, InputArray mask ) const;
 
@@ -1229,7 +1229,8 @@ public:
 
     This is an advanced variant of the Mat::operator=(const Scalar& s) operator.
     @param value Assigned scalar converted to the actual array type.
-    @param mask Operation mask of the same size as \*this.
+    @param mask Operation mask of the same size as \*this. Its non-zero elements indicate which matrix
+    elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels.
      */
     Mat& setTo(InputArray value, InputArray mask=noArray());
 
index e07a75f..d22e762 100644 (file)
@@ -336,7 +336,7 @@ static bool ipp_copyTo(const Mat &src, Mat &dst, const Mat &mask)
 #ifdef HAVE_IPP_IW
     CV_INSTRUMENT_REGION_IPP()
 
-    if(mask.channels() > 1 && mask.depth() != CV_8U)
+    if(mask.channels() > 1 || mask.depth() != CV_8U)
         return false;
 
     if (src.dims <= 2)
@@ -512,20 +512,23 @@ Mat& Mat::setTo(InputArray _value, InputArray _mask)
     Mat value = _value.getMat(), mask = _mask.getMat();
 
     CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT ));
-    CV_Assert( mask.empty() || (mask.type() == CV_8U && size == mask.size) );
+    int cn = channels(), mcn = mask.channels();
+    CV_Assert( mask.empty() || (mask.depth() == CV_8U && (mcn == 1 || mcn == cn) && size == mask.size) );
 
     CV_IPP_RUN_FAST(ipp_Mat_setTo_Mat(*this, value, mask), *this)
 
-    size_t esz = elemSize();
+    size_t esz = mcn > 1 ? elemSize1() : elemSize();
     BinaryFunc copymask = getCopyMaskFunc(esz);
 
     const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 };
     uchar* ptrs[2]={0,0};
     NAryMatIterator it(arrays, ptrs);
-    int totalsz = (int)it.size, blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
+    int totalsz = (int)it.size*mcn;
+    int blockSize0 = std::min(totalsz, (int)((BLOCK_SIZE + esz-1)/esz));
+    blockSize0 -= blockSize0 % mcn;    // must be divisible without remainder for unrolling and advancing
     AutoBuffer<uchar> _scbuf(blockSize0*esz + 32);
     uchar* scbuf = alignPtr((uchar*)_scbuf, (int)sizeof(double));
-    convertAndUnrollScalar( value, type(), scbuf, blockSize0 );
+    convertAndUnrollScalar( value, type(), scbuf, blockSize0/mcn );
 
     for( size_t i = 0; i < it.nplanes; i++, ++it )
     {
index a978c90..60d3be9 100644 (file)
@@ -4150,7 +4150,9 @@ double cv::PSNR(InputArray _src1, InputArray _src2)
 {
     CV_INSTRUMENT_REGION()
 
-    CV_Assert( _src1.depth() == CV_8U );
+    // Input arrays must have depth CV_8U
+    CV_Assert( _src1.depth() == CV_8U && _src2.depth() == CV_8U );
+
     double diff = std::sqrt(norm(_src1, _src2, NORM_L2SQR)/(_src1.total()*_src1.channels()));
     return 20*log10(255./(diff+DBL_EPSILON));
 }
index 78c0689..ae5e4a7 100644 (file)
@@ -1,4 +1,4 @@
-#include "test_precomp.hpp"
+#include "test_precomp.hpp"
 #include <cmath>
 
 using namespace cv;
@@ -15,7 +15,7 @@ const int ARITHM_MAX_SIZE_LOG = 10;
 
 struct BaseElemWiseOp
 {
-    enum { FIX_ALPHA=1, FIX_BETA=2, FIX_GAMMA=4, REAL_GAMMA=8, SUPPORT_MASK=16, SCALAR_OUTPUT=32 };
+    enum { FIX_ALPHA=1, FIX_BETA=2, FIX_GAMMA=4, REAL_GAMMA=8, SUPPORT_MASK=16, SCALAR_OUTPUT=32, SUPPORT_MULTICHANNELMASK=64 };
     BaseElemWiseOp(int _ninputs, int _flags, double _alpha, double _beta,
                    Scalar _gamma=Scalar::all(0), int _context=1)
     : ninputs(_ninputs), flags(_flags), alpha(_alpha), beta(_beta), gamma(_gamma), context(_context) {}
@@ -467,7 +467,7 @@ struct CmpSOp : public BaseElemWiseOp
 
 struct CopyOp : public BaseElemWiseOp
 {
-    CopyOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK, 1, 1, Scalar::all(0)) {  }
+    CopyOp() : BaseElemWiseOp(1, FIX_ALPHA+FIX_BETA+FIX_GAMMA+SUPPORT_MASK+SUPPORT_MULTICHANNELMASK, 1, 1, Scalar::all(0)) {  }
     void op(const vector<Mat>& src, Mat& dst, const Mat& mask)
     {
         src[0].copyTo(dst, mask);
@@ -489,7 +489,7 @@ struct CopyOp : public BaseElemWiseOp
 
 struct SetOp : public BaseElemWiseOp
 {
-    SetOp() : BaseElemWiseOp(0, FIX_ALPHA+FIX_BETA+SUPPORT_MASK, 1, 1, Scalar::all(0)) {}
+    SetOp() : BaseElemWiseOp(0, FIX_ALPHA+FIX_BETA+SUPPORT_MASK+SUPPORT_MULTICHANNELMASK, 1, 1, Scalar::all(0)) {}
     void op(const vector<Mat>&, Mat& dst, const Mat& mask)
     {
         dst.setTo(gamma, mask);
@@ -1394,7 +1394,8 @@ TEST_P(ElemWiseTest, accuracy)
         op->getRandomSize(rng, size);
         int type = op->getRandomType(rng);
         int depth = CV_MAT_DEPTH(type);
-        bool haveMask = (op->flags & cvtest::BaseElemWiseOp::SUPPORT_MASK) != 0 && rng.uniform(0, 4) == 0;
+        bool haveMask = ((op->flags & cvtest::BaseElemWiseOp::SUPPORT_MASK) != 0
+                || (op->flags & cvtest::BaseElemWiseOp::SUPPORT_MULTICHANNELMASK) != 0) && rng.uniform(0, 4) == 0;
 
         double minval=0, maxval=0;
         op->getValueRange(depth, minval, maxval);
@@ -1403,8 +1404,12 @@ TEST_P(ElemWiseTest, accuracy)
         for( i = 0; i < ninputs; i++ )
             src[i] = cvtest::randomMat(rng, size, type, minval, maxval, true);
         Mat dst0, dst, mask;
-        if( haveMask )
-            mask = cvtest::randomMat(rng, size, CV_8U, 0, 2, true);
+        if( haveMask ) {
+            bool multiChannelMask = (op->flags & cvtest::BaseElemWiseOp::SUPPORT_MULTICHANNELMASK) != 0
+                    && rng.uniform(0, 2) == 0;
+            int masktype = CV_8UC(multiChannelMask ? CV_MAT_CN(type) : 1);
+            mask = cvtest::randomMat(rng, size, masktype, 0, 2, true);
+        }
 
         if( (haveMask || ninputs == 0) && !(op->flags & cvtest::BaseElemWiseOp::SCALAR_OUTPUT))
         {
index bdb5424..cf47c70 100644 (file)
@@ -359,6 +359,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<ReLULayer> create(const LayerParams &params);
     };
 
+    class CV_EXPORTS ReLU6Layer : public ActivationLayer
+    {
+    public:
+        static Ptr<ReLU6Layer> create(const LayerParams &params);
+    };
+
     class CV_EXPORTS ChannelsPReLULayer : public ActivationLayer
     {
     public:
index be54356..bd79669 100644 (file)
@@ -709,6 +709,19 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     CV_EXPORTS_W Mat blobFromImages(const std::vector<Mat>& images, double scalefactor=1.0,
                                     Size size = Size(), const Scalar& mean = Scalar(), bool swapRB=true);
 
+    /** @brief Convert all weights of Caffe network to half precision floating point.
+     * @param src Path to origin model from Caffe framework contains single
+     *            precision floating point weights (usually has `.caffemodel` extension).
+     * @param dst Path to destination model with updated weights.
+     *
+     * @note Shrunken model has no original float32 weights so it can't be used
+     *       in the original Caffe framework anymore. However, the structure of data
+     *       is taken from NVIDIA's Caffe fork: https://github.com/NVIDIA/caffe.
+     *       So the resulting model may be used there.
+     */
+    CV_EXPORTS_W void shrinkCaffeModel(const String& src, const String& dst);
+
+
 //! @}
 CV__DNN_EXPERIMENTAL_NS_END
 }
index f866183..8f5327e 100644 (file)
@@ -250,6 +250,7 @@ const ::google::protobuf::internal::GeneratedMessageReflection*
 const ::google::protobuf::Descriptor* NormalizedBBox_descriptor_ = NULL;
 const ::google::protobuf::internal::GeneratedMessageReflection*
   NormalizedBBox_reflection_ = NULL;
+const ::google::protobuf::EnumDescriptor* Type_descriptor_ = NULL;
 const ::google::protobuf::EnumDescriptor* Phase_descriptor_ = NULL;
 
 }  // namespace
@@ -277,12 +278,14 @@ void protobuf_AssignDesc_caffe_2eproto() {
       sizeof(BlobShape),
       GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobShape, _internal_metadata_));
   BlobProto_descriptor_ = file->message_type(1);
-  static const int BlobProto_offsets_[9] = {
+  static const int BlobProto_offsets_[11] = {
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, shape_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, data_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, diff_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, double_data_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, double_diff_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, raw_data_type_),
+    GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, raw_data_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, num_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, channels_),
     GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(BlobProto, height_),
@@ -1633,7 +1636,8 @@ void protobuf_AssignDesc_caffe_2eproto() {
       -1,
       sizeof(NormalizedBBox),
       GOOGLE_PROTOBUF_GENERATED_MESSAGE_FIELD_OFFSET(NormalizedBBox, _internal_metadata_));
-  Phase_descriptor_ = file->enum_type(0);
+  Type_descriptor_ = file->enum_type(0);
+  Phase_descriptor_ = file->enum_type(1);
 }
 
 namespace {
@@ -1935,6 +1939,7 @@ void protobuf_InitDefaults_caffe_2eproto_impl() {
   GOOGLE_PROTOBUF_VERIFY_VERSION;
 
   BlobShape_default_instance_.DefaultConstruct();
+  ::google::protobuf::internal::GetEmptyString();
   BlobProto_default_instance_.DefaultConstruct();
   BlobProtoVector_default_instance_.DefaultConstruct();
   PermuteParameter_default_instance_.DefaultConstruct();
@@ -2113,427 +2118,430 @@ void protobuf_AddDesc_caffe_2eproto_impl() {
   protobuf_InitDefaults_caffe_2eproto();
   ::google::protobuf::DescriptorPool::InternalAddGeneratedFile(
     "\n\013caffe.proto\022\005caffe\"\034\n\tBlobShape\022\017\n\003dim"
-    "\030\001 \003(\003B\002\020\001\"\314\001\n\tBlobProto\022\037\n\005shape\030\007 \001(\0132"
+    "\030\001 \003(\003B\002\020\001\"\206\002\n\tBlobProto\022\037\n\005shape\030\007 \001(\0132"
     "\020.caffe.BlobShape\022\020\n\004data\030\005 \003(\002B\002\020\001\022\020\n\004d"
     "iff\030\006 \003(\002B\002\020\001\022\027\n\013double_data\030\010 \003(\001B\002\020\001\022\027"
-    "\n\013double_diff\030\t \003(\001B\002\020\001\022\016\n\003num\030\001 \001(\005:\0010\022"
-    "\023\n\010channels\030\002 \001(\005:\0010\022\021\n\006height\030\003 \001(\005:\0010\022"
-    "\020\n\005width\030\004 \001(\005:\0010\"2\n\017BlobProtoVector\022\037\n\005"
-    "blobs\030\001 \003(\0132\020.caffe.BlobProto\"!\n\020Permute"
-    "Parameter\022\r\n\005order\030\001 \003(\r\"\226\001\n\026NormalizeBB"
-    "oxParameter\022\034\n\016across_spatial\030\001 \001(\010:\004tru"
-    "e\022,\n\014scale_filler\030\002 \001(\0132\026.caffe.FillerPa"
-    "rameter\022\034\n\016channel_shared\030\003 \001(\010:\004true\022\022\n"
-    "\003eps\030\004 \001(\002:\0051e-10\"\243\002\n\021PriorBoxParameter\022"
-    "\020\n\010min_size\030\001 \001(\002\022\020\n\010max_size\030\002 \001(\002\022\024\n\014a"
-    "spect_ratio\030\003 \003(\002\022\022\n\004flip\030\004 \001(\010:\004true\022\022\n"
-    "\004clip\030\005 \001(\010:\004true\022\020\n\010variance\030\006 \003(\002\022\020\n\010i"
-    "mg_size\030\007 \001(\r\022\r\n\005img_h\030\010 \001(\r\022\r\n\005img_w\030\t "
-    "\001(\r\022\014\n\004step\030\n \001(\002\022\016\n\006step_h\030\013 \001(\002\022\016\n\006ste"
-    "p_w\030\014 \001(\002\022\023\n\006offset\030\r \001(\002:\0030.5\"\'\n\010CodeTy"
-    "pe\022\n\n\006CORNER\020\001\022\017\n\013CENTER_SIZE\020\002\"\375\002\n\030Dete"
-    "ctionOutputParameter\022\023\n\013num_classes\030\001 \001("
-    "\r\022\034\n\016share_location\030\002 \001(\010:\004true\022\036\n\023backg"
-    "round_label_id\030\003 \001(\005:\0010\0228\n\tnms_param\030\004 \001"
-    "(\0132%.caffe.NonMaximumSuppressionParamete"
-    "r\0225\n\021save_output_param\030\005 \001(\0132\032.caffe.Sav"
-    "eOutputParameter\022<\n\tcode_type\030\006 \001(\0162!.ca"
-    "ffe.PriorBoxParameter.CodeType:\006CORNER\022)"
-    "\n\032variance_encoded_in_target\030\010 \001(\010:\005fals"
-    "e\022\026\n\nkeep_top_k\030\007 \001(\005:\002-1\022\034\n\024confidence_"
-    "threshold\030\t \001(\002\"\201\001\n\005Datum\022\020\n\010channels\030\001 "
-    "\001(\005\022\016\n\006height\030\002 \001(\005\022\r\n\005width\030\003 \001(\005\022\014\n\004da"
-    "ta\030\004 \001(\014\022\r\n\005label\030\005 \001(\005\022\022\n\nfloat_data\030\006 "
-    "\003(\002\022\026\n\007encoded\030\007 \001(\010:\005false\"\212\002\n\017FillerPa"
-    "rameter\022\026\n\004type\030\001 \001(\t:\010constant\022\020\n\005value"
-    "\030\002 \001(\002:\0010\022\016\n\003min\030\003 \001(\002:\0010\022\016\n\003max\030\004 \001(\002:\001"
-    "1\022\017\n\004mean\030\005 \001(\002:\0010\022\016\n\003std\030\006 \001(\002:\0011\022\022\n\006sp"
-    "arse\030\007 \001(\005:\002-1\022B\n\rvariance_norm\030\010 \001(\0162#."
-    "caffe.FillerParameter.VarianceNorm:\006FAN_"
-    "IN\"4\n\014VarianceNorm\022\n\n\006FAN_IN\020\000\022\013\n\007FAN_OU"
-    "T\020\001\022\013\n\007AVERAGE\020\002\"\216\002\n\014NetParameter\022\014\n\004nam"
-    "e\030\001 \001(\t\022\r\n\005input\030\003 \003(\t\022%\n\013input_shape\030\010 "
-    "\003(\0132\020.caffe.BlobShape\022\021\n\tinput_dim\030\004 \003(\005"
-    "\022\035\n\016force_backward\030\005 \001(\010:\005false\022\036\n\005state"
-    "\030\006 \001(\0132\017.caffe.NetState\022\031\n\ndebug_info\030\007 "
-    "\001(\010:\005false\022$\n\005layer\030d \003(\0132\025.caffe.LayerP"
-    "arameter\022\'\n\006layers\030\002 \003(\0132\027.caffe.V1Layer"
-    "Parameter\"\242\n\n\017SolverParameter\022\013\n\003net\030\030 \001"
-    "(\t\022&\n\tnet_param\030\031 \001(\0132\023.caffe.NetParamet"
-    "er\022\021\n\ttrain_net\030\001 \001(\t\022\020\n\010test_net\030\002 \003(\t\022"
-    ",\n\017train_net_param\030\025 \001(\0132\023.caffe.NetPara"
-    "meter\022+\n\016test_net_param\030\026 \003(\0132\023.caffe.Ne"
-    "tParameter\022$\n\013train_state\030\032 \001(\0132\017.caffe."
-    "NetState\022#\n\ntest_state\030\033 \003(\0132\017.caffe.Net"
-    "State\022\021\n\ttest_iter\030\003 \003(\005\022\030\n\rtest_interva"
-    "l\030\004 \001(\005:\0010\022 \n\021test_compute_loss\030\023 \001(\010:\005f"
-    "alse\022!\n\023test_initialization\030  \001(\010:\004true\022"
-    "\017\n\007base_lr\030\005 \001(\002\022\017\n\007display\030\006 \001(\005\022\027\n\014ave"
-    "rage_loss\030! \001(\005:\0011\022\020\n\010max_iter\030\007 \001(\005\022\024\n\t"
-    "iter_size\030$ \001(\005:\0011\022\021\n\tlr_policy\030\010 \001(\t\022\r\n"
-    "\005gamma\030\t \001(\002\022\r\n\005power\030\n \001(\002\022\020\n\010momentum\030"
-    "\013 \001(\002\022\024\n\014weight_decay\030\014 \001(\002\022\037\n\023regulariz"
-    "ation_type\030\035 \001(\t:\002L2\022\020\n\010stepsize\030\r \001(\005\022\021"
-    "\n\tstepvalue\030\" \003(\005\022\032\n\016clip_gradients\030# \001("
-    "\002:\002-1\022\023\n\010snapshot\030\016 \001(\005:\0010\022\027\n\017snapshot_p"
-    "refix\030\017 \001(\t\022\034\n\rsnapshot_diff\030\020 \001(\010:\005fals"
-    "e\022K\n\017snapshot_format\030% \001(\0162%.caffe.Solve"
-    "rParameter.SnapshotFormat:\013BINARYPROTO\022;"
-    "\n\013solver_mode\030\021 \001(\0162!.caffe.SolverParame"
-    "ter.SolverMode:\003GPU\022\024\n\tdevice_id\030\022 \001(\005:\001"
-    "0\022\027\n\013random_seed\030\024 \001(\003:\002-1\022\021\n\004type\030( \001(\t"
-    ":\003SGD\022\024\n\005delta\030\037 \001(\002:\0051e-08\022\030\n\tmomentum2"
-    "\030\' \001(\002:\0050.999\022\027\n\trms_decay\030& \001(\002:\0040.99\022\031"
-    "\n\ndebug_info\030\027 \001(\010:\005false\022\"\n\024snapshot_af"
-    "ter_train\030\034 \001(\010:\004true\022;\n\013solver_type\030\036 \001"
-    "(\0162!.caffe.SolverParameter.SolverType:\003S"
-    "GD\"+\n\016SnapshotFormat\022\010\n\004HDF5\020\000\022\017\n\013BINARY"
-    "PROTO\020\001\"\036\n\nSolverMode\022\007\n\003CPU\020\000\022\007\n\003GPU\020\001\""
-    "U\n\nSolverType\022\007\n\003SGD\020\000\022\014\n\010NESTEROV\020\001\022\013\n\007"
-    "ADAGRAD\020\002\022\013\n\007RMSPROP\020\003\022\014\n\010ADADELTA\020\004\022\010\n\004"
-    "ADAM\020\005\"l\n\013SolverState\022\014\n\004iter\030\001 \001(\005\022\023\n\013l"
-    "earned_net\030\002 \001(\t\022!\n\007history\030\003 \003(\0132\020.caff"
-    "e.BlobProto\022\027\n\014current_step\030\004 \001(\005:\0010\"N\n\010"
-    "NetState\022!\n\005phase\030\001 \001(\0162\014.caffe.Phase:\004T"
-    "EST\022\020\n\005level\030\002 \001(\005:\0010\022\r\n\005stage\030\003 \003(\t\"s\n\014"
-    "NetStateRule\022\033\n\005phase\030\001 \001(\0162\014.caffe.Phas"
-    "e\022\021\n\tmin_level\030\002 \001(\005\022\021\n\tmax_level\030\003 \001(\005\022"
-    "\r\n\005stage\030\004 \003(\t\022\021\n\tnot_stage\030\005 \003(\t\"\243\001\n\tPa"
-    "ramSpec\022\014\n\004name\030\001 \001(\t\0221\n\nshare_mode\030\002 \001("
-    "\0162\035.caffe.ParamSpec.DimCheckMode\022\022\n\007lr_m"
-    "ult\030\003 \001(\002:\0011\022\025\n\ndecay_mult\030\004 \001(\002:\0011\"*\n\014D"
-    "imCheckMode\022\n\n\006STRICT\020\000\022\016\n\nPERMISSIVE\020\001\""
-    "\335\025\n\016LayerParameter\022\014\n\004name\030\001 \001(\t\022\014\n\004type"
-    "\030\002 \001(\t\022\016\n\006bottom\030\003 \003(\t\022\013\n\003top\030\004 \003(\t\022\033\n\005p"
-    "hase\030\n \001(\0162\014.caffe.Phase\022\023\n\013loss_weight\030"
-    "\005 \003(\002\022\037\n\005param\030\006 \003(\0132\020.caffe.ParamSpec\022\037"
-    "\n\005blobs\030\007 \003(\0132\020.caffe.BlobProto\022\026\n\016propa"
-    "gate_down\030\013 \003(\010\022$\n\007include\030\010 \003(\0132\023.caffe"
-    ".NetStateRule\022$\n\007exclude\030\t \003(\0132\023.caffe.N"
-    "etStateRule\0227\n\017transform_param\030d \001(\0132\036.c"
-    "affe.TransformationParameter\022(\n\nloss_par"
-    "am\030e \001(\0132\024.caffe.LossParameter\0220\n\016accura"
-    "cy_param\030f \001(\0132\030.caffe.AccuracyParameter"
-    "\022,\n\014argmax_param\030g \001(\0132\026.caffe.ArgMaxPar"
-    "ameter\0224\n\020batch_norm_param\030\213\001 \001(\0132\031.caff"
-    "e.BatchNormParameter\022)\n\nbias_param\030\215\001 \001("
-    "\0132\024.caffe.BiasParameter\022,\n\014concat_param\030"
-    "h \001(\0132\026.caffe.ConcatParameter\022\?\n\026contras"
-    "tive_loss_param\030i \001(\0132\037.caffe.Contrastiv"
-    "eLossParameter\0226\n\021convolution_param\030j \001("
-    "\0132\033.caffe.ConvolutionParameter\022)\n\ncrop_p"
-    "aram\030\220\001 \001(\0132\024.caffe.CropParameter\022(\n\ndat"
-    "a_param\030k \001(\0132\024.caffe.DataParameter\022@\n\026d"
-    "etection_output_param\030\223\001 \001(\0132\037.caffe.Det"
-    "ectionOutputParameter\022.\n\rdropout_param\030l"
-    " \001(\0132\027.caffe.DropoutParameter\0223\n\020dummy_d"
-    "ata_param\030m \001(\0132\031.caffe.DummyDataParamet"
-    "er\022.\n\reltwise_param\030n \001(\0132\027.caffe.Eltwis"
-    "eParameter\022\'\n\telu_param\030\214\001 \001(\0132\023.caffe.E"
-    "LUParameter\022+\n\013embed_param\030\211\001 \001(\0132\025.caff"
-    "e.EmbedParameter\022&\n\texp_param\030o \001(\0132\023.ca"
-    "ffe.ExpParameter\022/\n\rflatten_param\030\207\001 \001(\013"
-    "2\027.caffe.FlattenParameter\0221\n\017hdf5_data_p"
-    "aram\030p \001(\0132\030.caffe.HDF5DataParameter\0225\n\021"
-    "hdf5_output_param\030q \001(\0132\032.caffe.HDF5Outp"
-    "utParameter\0223\n\020hinge_loss_param\030r \001(\0132\031."
-    "caffe.HingeLossParameter\0223\n\020image_data_p"
-    "aram\030s \001(\0132\031.caffe.ImageDataParameter\0229\n"
-    "\023infogain_loss_param\030t \001(\0132\034.caffe.Infog"
-    "ainLossParameter\0229\n\023inner_product_param\030"
-    "u \001(\0132\034.caffe.InnerProductParameter\022+\n\013i"
-    "nput_param\030\217\001 \001(\0132\025.caffe.InputParameter"
-    "\022\'\n\tlog_param\030\206\001 \001(\0132\023.caffe.LogParamete"
-    "r\022&\n\tlrn_param\030v \001(\0132\023.caffe.LRNParamete"
-    "r\0225\n\021memory_data_param\030w \001(\0132\032.caffe.Mem"
-    "oryDataParameter\022&\n\tmvn_param\030x \001(\0132\023.ca"
-    "ffe.MVNParameter\0222\n\nnorm_param\030\225\001 \001(\0132\035."
-    "caffe.NormalizeBBoxParameter\022/\n\rpermute_"
-    "param\030\224\001 \001(\0132\027.caffe.PermuteParameter\0223\n"
-    "\017parameter_param\030\221\001 \001(\0132\031.caffe.Paramete"
-    "rParameter\022.\n\rpooling_param\030y \001(\0132\027.caff"
-    "e.PoolingParameter\022*\n\013power_param\030z \001(\0132"
-    "\025.caffe.PowerParameter\022+\n\013prelu_param\030\203\001"
-    " \001(\0132\025.caffe.PReLUParameter\0222\n\017prior_box"
-    "_param\030\226\001 \001(\0132\030.caffe.PriorBoxParameter\022"
-    "-\n\014python_param\030\202\001 \001(\0132\026.caffe.PythonPar"
-    "ameter\0223\n\017recurrent_param\030\222\001 \001(\0132\031.caffe"
-    ".RecurrentParameter\0223\n\017reduction_param\030\210"
-    "\001 \001(\0132\031.caffe.ReductionParameter\022(\n\nrelu"
-    "_param\030{ \001(\0132\024.caffe.ReLUParameter\022/\n\rre"
-    "shape_param\030\205\001 \001(\0132\027.caffe.ReshapeParame"
-    "ter\022+\n\013scale_param\030\216\001 \001(\0132\025.caffe.ScaleP"
-    "arameter\022.\n\rsigmoid_param\030| \001(\0132\027.caffe."
-    "SigmoidParameter\022.\n\rsoftmax_param\030} \001(\0132"
-    "\027.caffe.SoftmaxParameter\022\'\n\tspp_param\030\204\001"
-    " \001(\0132\023.caffe.SPPParameter\022*\n\013slice_param"
-    "\030~ \001(\0132\025.caffe.SliceParameter\022(\n\ntanh_pa"
-    "ram\030\177 \001(\0132\024.caffe.TanHParameter\0223\n\017thres"
-    "hold_param\030\200\001 \001(\0132\031.caffe.ThresholdParam"
-    "eter\022)\n\ntile_param\030\212\001 \001(\0132\024.caffe.TilePa"
-    "rameter\0226\n\021window_data_param\030\201\001 \001(\0132\032.ca"
-    "ffe.WindowDataParameter\"\266\001\n\027Transformati"
-    "onParameter\022\020\n\005scale\030\001 \001(\002:\0011\022\025\n\006mirror\030"
-    "\002 \001(\010:\005false\022\024\n\tcrop_size\030\003 \001(\r:\0010\022\021\n\tme"
-    "an_file\030\004 \001(\t\022\022\n\nmean_value\030\005 \003(\002\022\032\n\013for"
-    "ce_color\030\006 \001(\010:\005false\022\031\n\nforce_gray\030\007 \001("
-    "\010:\005false\"\302\001\n\rLossParameter\022\024\n\014ignore_lab"
-    "el\030\001 \001(\005\022D\n\rnormalization\030\003 \001(\0162&.caffe."
-    "LossParameter.NormalizationMode:\005VALID\022\021"
-    "\n\tnormalize\030\002 \001(\010\"B\n\021NormalizationMode\022\010"
-    "\n\004FULL\020\000\022\t\n\005VALID\020\001\022\016\n\nBATCH_SIZE\020\002\022\010\n\004N"
-    "ONE\020\003\"L\n\021AccuracyParameter\022\020\n\005top_k\030\001 \001("
-    "\r:\0011\022\017\n\004axis\030\002 \001(\005:\0011\022\024\n\014ignore_label\030\003 "
-    "\001(\005\"M\n\017ArgMaxParameter\022\032\n\013out_max_val\030\001 "
-    "\001(\010:\005false\022\020\n\005top_k\030\002 \001(\r:\0011\022\014\n\004axis\030\003 \001"
-    "(\005\"9\n\017ConcatParameter\022\017\n\004axis\030\002 \001(\005:\0011\022\025"
-    "\n\nconcat_dim\030\001 \001(\r:\0011\"j\n\022BatchNormParame"
-    "ter\022\030\n\020use_global_stats\030\001 \001(\010\022&\n\027moving_"
-    "average_fraction\030\002 \001(\002:\0050.999\022\022\n\003eps\030\003 \001"
-    "(\002:\0051e-05\"]\n\rBiasParameter\022\017\n\004axis\030\001 \001(\005"
-    ":\0011\022\023\n\010num_axes\030\002 \001(\005:\0011\022&\n\006filler\030\003 \001(\013"
-    "2\026.caffe.FillerParameter\"L\n\030ContrastiveL"
-    "ossParameter\022\021\n\006margin\030\001 \001(\002:\0011\022\035\n\016legac"
-    "y_version\030\002 \001(\010:\005false\"\374\003\n\024ConvolutionPa"
-    "rameter\022\022\n\nnum_output\030\001 \001(\r\022\027\n\tbias_term"
-    "\030\002 \001(\010:\004true\022\013\n\003pad\030\003 \003(\r\022\023\n\013kernel_size"
-    "\030\004 \003(\r\022\016\n\006stride\030\006 \003(\r\022\020\n\010dilation\030\022 \003(\r"
-    "\022\020\n\005pad_h\030\t \001(\r:\0010\022\020\n\005pad_w\030\n \001(\r:\0010\022\020\n\010"
-    "kernel_h\030\013 \001(\r\022\020\n\010kernel_w\030\014 \001(\r\022\020\n\010stri"
-    "de_h\030\r \001(\r\022\020\n\010stride_w\030\016 \001(\r\022\020\n\005group\030\005 "
-    "\001(\r:\0011\022-\n\rweight_filler\030\007 \001(\0132\026.caffe.Fi"
-    "llerParameter\022+\n\013bias_filler\030\010 \001(\0132\026.caf"
-    "fe.FillerParameter\022;\n\006engine\030\017 \001(\0162\".caf"
-    "fe.ConvolutionParameter.Engine:\007DEFAULT\022"
-    "\017\n\004axis\030\020 \001(\005:\0011\022\036\n\017force_nd_im2col\030\021 \001("
-    "\010:\005false\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE"
-    "\020\001\022\t\n\005CUDNN\020\002\"0\n\rCropParameter\022\017\n\004axis\030\001"
-    " \001(\005:\0012\022\016\n\006offset\030\002 \003(\r\"\244\002\n\rDataParamete"
-    "r\022\016\n\006source\030\001 \001(\t\022\022\n\nbatch_size\030\004 \001(\r\022\024\n"
-    "\trand_skip\030\007 \001(\r:\0010\0221\n\007backend\030\010 \001(\0162\027.c"
-    "affe.DataParameter.DB:\007LEVELDB\022\020\n\005scale\030"
-    "\002 \001(\002:\0011\022\021\n\tmean_file\030\003 \001(\t\022\024\n\tcrop_size"
-    "\030\005 \001(\r:\0010\022\025\n\006mirror\030\006 \001(\010:\005false\022\"\n\023forc"
-    "e_encoded_color\030\t \001(\010:\005false\022\023\n\010prefetch"
-    "\030\n \001(\r:\0014\"\033\n\002DB\022\013\n\007LEVELDB\020\000\022\010\n\004LMDB\020\001\"["
-    "\n\036NonMaximumSuppressionParameter\022\032\n\rnms_"
-    "threshold\030\001 \001(\002:\0030.3\022\r\n\005top_k\030\002 \001(\005\022\016\n\003e"
-    "ta\030\003 \001(\002:\0011\"\252\001\n\023SaveOutputParameter\022\030\n\020o"
-    "utput_directory\030\001 \001(\t\022\032\n\022output_name_pre"
-    "fix\030\002 \001(\t\022\025\n\routput_format\030\003 \001(\t\022\026\n\016labe"
-    "l_map_file\030\004 \001(\t\022\026\n\016name_size_file\030\005 \001(\t"
-    "\022\026\n\016num_test_image\030\006 \001(\r\".\n\020DropoutParam"
-    "eter\022\032\n\rdropout_ratio\030\001 \001(\002:\0030.5\"\240\001\n\022Dum"
-    "myDataParameter\022+\n\013data_filler\030\001 \003(\0132\026.c"
-    "affe.FillerParameter\022\037\n\005shape\030\006 \003(\0132\020.ca"
-    "ffe.BlobShape\022\013\n\003num\030\002 \003(\r\022\020\n\010channels\030\003"
-    " \003(\r\022\016\n\006height\030\004 \003(\r\022\r\n\005width\030\005 \003(\r\"\245\001\n\020"
-    "EltwiseParameter\0229\n\toperation\030\001 \001(\0162!.ca"
-    "ffe.EltwiseParameter.EltwiseOp:\003SUM\022\r\n\005c"
-    "oeff\030\002 \003(\002\022\036\n\020stable_prod_grad\030\003 \001(\010:\004tr"
-    "ue\"\'\n\tEltwiseOp\022\010\n\004PROD\020\000\022\007\n\003SUM\020\001\022\007\n\003MA"
-    "X\020\002\" \n\014ELUParameter\022\020\n\005alpha\030\001 \001(\002:\0011\"\254\001"
-    "\n\016EmbedParameter\022\022\n\nnum_output\030\001 \001(\r\022\021\n\t"
-    "input_dim\030\002 \001(\r\022\027\n\tbias_term\030\003 \001(\010:\004true"
-    "\022-\n\rweight_filler\030\004 \001(\0132\026.caffe.FillerPa"
-    "rameter\022+\n\013bias_filler\030\005 \001(\0132\026.caffe.Fil"
-    "lerParameter\"D\n\014ExpParameter\022\020\n\004base\030\001 \001"
-    "(\002:\002-1\022\020\n\005scale\030\002 \001(\002:\0011\022\020\n\005shift\030\003 \001(\002:"
-    "\0010\"9\n\020FlattenParameter\022\017\n\004axis\030\001 \001(\005:\0011\022"
-    "\024\n\010end_axis\030\002 \001(\005:\002-1\"O\n\021HDF5DataParamet"
-    "er\022\016\n\006source\030\001 \001(\t\022\022\n\nbatch_size\030\002 \001(\r\022\026"
-    "\n\007shuffle\030\003 \001(\010:\005false\"(\n\023HDF5OutputPara"
-    "meter\022\021\n\tfile_name\030\001 \001(\t\"^\n\022HingeLossPar"
-    "ameter\0220\n\004norm\030\001 \001(\0162\036.caffe.HingeLossPa"
-    "rameter.Norm:\002L1\"\026\n\004Norm\022\006\n\002L1\020\001\022\006\n\002L2\020\002"
-    "\"\227\002\n\022ImageDataParameter\022\016\n\006source\030\001 \001(\t\022"
-    "\025\n\nbatch_size\030\004 \001(\r:\0011\022\024\n\trand_skip\030\007 \001("
-    "\r:\0010\022\026\n\007shuffle\030\010 \001(\010:\005false\022\025\n\nnew_heig"
-    "ht\030\t \001(\r:\0010\022\024\n\tnew_width\030\n \001(\r:\0010\022\026\n\010is_"
-    "color\030\013 \001(\010:\004true\022\020\n\005scale\030\002 \001(\002:\0011\022\021\n\tm"
-    "ean_file\030\003 \001(\t\022\024\n\tcrop_size\030\005 \001(\r:\0010\022\025\n\006"
-    "mirror\030\006 \001(\010:\005false\022\025\n\013root_folder\030\014 \001(\t"
-    ":\000\"\'\n\025InfogainLossParameter\022\016\n\006source\030\001 "
-    "\001(\t\"\313\001\n\025InnerProductParameter\022\022\n\nnum_out"
-    "put\030\001 \001(\r\022\027\n\tbias_term\030\002 \001(\010:\004true\022-\n\rwe"
-    "ight_filler\030\003 \001(\0132\026.caffe.FillerParamete"
-    "r\022+\n\013bias_filler\030\004 \001(\0132\026.caffe.FillerPar"
-    "ameter\022\017\n\004axis\030\005 \001(\005:\0011\022\030\n\ttranspose\030\006 \001"
-    "(\010:\005false\"1\n\016InputParameter\022\037\n\005shape\030\001 \003"
-    "(\0132\020.caffe.BlobShape\"D\n\014LogParameter\022\020\n\004"
-    "base\030\001 \001(\002:\002-1\022\020\n\005scale\030\002 \001(\002:\0011\022\020\n\005shif"
-    "t\030\003 \001(\002:\0010\"\270\002\n\014LRNParameter\022\025\n\nlocal_siz"
-    "e\030\001 \001(\r:\0015\022\020\n\005alpha\030\002 \001(\002:\0011\022\022\n\004beta\030\003 \001"
-    "(\002:\0040.75\022D\n\013norm_region\030\004 \001(\0162\036.caffe.LR"
-    "NParameter.NormRegion:\017ACROSS_CHANNELS\022\014"
-    "\n\001k\030\005 \001(\002:\0011\0223\n\006engine\030\006 \001(\0162\032.caffe.LRN"
-    "Parameter.Engine:\007DEFAULT\"5\n\nNormRegion\022"
-    "\023\n\017ACROSS_CHANNELS\020\000\022\022\n\016WITHIN_CHANNEL\020\001"
-    "\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CU"
-    "DNN\020\002\"Z\n\023MemoryDataParameter\022\022\n\nbatch_si"
-    "ze\030\001 \001(\r\022\020\n\010channels\030\002 \001(\r\022\016\n\006height\030\003 \001"
-    "(\r\022\r\n\005width\030\004 \001(\r\"d\n\014MVNParameter\022 \n\022nor"
-    "malize_variance\030\001 \001(\010:\004true\022\036\n\017across_ch"
-    "annels\030\002 \001(\010:\005false\022\022\n\003eps\030\003 \001(\002:\0051e-09\""
-    "5\n\022ParameterParameter\022\037\n\005shape\030\001 \001(\0132\020.c"
-    "affe.BlobShape\"\242\003\n\020PoolingParameter\0225\n\004p"
-    "ool\030\001 \001(\0162\".caffe.PoolingParameter.PoolM"
-    "ethod:\003MAX\022\016\n\003pad\030\004 \001(\r:\0010\022\020\n\005pad_h\030\t \001("
-    "\r:\0010\022\020\n\005pad_w\030\n \001(\r:\0010\022\023\n\013kernel_size\030\002 "
-    "\001(\r\022\020\n\010kernel_h\030\005 \001(\r\022\020\n\010kernel_w\030\006 \001(\r\022"
-    "\021\n\006stride\030\003 \001(\r:\0011\022\020\n\010stride_h\030\007 \001(\r\022\020\n\010"
-    "stride_w\030\010 \001(\r\0227\n\006engine\030\013 \001(\0162\036.caffe.P"
-    "oolingParameter.Engine:\007DEFAULT\022\035\n\016globa"
-    "l_pooling\030\014 \001(\010:\005false\".\n\nPoolMethod\022\007\n\003"
-    "MAX\020\000\022\007\n\003AVE\020\001\022\016\n\nSTOCHASTIC\020\002\"+\n\006Engine"
-    "\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"F\n\016P"
-    "owerParameter\022\020\n\005power\030\001 \001(\002:\0011\022\020\n\005scale"
-    "\030\002 \001(\002:\0011\022\020\n\005shift\030\003 \001(\002:\0010\"g\n\017PythonPar"
-    "ameter\022\016\n\006module\030\001 \001(\t\022\r\n\005layer\030\002 \001(\t\022\023\n"
-    "\tparam_str\030\003 \001(\t:\000\022 \n\021share_in_parallel\030"
-    "\004 \001(\010:\005false\"\300\001\n\022RecurrentParameter\022\025\n\nn"
-    "um_output\030\001 \001(\r:\0010\022-\n\rweight_filler\030\002 \001("
+    "\n\013double_diff\030\t \003(\001B\002\020\001\022\"\n\rraw_data_type"
+    "\030\n \001(\0162\013.caffe.Type\022\024\n\010raw_data\030\014 \001(\014B\002\020"
+    "\000\022\016\n\003num\030\001 \001(\005:\0010\022\023\n\010channels\030\002 \001(\005:\0010\022\021"
+    "\n\006height\030\003 \001(\005:\0010\022\020\n\005width\030\004 \001(\005:\0010\"2\n\017B"
+    "lobProtoVector\022\037\n\005blobs\030\001 \003(\0132\020.caffe.Bl"
+    "obProto\"!\n\020PermuteParameter\022\r\n\005order\030\001 \003"
+    "(\r\"\226\001\n\026NormalizeBBoxParameter\022\034\n\016across_"
+    "spatial\030\001 \001(\010:\004true\022,\n\014scale_filler\030\002 \001("
+    "\0132\026.caffe.FillerParameter\022\034\n\016channel_sha"
+    "red\030\003 \001(\010:\004true\022\022\n\003eps\030\004 \001(\002:\0051e-10\"\243\002\n\021"
+    "PriorBoxParameter\022\020\n\010min_size\030\001 \001(\002\022\020\n\010m"
+    "ax_size\030\002 \001(\002\022\024\n\014aspect_ratio\030\003 \003(\002\022\022\n\004f"
+    "lip\030\004 \001(\010:\004true\022\022\n\004clip\030\005 \001(\010:\004true\022\020\n\010v"
+    "ariance\030\006 \003(\002\022\020\n\010img_size\030\007 \001(\r\022\r\n\005img_h"
+    "\030\010 \001(\r\022\r\n\005img_w\030\t \001(\r\022\014\n\004step\030\n \001(\002\022\016\n\006s"
+    "tep_h\030\013 \001(\002\022\016\n\006step_w\030\014 \001(\002\022\023\n\006offset\030\r "
+    "\001(\002:\0030.5\"\'\n\010CodeType\022\n\n\006CORNER\020\001\022\017\n\013CENT"
+    "ER_SIZE\020\002\"\375\002\n\030DetectionOutputParameter\022\023"
+    "\n\013num_classes\030\001 \001(\r\022\034\n\016share_location\030\002 "
+    "\001(\010:\004true\022\036\n\023background_label_id\030\003 \001(\005:\001"
+    "0\0228\n\tnms_param\030\004 \001(\0132%.caffe.NonMaximumS"
+    "uppressionParameter\0225\n\021save_output_param"
+    "\030\005 \001(\0132\032.caffe.SaveOutputParameter\022<\n\tco"
+    "de_type\030\006 \001(\0162!.caffe.PriorBoxParameter."
+    "CodeType:\006CORNER\022)\n\032variance_encoded_in_"
+    "target\030\010 \001(\010:\005false\022\026\n\nkeep_top_k\030\007 \001(\005:"
+    "\002-1\022\034\n\024confidence_threshold\030\t \001(\002\"\201\001\n\005Da"
+    "tum\022\020\n\010channels\030\001 \001(\005\022\016\n\006height\030\002 \001(\005\022\r\n"
+    "\005width\030\003 \001(\005\022\014\n\004data\030\004 \001(\014\022\r\n\005label\030\005 \001("
+    "\005\022\022\n\nfloat_data\030\006 \003(\002\022\026\n\007encoded\030\007 \001(\010:\005"
+    "false\"\212\002\n\017FillerParameter\022\026\n\004type\030\001 \001(\t:"
+    "\010constant\022\020\n\005value\030\002 \001(\002:\0010\022\016\n\003min\030\003 \001(\002"
+    ":\0010\022\016\n\003max\030\004 \001(\002:\0011\022\017\n\004mean\030\005 \001(\002:\0010\022\016\n\003"
+    "std\030\006 \001(\002:\0011\022\022\n\006sparse\030\007 \001(\005:\002-1\022B\n\rvari"
+    "ance_norm\030\010 \001(\0162#.caffe.FillerParameter."
+    "VarianceNorm:\006FAN_IN\"4\n\014VarianceNorm\022\n\n\006"
+    "FAN_IN\020\000\022\013\n\007FAN_OUT\020\001\022\013\n\007AVERAGE\020\002\"\216\002\n\014N"
+    "etParameter\022\014\n\004name\030\001 \001(\t\022\r\n\005input\030\003 \003(\t"
+    "\022%\n\013input_shape\030\010 \003(\0132\020.caffe.BlobShape\022"
+    "\021\n\tinput_dim\030\004 \003(\005\022\035\n\016force_backward\030\005 \001"
+    "(\010:\005false\022\036\n\005state\030\006 \001(\0132\017.caffe.NetStat"
+    "e\022\031\n\ndebug_info\030\007 \001(\010:\005false\022$\n\005layer\030d "
+    "\003(\0132\025.caffe.LayerParameter\022\'\n\006layers\030\002 \003"
+    "(\0132\027.caffe.V1LayerParameter\"\242\n\n\017SolverPa"
+    "rameter\022\013\n\003net\030\030 \001(\t\022&\n\tnet_param\030\031 \001(\0132"
+    "\023.caffe.NetParameter\022\021\n\ttrain_net\030\001 \001(\t\022"
+    "\020\n\010test_net\030\002 \003(\t\022,\n\017train_net_param\030\025 \001"
+    "(\0132\023.caffe.NetParameter\022+\n\016test_net_para"
+    "m\030\026 \003(\0132\023.caffe.NetParameter\022$\n\013train_st"
+    "ate\030\032 \001(\0132\017.caffe.NetState\022#\n\ntest_state"
+    "\030\033 \003(\0132\017.caffe.NetState\022\021\n\ttest_iter\030\003 \003"
+    "(\005\022\030\n\rtest_interval\030\004 \001(\005:\0010\022 \n\021test_com"
+    "pute_loss\030\023 \001(\010:\005false\022!\n\023test_initializ"
+    "ation\030  \001(\010:\004true\022\017\n\007base_lr\030\005 \001(\002\022\017\n\007di"
+    "splay\030\006 \001(\005\022\027\n\014average_loss\030! \001(\005:\0011\022\020\n\010"
+    "max_iter\030\007 \001(\005\022\024\n\titer_size\030$ \001(\005:\0011\022\021\n\t"
+    "lr_policy\030\010 \001(\t\022\r\n\005gamma\030\t \001(\002\022\r\n\005power\030"
+    "\n \001(\002\022\020\n\010momentum\030\013 \001(\002\022\024\n\014weight_decay\030"
+    "\014 \001(\002\022\037\n\023regularization_type\030\035 \001(\t:\002L2\022\020"
+    "\n\010stepsize\030\r \001(\005\022\021\n\tstepvalue\030\" \003(\005\022\032\n\016c"
+    "lip_gradients\030# \001(\002:\002-1\022\023\n\010snapshot\030\016 \001("
+    "\005:\0010\022\027\n\017snapshot_prefix\030\017 \001(\t\022\034\n\rsnapsho"
+    "t_diff\030\020 \001(\010:\005false\022K\n\017snapshot_format\030%"
+    " \001(\0162%.caffe.SolverParameter.SnapshotFor"
+    "mat:\013BINARYPROTO\022;\n\013solver_mode\030\021 \001(\0162!."
+    "caffe.SolverParameter.SolverMode:\003GPU\022\024\n"
+    "\tdevice_id\030\022 \001(\005:\0010\022\027\n\013random_seed\030\024 \001(\003"
+    ":\002-1\022\021\n\004type\030( \001(\t:\003SGD\022\024\n\005delta\030\037 \001(\002:\005"
+    "1e-08\022\030\n\tmomentum2\030\' \001(\002:\0050.999\022\027\n\trms_d"
+    "ecay\030& \001(\002:\0040.99\022\031\n\ndebug_info\030\027 \001(\010:\005fa"
+    "lse\022\"\n\024snapshot_after_train\030\034 \001(\010:\004true\022"
+    ";\n\013solver_type\030\036 \001(\0162!.caffe.SolverParam"
+    "eter.SolverType:\003SGD\"+\n\016SnapshotFormat\022\010"
+    "\n\004HDF5\020\000\022\017\n\013BINARYPROTO\020\001\"\036\n\nSolverMode\022"
+    "\007\n\003CPU\020\000\022\007\n\003GPU\020\001\"U\n\nSolverType\022\007\n\003SGD\020\000"
+    "\022\014\n\010NESTEROV\020\001\022\013\n\007ADAGRAD\020\002\022\013\n\007RMSPROP\020\003"
+    "\022\014\n\010ADADELTA\020\004\022\010\n\004ADAM\020\005\"l\n\013SolverState\022"
+    "\014\n\004iter\030\001 \001(\005\022\023\n\013learned_net\030\002 \001(\t\022!\n\007hi"
+    "story\030\003 \003(\0132\020.caffe.BlobProto\022\027\n\014current"
+    "_step\030\004 \001(\005:\0010\"N\n\010NetState\022!\n\005phase\030\001 \001("
+    "\0162\014.caffe.Phase:\004TEST\022\020\n\005level\030\002 \001(\005:\0010\022"
+    "\r\n\005stage\030\003 \003(\t\"s\n\014NetStateRule\022\033\n\005phase\030"
+    "\001 \001(\0162\014.caffe.Phase\022\021\n\tmin_level\030\002 \001(\005\022\021"
+    "\n\tmax_level\030\003 \001(\005\022\r\n\005stage\030\004 \003(\t\022\021\n\tnot_"
+    "stage\030\005 \003(\t\"\243\001\n\tParamSpec\022\014\n\004name\030\001 \001(\t\022"
+    "1\n\nshare_mode\030\002 \001(\0162\035.caffe.ParamSpec.Di"
+    "mCheckMode\022\022\n\007lr_mult\030\003 \001(\002:\0011\022\025\n\ndecay_"
+    "mult\030\004 \001(\002:\0011\"*\n\014DimCheckMode\022\n\n\006STRICT\020"
+    "\000\022\016\n\nPERMISSIVE\020\001\"\335\025\n\016LayerParameter\022\014\n\004"
+    "name\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022\016\n\006bottom\030\003 \003(\t"
+    "\022\013\n\003top\030\004 \003(\t\022\033\n\005phase\030\n \001(\0162\014.caffe.Pha"
+    "se\022\023\n\013loss_weight\030\005 \003(\002\022\037\n\005param\030\006 \003(\0132\020"
+    ".caffe.ParamSpec\022\037\n\005blobs\030\007 \003(\0132\020.caffe."
+    "BlobProto\022\026\n\016propagate_down\030\013 \003(\010\022$\n\007inc"
+    "lude\030\010 \003(\0132\023.caffe.NetStateRule\022$\n\007exclu"
+    "de\030\t \003(\0132\023.caffe.NetStateRule\0227\n\017transfo"
+    "rm_param\030d \001(\0132\036.caffe.TransformationPar"
+    "ameter\022(\n\nloss_param\030e \001(\0132\024.caffe.LossP"
+    "arameter\0220\n\016accuracy_param\030f \001(\0132\030.caffe"
+    ".AccuracyParameter\022,\n\014argmax_param\030g \001(\013"
+    "2\026.caffe.ArgMaxParameter\0224\n\020batch_norm_p"
+    "aram\030\213\001 \001(\0132\031.caffe.BatchNormParameter\022)"
+    "\n\nbias_param\030\215\001 \001(\0132\024.caffe.BiasParamete"
+    "r\022,\n\014concat_param\030h \001(\0132\026.caffe.ConcatPa"
+    "rameter\022\?\n\026contrastive_loss_param\030i \001(\0132"
+    "\037.caffe.ContrastiveLossParameter\0226\n\021conv"
+    "olution_param\030j \001(\0132\033.caffe.ConvolutionP"
+    "arameter\022)\n\ncrop_param\030\220\001 \001(\0132\024.caffe.Cr"
+    "opParameter\022(\n\ndata_param\030k \001(\0132\024.caffe."
+    "DataParameter\022@\n\026detection_output_param\030"
+    "\223\001 \001(\0132\037.caffe.DetectionOutputParameter\022"
+    ".\n\rdropout_param\030l \001(\0132\027.caffe.DropoutPa"
+    "rameter\0223\n\020dummy_data_param\030m \001(\0132\031.caff"
+    "e.DummyDataParameter\022.\n\reltwise_param\030n "
+    "\001(\0132\027.caffe.EltwiseParameter\022\'\n\telu_para"
+    "m\030\214\001 \001(\0132\023.caffe.ELUParameter\022+\n\013embed_p"
+    "aram\030\211\001 \001(\0132\025.caffe.EmbedParameter\022&\n\tex"
+    "p_param\030o \001(\0132\023.caffe.ExpParameter\022/\n\rfl"
+    "atten_param\030\207\001 \001(\0132\027.caffe.FlattenParame"
+    "ter\0221\n\017hdf5_data_param\030p \001(\0132\030.caffe.HDF"
+    "5DataParameter\0225\n\021hdf5_output_param\030q \001("
+    "\0132\032.caffe.HDF5OutputParameter\0223\n\020hinge_l"
+    "oss_param\030r \001(\0132\031.caffe.HingeLossParamet"
+    "er\0223\n\020image_data_param\030s \001(\0132\031.caffe.Ima"
+    "geDataParameter\0229\n\023infogain_loss_param\030t"
+    " \001(\0132\034.caffe.InfogainLossParameter\0229\n\023in"
+    "ner_product_param\030u \001(\0132\034.caffe.InnerPro"
+    "ductParameter\022+\n\013input_param\030\217\001 \001(\0132\025.ca"
+    "ffe.InputParameter\022\'\n\tlog_param\030\206\001 \001(\0132\023"
+    ".caffe.LogParameter\022&\n\tlrn_param\030v \001(\0132\023"
+    ".caffe.LRNParameter\0225\n\021memory_data_param"
+    "\030w \001(\0132\032.caffe.MemoryDataParameter\022&\n\tmv"
+    "n_param\030x \001(\0132\023.caffe.MVNParameter\0222\n\nno"
+    "rm_param\030\225\001 \001(\0132\035.caffe.NormalizeBBoxPar"
+    "ameter\022/\n\rpermute_param\030\224\001 \001(\0132\027.caffe.P"
+    "ermuteParameter\0223\n\017parameter_param\030\221\001 \001("
+    "\0132\031.caffe.ParameterParameter\022.\n\rpooling_"
+    "param\030y \001(\0132\027.caffe.PoolingParameter\022*\n\013"
+    "power_param\030z \001(\0132\025.caffe.PowerParameter"
+    "\022+\n\013prelu_param\030\203\001 \001(\0132\025.caffe.PReLUPara"
+    "meter\0222\n\017prior_box_param\030\226\001 \001(\0132\030.caffe."
+    "PriorBoxParameter\022-\n\014python_param\030\202\001 \001(\013"
+    "2\026.caffe.PythonParameter\0223\n\017recurrent_pa"
+    "ram\030\222\001 \001(\0132\031.caffe.RecurrentParameter\0223\n"
+    "\017reduction_param\030\210\001 \001(\0132\031.caffe.Reductio"
+    "nParameter\022(\n\nrelu_param\030{ \001(\0132\024.caffe.R"
+    "eLUParameter\022/\n\rreshape_param\030\205\001 \001(\0132\027.c"
+    "affe.ReshapeParameter\022+\n\013scale_param\030\216\001 "
+    "\001(\0132\025.caffe.ScaleParameter\022.\n\rsigmoid_pa"
+    "ram\030| \001(\0132\027.caffe.SigmoidParameter\022.\n\rso"
+    "ftmax_param\030} \001(\0132\027.caffe.SoftmaxParamet"
+    "er\022\'\n\tspp_param\030\204\001 \001(\0132\023.caffe.SPPParame"
+    "ter\022*\n\013slice_param\030~ \001(\0132\025.caffe.SlicePa"
+    "rameter\022(\n\ntanh_param\030\177 \001(\0132\024.caffe.TanH"
+    "Parameter\0223\n\017threshold_param\030\200\001 \001(\0132\031.ca"
+    "ffe.ThresholdParameter\022)\n\ntile_param\030\212\001 "
+    "\001(\0132\024.caffe.TileParameter\0226\n\021window_data"
+    "_param\030\201\001 \001(\0132\032.caffe.WindowDataParamete"
+    "r\"\266\001\n\027TransformationParameter\022\020\n\005scale\030\001"
+    " \001(\002:\0011\022\025\n\006mirror\030\002 \001(\010:\005false\022\024\n\tcrop_s"
+    "ize\030\003 \001(\r:\0010\022\021\n\tmean_file\030\004 \001(\t\022\022\n\nmean_"
+    "value\030\005 \003(\002\022\032\n\013force_color\030\006 \001(\010:\005false\022"
+    "\031\n\nforce_gray\030\007 \001(\010:\005false\"\302\001\n\rLossParam"
+    "eter\022\024\n\014ignore_label\030\001 \001(\005\022D\n\rnormalizat"
+    "ion\030\003 \001(\0162&.caffe.LossParameter.Normaliz"
+    "ationMode:\005VALID\022\021\n\tnormalize\030\002 \001(\010\"B\n\021N"
+    "ormalizationMode\022\010\n\004FULL\020\000\022\t\n\005VALID\020\001\022\016\n"
+    "\nBATCH_SIZE\020\002\022\010\n\004NONE\020\003\"L\n\021AccuracyParam"
+    "eter\022\020\n\005top_k\030\001 \001(\r:\0011\022\017\n\004axis\030\002 \001(\005:\0011\022"
+    "\024\n\014ignore_label\030\003 \001(\005\"M\n\017ArgMaxParameter"
+    "\022\032\n\013out_max_val\030\001 \001(\010:\005false\022\020\n\005top_k\030\002 "
+    "\001(\r:\0011\022\014\n\004axis\030\003 \001(\005\"9\n\017ConcatParameter\022"
+    "\017\n\004axis\030\002 \001(\005:\0011\022\025\n\nconcat_dim\030\001 \001(\r:\0011\""
+    "j\n\022BatchNormParameter\022\030\n\020use_global_stat"
+    "s\030\001 \001(\010\022&\n\027moving_average_fraction\030\002 \001(\002"
+    ":\0050.999\022\022\n\003eps\030\003 \001(\002:\0051e-05\"]\n\rBiasParam"
+    "eter\022\017\n\004axis\030\001 \001(\005:\0011\022\023\n\010num_axes\030\002 \001(\005:"
+    "\0011\022&\n\006filler\030\003 \001(\0132\026.caffe.FillerParamet"
+    "er\"L\n\030ContrastiveLossParameter\022\021\n\006margin"
+    "\030\001 \001(\002:\0011\022\035\n\016legacy_version\030\002 \001(\010:\005false"
+    "\"\374\003\n\024ConvolutionParameter\022\022\n\nnum_output\030"
+    "\001 \001(\r\022\027\n\tbias_term\030\002 \001(\010:\004true\022\013\n\003pad\030\003 "
+    "\003(\r\022\023\n\013kernel_size\030\004 \003(\r\022\016\n\006stride\030\006 \003(\r"
+    "\022\020\n\010dilation\030\022 \003(\r\022\020\n\005pad_h\030\t \001(\r:\0010\022\020\n\005"
+    "pad_w\030\n \001(\r:\0010\022\020\n\010kernel_h\030\013 \001(\r\022\020\n\010kern"
+    "el_w\030\014 \001(\r\022\020\n\010stride_h\030\r \001(\r\022\020\n\010stride_w"
+    "\030\016 \001(\r\022\020\n\005group\030\005 \001(\r:\0011\022-\n\rweight_fille"
+    "r\030\007 \001(\0132\026.caffe.FillerParameter\022+\n\013bias_"
+    "filler\030\010 \001(\0132\026.caffe.FillerParameter\022;\n\006"
+    "engine\030\017 \001(\0162\".caffe.ConvolutionParamete"
+    "r.Engine:\007DEFAULT\022\017\n\004axis\030\020 \001(\005:\0011\022\036\n\017fo"
+    "rce_nd_im2col\030\021 \001(\010:\005false\"+\n\006Engine\022\013\n\007"
+    "DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"0\n\rCropP"
+    "arameter\022\017\n\004axis\030\001 \001(\005:\0012\022\016\n\006offset\030\002 \003("
+    "\r\"\244\002\n\rDataParameter\022\016\n\006source\030\001 \001(\t\022\022\n\nb"
+    "atch_size\030\004 \001(\r\022\024\n\trand_skip\030\007 \001(\r:\0010\0221\n"
+    "\007backend\030\010 \001(\0162\027.caffe.DataParameter.DB:"
+    "\007LEVELDB\022\020\n\005scale\030\002 \001(\002:\0011\022\021\n\tmean_file\030"
+    "\003 \001(\t\022\024\n\tcrop_size\030\005 \001(\r:\0010\022\025\n\006mirror\030\006 "
+    "\001(\010:\005false\022\"\n\023force_encoded_color\030\t \001(\010:"
+    "\005false\022\023\n\010prefetch\030\n \001(\r:\0014\"\033\n\002DB\022\013\n\007LEV"
+    "ELDB\020\000\022\010\n\004LMDB\020\001\"[\n\036NonMaximumSuppressio"
+    "nParameter\022\032\n\rnms_threshold\030\001 \001(\002:\0030.3\022\r"
+    "\n\005top_k\030\002 \001(\005\022\016\n\003eta\030\003 \001(\002:\0011\"\252\001\n\023SaveOu"
+    "tputParameter\022\030\n\020output_directory\030\001 \001(\t\022"
+    "\032\n\022output_name_prefix\030\002 \001(\t\022\025\n\routput_fo"
+    "rmat\030\003 \001(\t\022\026\n\016label_map_file\030\004 \001(\t\022\026\n\016na"
+    "me_size_file\030\005 \001(\t\022\026\n\016num_test_image\030\006 \001"
+    "(\r\".\n\020DropoutParameter\022\032\n\rdropout_ratio\030"
+    "\001 \001(\002:\0030.5\"\240\001\n\022DummyDataParameter\022+\n\013dat"
+    "a_filler\030\001 \003(\0132\026.caffe.FillerParameter\022\037"
+    "\n\005shape\030\006 \003(\0132\020.caffe.BlobShape\022\013\n\003num\030\002"
+    " \003(\r\022\020\n\010channels\030\003 \003(\r\022\016\n\006height\030\004 \003(\r\022\r"
+    "\n\005width\030\005 \003(\r\"\245\001\n\020EltwiseParameter\0229\n\top"
+    "eration\030\001 \001(\0162!.caffe.EltwiseParameter.E"
+    "ltwiseOp:\003SUM\022\r\n\005coeff\030\002 \003(\002\022\036\n\020stable_p"
+    "rod_grad\030\003 \001(\010:\004true\"\'\n\tEltwiseOp\022\010\n\004PRO"
+    "D\020\000\022\007\n\003SUM\020\001\022\007\n\003MAX\020\002\" \n\014ELUParameter\022\020\n"
+    "\005alpha\030\001 \001(\002:\0011\"\254\001\n\016EmbedParameter\022\022\n\nnu"
+    "m_output\030\001 \001(\r\022\021\n\tinput_dim\030\002 \001(\r\022\027\n\tbia"
+    "s_term\030\003 \001(\010:\004true\022-\n\rweight_filler\030\004 \001("
     "\0132\026.caffe.FillerParameter\022+\n\013bias_filler"
-    "\030\003 \001(\0132\026.caffe.FillerParameter\022\031\n\ndebug_"
-    "info\030\004 \001(\010:\005false\022\034\n\rexpose_hidden\030\005 \001(\010"
-    ":\005false\"\255\001\n\022ReductionParameter\022=\n\toperat"
-    "ion\030\001 \001(\0162%.caffe.ReductionParameter.Red"
-    "uctionOp:\003SUM\022\017\n\004axis\030\002 \001(\005:\0010\022\020\n\005coeff\030"
-    "\003 \001(\002:\0011\"5\n\013ReductionOp\022\007\n\003SUM\020\001\022\010\n\004ASUM"
-    "\020\002\022\t\n\005SUMSQ\020\003\022\010\n\004MEAN\020\004\"\215\001\n\rReLUParamete"
-    "r\022\031\n\016negative_slope\030\001 \001(\002:\0010\0224\n\006engine\030\002"
-    " \001(\0162\033.caffe.ReLUParameter.Engine:\007DEFAU"
+    "\030\005 \001(\0132\026.caffe.FillerParameter\"D\n\014ExpPar"
+    "ameter\022\020\n\004base\030\001 \001(\002:\002-1\022\020\n\005scale\030\002 \001(\002:"
+    "\0011\022\020\n\005shift\030\003 \001(\002:\0010\"9\n\020FlattenParameter"
+    "\022\017\n\004axis\030\001 \001(\005:\0011\022\024\n\010end_axis\030\002 \001(\005:\002-1\""
+    "O\n\021HDF5DataParameter\022\016\n\006source\030\001 \001(\t\022\022\n\n"
+    "batch_size\030\002 \001(\r\022\026\n\007shuffle\030\003 \001(\010:\005false"
+    "\"(\n\023HDF5OutputParameter\022\021\n\tfile_name\030\001 \001"
+    "(\t\"^\n\022HingeLossParameter\0220\n\004norm\030\001 \001(\0162\036"
+    ".caffe.HingeLossParameter.Norm:\002L1\"\026\n\004No"
+    "rm\022\006\n\002L1\020\001\022\006\n\002L2\020\002\"\227\002\n\022ImageDataParamete"
+    "r\022\016\n\006source\030\001 \001(\t\022\025\n\nbatch_size\030\004 \001(\r:\0011"
+    "\022\024\n\trand_skip\030\007 \001(\r:\0010\022\026\n\007shuffle\030\010 \001(\010:"
+    "\005false\022\025\n\nnew_height\030\t \001(\r:\0010\022\024\n\tnew_wid"
+    "th\030\n \001(\r:\0010\022\026\n\010is_color\030\013 \001(\010:\004true\022\020\n\005s"
+    "cale\030\002 \001(\002:\0011\022\021\n\tmean_file\030\003 \001(\t\022\024\n\tcrop"
+    "_size\030\005 \001(\r:\0010\022\025\n\006mirror\030\006 \001(\010:\005false\022\025\n"
+    "\013root_folder\030\014 \001(\t:\000\"\'\n\025InfogainLossPara"
+    "meter\022\016\n\006source\030\001 \001(\t\"\313\001\n\025InnerProductPa"
+    "rameter\022\022\n\nnum_output\030\001 \001(\r\022\027\n\tbias_term"
+    "\030\002 \001(\010:\004true\022-\n\rweight_filler\030\003 \001(\0132\026.ca"
+    "ffe.FillerParameter\022+\n\013bias_filler\030\004 \001(\013"
+    "2\026.caffe.FillerParameter\022\017\n\004axis\030\005 \001(\005:\001"
+    "1\022\030\n\ttranspose\030\006 \001(\010:\005false\"1\n\016InputPara"
+    "meter\022\037\n\005shape\030\001 \003(\0132\020.caffe.BlobShape\"D"
+    "\n\014LogParameter\022\020\n\004base\030\001 \001(\002:\002-1\022\020\n\005scal"
+    "e\030\002 \001(\002:\0011\022\020\n\005shift\030\003 \001(\002:\0010\"\270\002\n\014LRNPara"
+    "meter\022\025\n\nlocal_size\030\001 \001(\r:\0015\022\020\n\005alpha\030\002 "
+    "\001(\002:\0011\022\022\n\004beta\030\003 \001(\002:\0040.75\022D\n\013norm_regio"
+    "n\030\004 \001(\0162\036.caffe.LRNParameter.NormRegion:"
+    "\017ACROSS_CHANNELS\022\014\n\001k\030\005 \001(\002:\0011\0223\n\006engine"
+    "\030\006 \001(\0162\032.caffe.LRNParameter.Engine:\007DEFA"
+    "ULT\"5\n\nNormRegion\022\023\n\017ACROSS_CHANNELS\020\000\022\022"
+    "\n\016WITHIN_CHANNEL\020\001\"+\n\006Engine\022\013\n\007DEFAULT\020"
+    "\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"Z\n\023MemoryDataPar"
+    "ameter\022\022\n\nbatch_size\030\001 \001(\r\022\020\n\010channels\030\002"
+    " \001(\r\022\016\n\006height\030\003 \001(\r\022\r\n\005width\030\004 \001(\r\"d\n\014M"
+    "VNParameter\022 \n\022normalize_variance\030\001 \001(\010:"
+    "\004true\022\036\n\017across_channels\030\002 \001(\010:\005false\022\022\n"
+    "\003eps\030\003 \001(\002:\0051e-09\"5\n\022ParameterParameter\022"
+    "\037\n\005shape\030\001 \001(\0132\020.caffe.BlobShape\"\242\003\n\020Poo"
+    "lingParameter\0225\n\004pool\030\001 \001(\0162\".caffe.Pool"
+    "ingParameter.PoolMethod:\003MAX\022\016\n\003pad\030\004 \001("
+    "\r:\0010\022\020\n\005pad_h\030\t \001(\r:\0010\022\020\n\005pad_w\030\n \001(\r:\0010"
+    "\022\023\n\013kernel_size\030\002 \001(\r\022\020\n\010kernel_h\030\005 \001(\r\022"
+    "\020\n\010kernel_w\030\006 \001(\r\022\021\n\006stride\030\003 \001(\r:\0011\022\020\n\010"
+    "stride_h\030\007 \001(\r\022\020\n\010stride_w\030\010 \001(\r\0227\n\006engi"
+    "ne\030\013 \001(\0162\036.caffe.PoolingParameter.Engine"
+    ":\007DEFAULT\022\035\n\016global_pooling\030\014 \001(\010:\005false"
+    "\".\n\nPoolMethod\022\007\n\003MAX\020\000\022\007\n\003AVE\020\001\022\016\n\nSTOC"
+    "HASTIC\020\002\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE"
+    "\020\001\022\t\n\005CUDNN\020\002\"F\n\016PowerParameter\022\020\n\005power"
+    "\030\001 \001(\002:\0011\022\020\n\005scale\030\002 \001(\002:\0011\022\020\n\005shift\030\003 \001"
+    "(\002:\0010\"g\n\017PythonParameter\022\016\n\006module\030\001 \001(\t"
+    "\022\r\n\005layer\030\002 \001(\t\022\023\n\tparam_str\030\003 \001(\t:\000\022 \n\021"
+    "share_in_parallel\030\004 \001(\010:\005false\"\300\001\n\022Recur"
+    "rentParameter\022\025\n\nnum_output\030\001 \001(\r:\0010\022-\n\r"
+    "weight_filler\030\002 \001(\0132\026.caffe.FillerParame"
+    "ter\022+\n\013bias_filler\030\003 \001(\0132\026.caffe.FillerP"
+    "arameter\022\031\n\ndebug_info\030\004 \001(\010:\005false\022\034\n\re"
+    "xpose_hidden\030\005 \001(\010:\005false\"\255\001\n\022ReductionP"
+    "arameter\022=\n\toperation\030\001 \001(\0162%.caffe.Redu"
+    "ctionParameter.ReductionOp:\003SUM\022\017\n\004axis\030"
+    "\002 \001(\005:\0010\022\020\n\005coeff\030\003 \001(\002:\0011\"5\n\013ReductionO"
+    "p\022\007\n\003SUM\020\001\022\010\n\004ASUM\020\002\022\t\n\005SUMSQ\020\003\022\010\n\004MEAN\020"
+    "\004\"\215\001\n\rReLUParameter\022\031\n\016negative_slope\030\001 "
+    "\001(\002:\0010\0224\n\006engine\030\002 \001(\0162\033.caffe.ReLUParam"
+    "eter.Engine:\007DEFAULT\"+\n\006Engine\022\013\n\007DEFAUL"
+    "T\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"Z\n\020ReshapePara"
+    "meter\022\037\n\005shape\030\001 \001(\0132\020.caffe.BlobShape\022\017"
+    "\n\004axis\030\002 \001(\005:\0010\022\024\n\010num_axes\030\003 \001(\005:\002-1\"\245\001"
+    "\n\016ScaleParameter\022\017\n\004axis\030\001 \001(\005:\0011\022\023\n\010num"
+    "_axes\030\002 \001(\005:\0011\022&\n\006filler\030\003 \001(\0132\026.caffe.F"
+    "illerParameter\022\030\n\tbias_term\030\004 \001(\010:\005false"
+    "\022+\n\013bias_filler\030\005 \001(\0132\026.caffe.FillerPara"
+    "meter\"x\n\020SigmoidParameter\0227\n\006engine\030\001 \001("
+    "\0162\036.caffe.SigmoidParameter.Engine:\007DEFAU"
     "LT\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005"
-    "CUDNN\020\002\"Z\n\020ReshapeParameter\022\037\n\005shape\030\001 \001"
-    "(\0132\020.caffe.BlobShape\022\017\n\004axis\030\002 \001(\005:\0010\022\024\n"
-    "\010num_axes\030\003 \001(\005:\002-1\"\245\001\n\016ScaleParameter\022\017"
-    "\n\004axis\030\001 \001(\005:\0011\022\023\n\010num_axes\030\002 \001(\005:\0011\022&\n\006"
-    "filler\030\003 \001(\0132\026.caffe.FillerParameter\022\030\n\t"
-    "bias_term\030\004 \001(\010:\005false\022+\n\013bias_filler\030\005 "
-    "\001(\0132\026.caffe.FillerParameter\"x\n\020SigmoidPa"
-    "rameter\0227\n\006engine\030\001 \001(\0162\036.caffe.SigmoidP"
-    "arameter.Engine:\007DEFAULT\"+\n\006Engine\022\013\n\007DE"
-    "FAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"L\n\016SlicePa"
-    "rameter\022\017\n\004axis\030\003 \001(\005:\0011\022\023\n\013slice_point\030"
-    "\002 \003(\r\022\024\n\tslice_dim\030\001 \001(\r:\0011\"\211\001\n\020SoftmaxP"
-    "arameter\0227\n\006engine\030\001 \001(\0162\036.caffe.Softmax"
-    "Parameter.Engine:\007DEFAULT\022\017\n\004axis\030\002 \001(\005:"
-    "\0011\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005"
-    "CUDNN\020\002\"r\n\rTanHParameter\0224\n\006engine\030\001 \001(\016"
-    "2\033.caffe.TanHParameter.Engine:\007DEFAULT\"+"
-    "\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDN"
-    "N\020\002\"/\n\rTileParameter\022\017\n\004axis\030\001 \001(\005:\0011\022\r\n"
-    "\005tiles\030\002 \001(\005\"*\n\022ThresholdParameter\022\024\n\tth"
-    "reshold\030\001 \001(\002:\0010\"\301\002\n\023WindowDataParameter"
-    "\022\016\n\006source\030\001 \001(\t\022\020\n\005scale\030\002 \001(\002:\0011\022\021\n\tme"
-    "an_file\030\003 \001(\t\022\022\n\nbatch_size\030\004 \001(\r\022\024\n\tcro"
-    "p_size\030\005 \001(\r:\0010\022\025\n\006mirror\030\006 \001(\010:\005false\022\031"
-    "\n\014fg_threshold\030\007 \001(\002:\0030.5\022\031\n\014bg_threshol"
-    "d\030\010 \001(\002:\0030.5\022\031\n\013fg_fraction\030\t \001(\002:\0040.25\022"
-    "\026\n\013context_pad\030\n \001(\r:\0010\022\027\n\tcrop_mode\030\013 \001"
-    "(\t:\004warp\022\033\n\014cache_images\030\014 \001(\010:\005false\022\025\n"
-    "\013root_folder\030\r \001(\t:\000\"\353\001\n\014SPPParameter\022\026\n"
-    "\016pyramid_height\030\001 \001(\r\0221\n\004pool\030\002 \001(\0162\036.ca"
-    "ffe.SPPParameter.PoolMethod:\003MAX\0223\n\006engi"
-    "ne\030\006 \001(\0162\032.caffe.SPPParameter.Engine:\007DE"
-    "FAULT\".\n\nPoolMethod\022\007\n\003MAX\020\000\022\007\n\003AVE\020\001\022\016\n"
-    "\nSTOCHASTIC\020\002\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022\t\n\005"
-    "CAFFE\020\001\022\t\n\005CUDNN\020\002\"\340\023\n\020V1LayerParameter\022"
-    "\016\n\006bottom\030\002 \003(\t\022\013\n\003top\030\003 \003(\t\022\014\n\004name\030\004 \001"
-    "(\t\022$\n\007include\030  \003(\0132\023.caffe.NetStateRule"
-    "\022$\n\007exclude\030! \003(\0132\023.caffe.NetStateRule\022/"
-    "\n\004type\030\005 \001(\0162!.caffe.V1LayerParameter.La"
-    "yerType\022\037\n\005blobs\030\006 \003(\0132\020.caffe.BlobProto"
-    "\022\016\n\005param\030\351\007 \003(\t\022>\n\017blob_share_mode\030\352\007 \003"
-    "(\0162$.caffe.V1LayerParameter.DimCheckMode"
-    "\022\020\n\010blobs_lr\030\007 \003(\002\022\024\n\014weight_decay\030\010 \003(\002"
-    "\022\023\n\013loss_weight\030# \003(\002\0220\n\016accuracy_param\030"
-    "\033 \001(\0132\030.caffe.AccuracyParameter\022,\n\014argma"
-    "x_param\030\027 \001(\0132\026.caffe.ArgMaxParameter\022,\n"
-    "\014concat_param\030\t \001(\0132\026.caffe.ConcatParame"
-    "ter\022\?\n\026contrastive_loss_param\030( \001(\0132\037.ca"
-    "ffe.ContrastiveLossParameter\0226\n\021convolut"
-    "ion_param\030\n \001(\0132\033.caffe.ConvolutionParam"
-    "eter\022(\n\ndata_param\030\013 \001(\0132\024.caffe.DataPar"
-    "ameter\022.\n\rdropout_param\030\014 \001(\0132\027.caffe.Dr"
-    "opoutParameter\0223\n\020dummy_data_param\030\032 \001(\013"
-    "2\031.caffe.DummyDataParameter\022.\n\reltwise_p"
-    "aram\030\030 \001(\0132\027.caffe.EltwiseParameter\022&\n\te"
-    "xp_param\030) \001(\0132\023.caffe.ExpParameter\0221\n\017h"
-    "df5_data_param\030\r \001(\0132\030.caffe.HDF5DataPar"
-    "ameter\0225\n\021hdf5_output_param\030\016 \001(\0132\032.caff"
-    "e.HDF5OutputParameter\0223\n\020hinge_loss_para"
-    "m\030\035 \001(\0132\031.caffe.HingeLossParameter\0223\n\020im"
-    "age_data_param\030\017 \001(\0132\031.caffe.ImageDataPa"
-    "rameter\0229\n\023infogain_loss_param\030\020 \001(\0132\034.c"
-    "affe.InfogainLossParameter\0229\n\023inner_prod"
-    "uct_param\030\021 \001(\0132\034.caffe.InnerProductPara"
-    "meter\022&\n\tlrn_param\030\022 \001(\0132\023.caffe.LRNPara"
-    "meter\0225\n\021memory_data_param\030\026 \001(\0132\032.caffe"
-    ".MemoryDataParameter\022&\n\tmvn_param\030\" \001(\0132"
-    "\023.caffe.MVNParameter\022.\n\rpooling_param\030\023 "
-    "\001(\0132\027.caffe.PoolingParameter\022*\n\013power_pa"
-    "ram\030\025 \001(\0132\025.caffe.PowerParameter\022(\n\nrelu"
-    "_param\030\036 \001(\0132\024.caffe.ReLUParameter\022.\n\rsi"
-    "gmoid_param\030& \001(\0132\027.caffe.SigmoidParamet"
-    "er\022.\n\rsoftmax_param\030\' \001(\0132\027.caffe.Softma"
-    "xParameter\022*\n\013slice_param\030\037 \001(\0132\025.caffe."
-    "SliceParameter\022(\n\ntanh_param\030% \001(\0132\024.caf"
-    "fe.TanHParameter\0222\n\017threshold_param\030\031 \001("
-    "\0132\031.caffe.ThresholdParameter\0225\n\021window_d"
-    "ata_param\030\024 \001(\0132\032.caffe.WindowDataParame"
-    "ter\0227\n\017transform_param\030$ \001(\0132\036.caffe.Tra"
-    "nsformationParameter\022(\n\nloss_param\030* \001(\013"
-    "2\024.caffe.LossParameter\022&\n\005layer\030\001 \001(\0132\027."
-    "caffe.V0LayerParameter\"\330\004\n\tLayerType\022\010\n\004"
-    "NONE\020\000\022\n\n\006ABSVAL\020#\022\014\n\010ACCURACY\020\001\022\n\n\006ARGM"
-    "AX\020\036\022\010\n\004BNLL\020\002\022\n\n\006CONCAT\020\003\022\024\n\020CONTRASTIV"
-    "E_LOSS\020%\022\017\n\013CONVOLUTION\020\004\022\010\n\004DATA\020\005\022\021\n\rD"
-    "ECONVOLUTION\020\'\022\013\n\007DROPOUT\020\006\022\016\n\nDUMMY_DAT"
-    "A\020 \022\022\n\016EUCLIDEAN_LOSS\020\007\022\013\n\007ELTWISE\020\031\022\007\n\003"
-    "EXP\020&\022\013\n\007FLATTEN\020\010\022\r\n\tHDF5_DATA\020\t\022\017\n\013HDF"
-    "5_OUTPUT\020\n\022\016\n\nHINGE_LOSS\020\034\022\n\n\006IM2COL\020\013\022\016"
-    "\n\nIMAGE_DATA\020\014\022\021\n\rINFOGAIN_LOSS\020\r\022\021\n\rINN"
-    "ER_PRODUCT\020\016\022\007\n\003LRN\020\017\022\017\n\013MEMORY_DATA\020\035\022\035"
-    "\n\031MULTINOMIAL_LOGISTIC_LOSS\020\020\022\007\n\003MVN\020\"\022\013"
-    "\n\007POOLING\020\021\022\t\n\005POWER\020\032\022\010\n\004RELU\020\022\022\013\n\007SIGM"
-    "OID\020\023\022\036\n\032SIGMOID_CROSS_ENTROPY_LOSS\020\033\022\013\n"
-    "\007SILENCE\020$\022\013\n\007SOFTMAX\020\024\022\020\n\014SOFTMAX_LOSS\020"
-    "\025\022\t\n\005SPLIT\020\026\022\t\n\005SLICE\020!\022\010\n\004TANH\020\027\022\017\n\013WIN"
-    "DOW_DATA\020\030\022\r\n\tTHRESHOLD\020\037\"*\n\014DimCheckMod"
-    "e\022\n\n\006STRICT\020\000\022\016\n\nPERMISSIVE\020\001\"\375\007\n\020V0Laye"
-    "rParameter\022\014\n\004name\030\001 \001(\t\022\014\n\004type\030\002 \001(\t\022\022"
-    "\n\nnum_output\030\003 \001(\r\022\026\n\010biasterm\030\004 \001(\010:\004tr"
-    "ue\022-\n\rweight_filler\030\005 \001(\0132\026.caffe.Filler"
-    "Parameter\022+\n\013bias_filler\030\006 \001(\0132\026.caffe.F"
-    "illerParameter\022\016\n\003pad\030\007 \001(\r:\0010\022\022\n\nkernel"
-    "size\030\010 \001(\r\022\020\n\005group\030\t \001(\r:\0011\022\021\n\006stride\030\n"
-    " \001(\r:\0011\0225\n\004pool\030\013 \001(\0162\".caffe.V0LayerPar"
-    "ameter.PoolMethod:\003MAX\022\032\n\rdropout_ratio\030"
-    "\014 \001(\002:\0030.5\022\025\n\nlocal_size\030\r \001(\r:\0015\022\020\n\005alp"
-    "ha\030\016 \001(\002:\0011\022\022\n\004beta\030\017 \001(\002:\0040.75\022\014\n\001k\030\026 \001"
-    "(\002:\0011\022\016\n\006source\030\020 \001(\t\022\020\n\005scale\030\021 \001(\002:\0011\022"
-    "\020\n\010meanfile\030\022 \001(\t\022\021\n\tbatchsize\030\023 \001(\r\022\023\n\010"
-    "cropsize\030\024 \001(\r:\0010\022\025\n\006mirror\030\025 \001(\010:\005false"
-    "\022\037\n\005blobs\0302 \003(\0132\020.caffe.BlobProto\022\020\n\010blo"
-    "bs_lr\0303 \003(\002\022\024\n\014weight_decay\0304 \003(\002\022\024\n\tran"
-    "d_skip\0305 \001(\r:\0010\022\035\n\020det_fg_threshold\0306 \001("
-    "\002:\0030.5\022\035\n\020det_bg_threshold\0307 \001(\002:\0030.5\022\035\n"
-    "\017det_fg_fraction\0308 \001(\002:\0040.25\022\032\n\017det_cont"
-    "ext_pad\030: \001(\r:\0010\022\033\n\rdet_crop_mode\030; \001(\t:"
-    "\004warp\022\022\n\007new_num\030< \001(\005:\0010\022\027\n\014new_channel"
-    "s\030= \001(\005:\0010\022\025\n\nnew_height\030> \001(\005:\0010\022\024\n\tnew"
-    "_width\030\? \001(\005:\0010\022\035\n\016shuffle_images\030@ \001(\010:"
-    "\005false\022\025\n\nconcat_dim\030A \001(\r:\0011\0226\n\021hdf5_ou"
-    "tput_param\030\351\007 \001(\0132\032.caffe.HDF5OutputPara"
-    "meter\".\n\nPoolMethod\022\007\n\003MAX\020\000\022\007\n\003AVE\020\001\022\016\n"
-    "\nSTOCHASTIC\020\002\"W\n\016PReLUParameter\022&\n\006fille"
-    "r\030\001 \001(\0132\026.caffe.FillerParameter\022\035\n\016chann"
-    "el_shared\030\002 \001(\010:\005false\"\207\001\n\016NormalizedBBo"
-    "x\022\014\n\004xmin\030\001 \001(\002\022\014\n\004ymin\030\002 \001(\002\022\014\n\004xmax\030\003 "
-    "\001(\002\022\014\n\004ymax\030\004 \001(\002\022\r\n\005label\030\005 \001(\005\022\021\n\tdiff"
-    "icult\030\006 \001(\010\022\r\n\005score\030\007 \001(\002\022\014\n\004size\030\010 \001(\002"
-    "*\034\n\005Phase\022\t\n\005TRAIN\020\000\022\010\n\004TEST\020\001", 16870);
+    "CUDNN\020\002\"L\n\016SliceParameter\022\017\n\004axis\030\003 \001(\005:"
+    "\0011\022\023\n\013slice_point\030\002 \003(\r\022\024\n\tslice_dim\030\001 \001"
+    "(\r:\0011\"\211\001\n\020SoftmaxParameter\0227\n\006engine\030\001 \001"
+    "(\0162\036.caffe.SoftmaxParameter.Engine:\007DEFA"
+    "ULT\022\017\n\004axis\030\002 \001(\005:\0011\"+\n\006Engine\022\013\n\007DEFAUL"
+    "T\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"r\n\rTanHParamet"
+    "er\0224\n\006engine\030\001 \001(\0162\033.caffe.TanHParameter"
+    ".Engine:\007DEFAULT\"+\n\006Engine\022\013\n\007DEFAULT\020\000\022"
+    "\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"/\n\rTileParameter\022\017"
+    "\n\004axis\030\001 \001(\005:\0011\022\r\n\005tiles\030\002 \001(\005\"*\n\022Thresh"
+    "oldParameter\022\024\n\tthreshold\030\001 \001(\002:\0010\"\301\002\n\023W"
+    "indowDataParameter\022\016\n\006source\030\001 \001(\t\022\020\n\005sc"
+    "ale\030\002 \001(\002:\0011\022\021\n\tmean_file\030\003 \001(\t\022\022\n\nbatch"
+    "_size\030\004 \001(\r\022\024\n\tcrop_size\030\005 \001(\r:\0010\022\025\n\006mir"
+    "ror\030\006 \001(\010:\005false\022\031\n\014fg_threshold\030\007 \001(\002:\003"
+    "0.5\022\031\n\014bg_threshold\030\010 \001(\002:\0030.5\022\031\n\013fg_fra"
+    "ction\030\t \001(\002:\0040.25\022\026\n\013context_pad\030\n \001(\r:\001"
+    "0\022\027\n\tcrop_mode\030\013 \001(\t:\004warp\022\033\n\014cache_imag"
+    "es\030\014 \001(\010:\005false\022\025\n\013root_folder\030\r \001(\t:\000\"\353"
+    "\001\n\014SPPParameter\022\026\n\016pyramid_height\030\001 \001(\r\022"
+    "1\n\004pool\030\002 \001(\0162\036.caffe.SPPParameter.PoolM"
+    "ethod:\003MAX\0223\n\006engine\030\006 \001(\0162\032.caffe.SPPPa"
+    "rameter.Engine:\007DEFAULT\".\n\nPoolMethod\022\007\n"
+    "\003MAX\020\000\022\007\n\003AVE\020\001\022\016\n\nSTOCHASTIC\020\002\"+\n\006Engin"
+    "e\022\013\n\007DEFAULT\020\000\022\t\n\005CAFFE\020\001\022\t\n\005CUDNN\020\002\"\340\023\n"
+    "\020V1LayerParameter\022\016\n\006bottom\030\002 \003(\t\022\013\n\003top"
+    "\030\003 \003(\t\022\014\n\004name\030\004 \001(\t\022$\n\007include\030  \003(\0132\023."
+    "caffe.NetStateRule\022$\n\007exclude\030! \003(\0132\023.ca"
+    "ffe.NetStateRule\022/\n\004type\030\005 \001(\0162!.caffe.V"
+    "1LayerParameter.LayerType\022\037\n\005blobs\030\006 \003(\013"
+    "2\020.caffe.BlobProto\022\016\n\005param\030\351\007 \003(\t\022>\n\017bl"
+    "ob_share_mode\030\352\007 \003(\0162$.caffe.V1LayerPara"
+    "meter.DimCheckMode\022\020\n\010blobs_lr\030\007 \003(\002\022\024\n\014"
+    "weight_decay\030\010 \003(\002\022\023\n\013loss_weight\030# \003(\002\022"
+    "0\n\016accuracy_param\030\033 \001(\0132\030.caffe.Accuracy"
+    "Parameter\022,\n\014argmax_param\030\027 \001(\0132\026.caffe."
+    "ArgMaxParameter\022,\n\014concat_param\030\t \001(\0132\026."
+    "caffe.ConcatParameter\022\?\n\026contrastive_los"
+    "s_param\030( \001(\0132\037.caffe.ContrastiveLossPar"
+    "ameter\0226\n\021convolution_param\030\n \001(\0132\033.caff"
+    "e.ConvolutionParameter\022(\n\ndata_param\030\013 \001"
+    "(\0132\024.caffe.DataParameter\022.\n\rdropout_para"
+    "m\030\014 \001(\0132\027.caffe.DropoutParameter\0223\n\020dumm"
+    "y_data_param\030\032 \001(\0132\031.caffe.DummyDataPara"
+    "meter\022.\n\reltwise_param\030\030 \001(\0132\027.caffe.Elt"
+    "wiseParameter\022&\n\texp_param\030) \001(\0132\023.caffe"
+    ".ExpParameter\0221\n\017hdf5_data_param\030\r \001(\0132\030"
+    ".caffe.HDF5DataParameter\0225\n\021hdf5_output_"
+    "param\030\016 \001(\0132\032.caffe.HDF5OutputParameter\022"
+    "3\n\020hinge_loss_param\030\035 \001(\0132\031.caffe.HingeL"
+    "ossParameter\0223\n\020image_data_param\030\017 \001(\0132\031"
+    ".caffe.ImageDataParameter\0229\n\023infogain_lo"
+    "ss_param\030\020 \001(\0132\034.caffe.InfogainLossParam"
+    "eter\0229\n\023inner_product_param\030\021 \001(\0132\034.caff"
+    "e.InnerProductParameter\022&\n\tlrn_param\030\022 \001"
+    "(\0132\023.caffe.LRNParameter\0225\n\021memory_data_p"
+    "aram\030\026 \001(\0132\032.caffe.MemoryDataParameter\022&"
+    "\n\tmvn_param\030\" \001(\0132\023.caffe.MVNParameter\022."
+    "\n\rpooling_param\030\023 \001(\0132\027.caffe.PoolingPar"
+    "ameter\022*\n\013power_param\030\025 \001(\0132\025.caffe.Powe"
+    "rParameter\022(\n\nrelu_param\030\036 \001(\0132\024.caffe.R"
+    "eLUParameter\022.\n\rsigmoid_param\030& \001(\0132\027.ca"
+    "ffe.SigmoidParameter\022.\n\rsoftmax_param\030\' "
+    "\001(\0132\027.caffe.SoftmaxParameter\022*\n\013slice_pa"
+    "ram\030\037 \001(\0132\025.caffe.SliceParameter\022(\n\ntanh"
+    "_param\030% \001(\0132\024.caffe.TanHParameter\0222\n\017th"
+    "reshold_param\030\031 \001(\0132\031.caffe.ThresholdPar"
+    "ameter\0225\n\021window_data_param\030\024 \001(\0132\032.caff"
+    "e.WindowDataParameter\0227\n\017transform_param"
+    "\030$ \001(\0132\036.caffe.TransformationParameter\022("
+    "\n\nloss_param\030* \001(\0132\024.caffe.LossParameter"
+    "\022&\n\005layer\030\001 \001(\0132\027.caffe.V0LayerParameter"
+    "\"\330\004\n\tLayerType\022\010\n\004NONE\020\000\022\n\n\006ABSVAL\020#\022\014\n\010"
+    "ACCURACY\020\001\022\n\n\006ARGMAX\020\036\022\010\n\004BNLL\020\002\022\n\n\006CONC"
+    "AT\020\003\022\024\n\020CONTRASTIVE_LOSS\020%\022\017\n\013CONVOLUTIO"
+    "N\020\004\022\010\n\004DATA\020\005\022\021\n\rDECONVOLUTION\020\'\022\013\n\007DROP"
+    "OUT\020\006\022\016\n\nDUMMY_DATA\020 \022\022\n\016EUCLIDEAN_LOSS\020"
+    "\007\022\013\n\007ELTWISE\020\031\022\007\n\003EXP\020&\022\013\n\007FLATTEN\020\010\022\r\n\t"
+    "HDF5_DATA\020\t\022\017\n\013HDF5_OUTPUT\020\n\022\016\n\nHINGE_LO"
+    "SS\020\034\022\n\n\006IM2COL\020\013\022\016\n\nIMAGE_DATA\020\014\022\021\n\rINFO"
+    "GAIN_LOSS\020\r\022\021\n\rINNER_PRODUCT\020\016\022\007\n\003LRN\020\017\022"
+    "\017\n\013MEMORY_DATA\020\035\022\035\n\031MULTINOMIAL_LOGISTIC"
+    "_LOSS\020\020\022\007\n\003MVN\020\"\022\013\n\007POOLING\020\021\022\t\n\005POWER\020\032"
+    "\022\010\n\004RELU\020\022\022\013\n\007SIGMOID\020\023\022\036\n\032SIGMOID_CROSS"
+    "_ENTROPY_LOSS\020\033\022\013\n\007SILENCE\020$\022\013\n\007SOFTMAX\020"
+    "\024\022\020\n\014SOFTMAX_LOSS\020\025\022\t\n\005SPLIT\020\026\022\t\n\005SLICE\020"
+    "!\022\010\n\004TANH\020\027\022\017\n\013WINDOW_DATA\020\030\022\r\n\tTHRESHOL"
+    "D\020\037\"*\n\014DimCheckMode\022\n\n\006STRICT\020\000\022\016\n\nPERMI"
+    "SSIVE\020\001\"\375\007\n\020V0LayerParameter\022\014\n\004name\030\001 \001"
+    "(\t\022\014\n\004type\030\002 \001(\t\022\022\n\nnum_output\030\003 \001(\r\022\026\n\010"
+    "biasterm\030\004 \001(\010:\004true\022-\n\rweight_filler\030\005 "
+    "\001(\0132\026.caffe.FillerParameter\022+\n\013bias_fill"
+    "er\030\006 \001(\0132\026.caffe.FillerParameter\022\016\n\003pad\030"
+    "\007 \001(\r:\0010\022\022\n\nkernelsize\030\010 \001(\r\022\020\n\005group\030\t "
+    "\001(\r:\0011\022\021\n\006stride\030\n \001(\r:\0011\0225\n\004pool\030\013 \001(\0162"
+    "\".caffe.V0LayerParameter.PoolMethod:\003MAX"
+    "\022\032\n\rdropout_ratio\030\014 \001(\002:\0030.5\022\025\n\nlocal_si"
+    "ze\030\r \001(\r:\0015\022\020\n\005alpha\030\016 \001(\002:\0011\022\022\n\004beta\030\017 "
+    "\001(\002:\0040.75\022\014\n\001k\030\026 \001(\002:\0011\022\016\n\006source\030\020 \001(\t\022"
+    "\020\n\005scale\030\021 \001(\002:\0011\022\020\n\010meanfile\030\022 \001(\t\022\021\n\tb"
+    "atchsize\030\023 \001(\r\022\023\n\010cropsize\030\024 \001(\r:\0010\022\025\n\006m"
+    "irror\030\025 \001(\010:\005false\022\037\n\005blobs\0302 \003(\0132\020.caff"
+    "e.BlobProto\022\020\n\010blobs_lr\0303 \003(\002\022\024\n\014weight_"
+    "decay\0304 \003(\002\022\024\n\trand_skip\0305 \001(\r:\0010\022\035\n\020det"
+    "_fg_threshold\0306 \001(\002:\0030.5\022\035\n\020det_bg_thres"
+    "hold\0307 \001(\002:\0030.5\022\035\n\017det_fg_fraction\0308 \001(\002"
+    ":\0040.25\022\032\n\017det_context_pad\030: \001(\r:\0010\022\033\n\rde"
+    "t_crop_mode\030; \001(\t:\004warp\022\022\n\007new_num\030< \001(\005"
+    ":\0010\022\027\n\014new_channels\030= \001(\005:\0010\022\025\n\nnew_heig"
+    "ht\030> \001(\005:\0010\022\024\n\tnew_width\030\? \001(\005:\0010\022\035\n\016shu"
+    "ffle_images\030@ \001(\010:\005false\022\025\n\nconcat_dim\030A"
+    " \001(\r:\0011\0226\n\021hdf5_output_param\030\351\007 \001(\0132\032.ca"
+    "ffe.HDF5OutputParameter\".\n\nPoolMethod\022\007\n"
+    "\003MAX\020\000\022\007\n\003AVE\020\001\022\016\n\nSTOCHASTIC\020\002\"W\n\016PReLU"
+    "Parameter\022&\n\006filler\030\001 \001(\0132\026.caffe.Filler"
+    "Parameter\022\035\n\016channel_shared\030\002 \001(\010:\005false"
+    "\"\207\001\n\016NormalizedBBox\022\014\n\004xmin\030\001 \001(\002\022\014\n\004ymi"
+    "n\030\002 \001(\002\022\014\n\004xmax\030\003 \001(\002\022\014\n\004ymax\030\004 \001(\002\022\r\n\005l"
+    "abel\030\005 \001(\005\022\021\n\tdifficult\030\006 \001(\010\022\r\n\005score\030\007"
+    " \001(\002\022\014\n\004size\030\010 \001(\002*=\n\004Type\022\n\n\006DOUBLE\020\000\022\t"
+    "\n\005FLOAT\020\001\022\013\n\007FLOAT16\020\002\022\007\n\003INT\020\003\022\010\n\004UINT\020"
+    "\004*\034\n\005Phase\022\t\n\005TRAIN\020\000\022\010\n\004TEST\020\001", 16991);
   ::google::protobuf::MessageFactory::InternalRegisterGeneratedFile(
     "caffe.proto", &protobuf_RegisterTypes);
   ::google::protobuf::internal::OnShutdown(&protobuf_ShutdownFile_caffe_2eproto);
@@ -2550,6 +2558,23 @@ struct StaticDescriptorInitializer_caffe_2eproto {
     protobuf_AddDesc_caffe_2eproto();
   }
 } static_descriptor_initializer_caffe_2eproto_;
+const ::google::protobuf::EnumDescriptor* Type_descriptor() {
+  protobuf_AssignDescriptorsOnce();
+  return Type_descriptor_;
+}
+bool Type_IsValid(int value) {
+  switch (value) {
+    case 0:
+    case 1:
+    case 2:
+    case 3:
+    case 4:
+      return true;
+    default:
+      return false;
+  }
+}
+
 const ::google::protobuf::EnumDescriptor* Phase_descriptor() {
   protobuf_AssignDescriptorsOnce();
   return Phase_descriptor_;
@@ -2892,6 +2917,8 @@ const int BlobProto::kDataFieldNumber;
 const int BlobProto::kDiffFieldNumber;
 const int BlobProto::kDoubleDataFieldNumber;
 const int BlobProto::kDoubleDiffFieldNumber;
+const int BlobProto::kRawDataTypeFieldNumber;
+const int BlobProto::kRawDataFieldNumber;
 const int BlobProto::kNumFieldNumber;
 const int BlobProto::kChannelsFieldNumber;
 const int BlobProto::kHeightFieldNumber;
@@ -2920,9 +2947,10 @@ BlobProto::BlobProto(const BlobProto& from)
 
 void BlobProto::SharedCtor() {
   _cached_size_ = 0;
+  raw_data_.UnsafeSetDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
   shape_ = NULL;
-  ::memset(&num_, 0, reinterpret_cast<char*>(&width_) -
-    reinterpret_cast<char*>(&num_) + sizeof(width_));
+  ::memset(&raw_data_type_, 0, reinterpret_cast<char*>(&width_) -
+    reinterpret_cast<char*>(&raw_data_type_) + sizeof(width_));
 }
 
 BlobProto::~BlobProto() {
@@ -2931,6 +2959,7 @@ BlobProto::~BlobProto() {
 }
 
 void BlobProto::SharedDtor() {
+  raw_data_.DestroyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
   if (this != &BlobProto_default_instance_.get()) {
     delete shape_;
   }
@@ -2980,12 +3009,15 @@ void BlobProto::Clear() {
 } while (0)
 
   if (_has_bits_[0 / 32] & 225u) {
-    ZR_(num_, height_);
+    ZR_(raw_data_type_, num_);
     if (has_shape()) {
       if (shape_ != NULL) shape_->::caffe::BlobShape::Clear();
     }
+    if (has_raw_data()) {
+      raw_data_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+    }
   }
-  width_ = 0;
+  ZR_(channels_, width_);
 
 #undef ZR_HELPER_
 #undef ZR_
@@ -3150,6 +3182,39 @@ bool BlobProto::MergePartialFromCodedStream(
         } else {
           goto handle_unusual;
         }
+        if (input->ExpectTag(80)) goto parse_raw_data_type;
+        break;
+      }
+
+      // optional .caffe.Type raw_data_type = 10;
+      case 10: {
+        if (tag == 80) {
+         parse_raw_data_type:
+          int value;
+          DO_((::google::protobuf::internal::WireFormatLite::ReadPrimitive<
+                   int, ::google::protobuf::internal::WireFormatLite::TYPE_ENUM>(
+                 input, &value)));
+          if (::caffe::Type_IsValid(value)) {
+            set_raw_data_type(static_cast< ::caffe::Type >(value));
+          } else {
+            mutable_unknown_fields()->AddVarint(10, value);
+          }
+        } else {
+          goto handle_unusual;
+        }
+        if (input->ExpectTag(98)) goto parse_raw_data;
+        break;
+      }
+
+      // optional bytes raw_data = 12 [packed = false];
+      case 12: {
+        if (tag == 98) {
+         parse_raw_data:
+          DO_(::google::protobuf::internal::WireFormatLite::ReadBytes(
+                input, this->mutable_raw_data()));
+        } else {
+          goto handle_unusual;
+        }
         if (input->ExpectAtEnd()) goto success;
         break;
       }
@@ -3245,6 +3310,18 @@ void BlobProto::SerializeWithCachedSizes(
       this->double_diff(i), output);
   }
 
+  // optional .caffe.Type raw_data_type = 10;
+  if (has_raw_data_type()) {
+    ::google::protobuf::internal::WireFormatLite::WriteEnum(
+      10, this->raw_data_type(), output);
+  }
+
+  // optional bytes raw_data = 12 [packed = false];
+  if (has_raw_data()) {
+    ::google::protobuf::internal::WireFormatLite::WriteBytesMaybeAliased(
+      12, this->raw_data(), output);
+  }
+
   if (_internal_metadata_.have_unknown_fields()) {
     ::google::protobuf::internal::WireFormat::SerializeUnknownFields(
         unknown_fields(), output);
@@ -3339,6 +3416,19 @@ void BlobProto::SerializeWithCachedSizes(
       WriteDoubleNoTagToArray(this->double_diff(i), target);
   }
 
+  // optional .caffe.Type raw_data_type = 10;
+  if (has_raw_data_type()) {
+    target = ::google::protobuf::internal::WireFormatLite::WriteEnumToArray(
+      10, this->raw_data_type(), target);
+  }
+
+  // optional bytes raw_data = 12 [packed = false];
+  if (has_raw_data()) {
+    target =
+      ::google::protobuf::internal::WireFormatLite::WriteBytesToArray(
+        12, this->raw_data(), target);
+  }
+
   if (_internal_metadata_.have_unknown_fields()) {
     target = ::google::protobuf::internal::WireFormat::SerializeUnknownFieldsToArray(
         unknown_fields(), target);
@@ -3359,6 +3449,19 @@ size_t BlobProto::ByteSizeLong() const {
           *this->shape_);
     }
 
+    // optional .caffe.Type raw_data_type = 10;
+    if (has_raw_data_type()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::EnumSize(this->raw_data_type());
+    }
+
+    // optional bytes raw_data = 12 [packed = false];
+    if (has_raw_data()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::BytesSize(
+          this->raw_data());
+    }
+
     // optional int32 num = 1 [default = 0];
     if (has_num()) {
       total_size += 1 +
@@ -3366,6 +3469,8 @@ size_t BlobProto::ByteSizeLong() const {
           this->num());
     }
 
+  }
+  if (_has_bits_[8 / 32] & 1792u) {
     // optional int32 channels = 2 [default = 0];
     if (has_channels()) {
       total_size += 1 +
@@ -3380,14 +3485,14 @@ size_t BlobProto::ByteSizeLong() const {
           this->height());
     }
 
-  }
-  // optional int32 width = 4 [default = 0];
-  if (has_width()) {
-    total_size += 1 +
-      ::google::protobuf::internal::WireFormatLite::Int32Size(
-        this->width());
-  }
+    // optional int32 width = 4 [default = 0];
+    if (has_width()) {
+      total_size += 1 +
+        ::google::protobuf::internal::WireFormatLite::Int32Size(
+          this->width());
+    }
 
+  }
   // repeated float data = 5 [packed = true];
   {
     size_t data_size = 0;
@@ -3498,17 +3603,24 @@ void BlobProto::UnsafeMergeFrom(const BlobProto& from) {
     if (from.has_shape()) {
       mutable_shape()->::caffe::BlobShape::MergeFrom(from.shape());
     }
+    if (from.has_raw_data_type()) {
+      set_raw_data_type(from.raw_data_type());
+    }
+    if (from.has_raw_data()) {
+      set_has_raw_data();
+      raw_data_.AssignWithDefault(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), from.raw_data_);
+    }
     if (from.has_num()) {
       set_num(from.num());
     }
+  }
+  if (from._has_bits_[8 / 32] & (0xffu << (8 % 32))) {
     if (from.has_channels()) {
       set_channels(from.channels());
     }
     if (from.has_height()) {
       set_height(from.height());
     }
-  }
-  if (from._has_bits_[8 / 32] & (0xffu << (8 % 32))) {
     if (from.has_width()) {
       set_width(from.width());
     }
@@ -3548,6 +3660,8 @@ void BlobProto::InternalSwap(BlobProto* other) {
   diff_.UnsafeArenaSwap(&other->diff_);
   double_data_.UnsafeArenaSwap(&other->double_data_);
   double_diff_.UnsafeArenaSwap(&other->double_diff_);
+  std::swap(raw_data_type_, other->raw_data_type_);
+  raw_data_.Swap(&other->raw_data_);
   std::swap(num_, other->num_);
   std::swap(channels_, other->channels_);
   std::swap(height_, other->height_);
@@ -3733,15 +3847,94 @@ BlobProto::mutable_double_diff() {
   return &double_diff_;
 }
 
+// optional .caffe.Type raw_data_type = 10;
+bool BlobProto::has_raw_data_type() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
+}
+void BlobProto::set_has_raw_data_type() {
+  _has_bits_[0] |= 0x00000020u;
+}
+void BlobProto::clear_has_raw_data_type() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+void BlobProto::clear_raw_data_type() {
+  raw_data_type_ = 0;
+  clear_has_raw_data_type();
+}
+::caffe::Type BlobProto::raw_data_type() const {
+  // @@protoc_insertion_point(field_get:caffe.BlobProto.raw_data_type)
+  return static_cast< ::caffe::Type >(raw_data_type_);
+}
+void BlobProto::set_raw_data_type(::caffe::Type value) {
+  assert(::caffe::Type_IsValid(value));
+  set_has_raw_data_type();
+  raw_data_type_ = value;
+  // @@protoc_insertion_point(field_set:caffe.BlobProto.raw_data_type)
+}
+
+// optional bytes raw_data = 12 [packed = false];
+bool BlobProto::has_raw_data() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+void BlobProto::set_has_raw_data() {
+  _has_bits_[0] |= 0x00000040u;
+}
+void BlobProto::clear_has_raw_data() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+void BlobProto::clear_raw_data() {
+  raw_data_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+  clear_has_raw_data();
+}
+const ::std::string& BlobProto::raw_data() const {
+  // @@protoc_insertion_point(field_get:caffe.BlobProto.raw_data)
+  return raw_data_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+void BlobProto::set_raw_data(const ::std::string& value) {
+  set_has_raw_data();
+  raw_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+  // @@protoc_insertion_point(field_set:caffe.BlobProto.raw_data)
+}
+void BlobProto::set_raw_data(const char* value) {
+  set_has_raw_data();
+  raw_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+  // @@protoc_insertion_point(field_set_char:caffe.BlobProto.raw_data)
+}
+void BlobProto::set_raw_data(const void* value, size_t size) {
+  set_has_raw_data();
+  raw_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+      ::std::string(reinterpret_cast<const char*>(value), size));
+  // @@protoc_insertion_point(field_set_pointer:caffe.BlobProto.raw_data)
+}
+::std::string* BlobProto::mutable_raw_data() {
+  set_has_raw_data();
+  // @@protoc_insertion_point(field_mutable:caffe.BlobProto.raw_data)
+  return raw_data_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+::std::string* BlobProto::release_raw_data() {
+  // @@protoc_insertion_point(field_release:caffe.BlobProto.raw_data)
+  clear_has_raw_data();
+  return raw_data_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+void BlobProto::set_allocated_raw_data(::std::string* raw_data) {
+  if (raw_data != NULL) {
+    set_has_raw_data();
+  } else {
+    clear_has_raw_data();
+  }
+  raw_data_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), raw_data);
+  // @@protoc_insertion_point(field_set_allocated:caffe.BlobProto.raw_data)
+}
+
 // optional int32 num = 1 [default = 0];
 bool BlobProto::has_num() const {
-  return (_has_bits_[0] & 0x00000020u) != 0;
+  return (_has_bits_[0] & 0x00000080u) != 0;
 }
 void BlobProto::set_has_num() {
-  _has_bits_[0] |= 0x00000020u;
+  _has_bits_[0] |= 0x00000080u;
 }
 void BlobProto::clear_has_num() {
-  _has_bits_[0] &= ~0x00000020u;
+  _has_bits_[0] &= ~0x00000080u;
 }
 void BlobProto::clear_num() {
   num_ = 0;
@@ -3759,13 +3952,13 @@ void BlobProto::set_num(::google::protobuf::int32 value) {
 
 // optional int32 channels = 2 [default = 0];
 bool BlobProto::has_channels() const {
-  return (_has_bits_[0] & 0x00000040u) != 0;
+  return (_has_bits_[0] & 0x00000100u) != 0;
 }
 void BlobProto::set_has_channels() {
-  _has_bits_[0] |= 0x00000040u;
+  _has_bits_[0] |= 0x00000100u;
 }
 void BlobProto::clear_has_channels() {
-  _has_bits_[0] &= ~0x00000040u;
+  _has_bits_[0] &= ~0x00000100u;
 }
 void BlobProto::clear_channels() {
   channels_ = 0;
@@ -3783,13 +3976,13 @@ void BlobProto::set_channels(::google::protobuf::int32 value) {
 
 // optional int32 height = 3 [default = 0];
 bool BlobProto::has_height() const {
-  return (_has_bits_[0] & 0x00000080u) != 0;
+  return (_has_bits_[0] & 0x00000200u) != 0;
 }
 void BlobProto::set_has_height() {
-  _has_bits_[0] |= 0x00000080u;
+  _has_bits_[0] |= 0x00000200u;
 }
 void BlobProto::clear_has_height() {
-  _has_bits_[0] &= ~0x00000080u;
+  _has_bits_[0] &= ~0x00000200u;
 }
 void BlobProto::clear_height() {
   height_ = 0;
@@ -3807,13 +4000,13 @@ void BlobProto::set_height(::google::protobuf::int32 value) {
 
 // optional int32 width = 4 [default = 0];
 bool BlobProto::has_width() const {
-  return (_has_bits_[0] & 0x00000100u) != 0;
+  return (_has_bits_[0] & 0x00000400u) != 0;
 }
 void BlobProto::set_has_width() {
-  _has_bits_[0] |= 0x00000100u;
+  _has_bits_[0] |= 0x00000400u;
 }
 void BlobProto::clear_has_width() {
-  _has_bits_[0] &= ~0x00000100u;
+  _has_bits_[0] &= ~0x00000400u;
 }
 void BlobProto::clear_width() {
   width_ = 0;
index e2fe083..f1b85f0 100644 (file)
@@ -641,6 +641,28 @@ inline bool V0LayerParameter_PoolMethod_Parse(
   return ::google::protobuf::internal::ParseNamedEnum<V0LayerParameter_PoolMethod>(
     V0LayerParameter_PoolMethod_descriptor(), name, value);
 }
+enum Type {
+  DOUBLE = 0,
+  FLOAT = 1,
+  FLOAT16 = 2,
+  INT = 3,
+  UINT = 4
+};
+bool Type_IsValid(int value);
+const Type Type_MIN = DOUBLE;
+const Type Type_MAX = UINT;
+const int Type_ARRAYSIZE = Type_MAX + 1;
+
+const ::google::protobuf::EnumDescriptor* Type_descriptor();
+inline const ::std::string& Type_Name(Type value) {
+  return ::google::protobuf::internal::NameOfEnum(
+    Type_descriptor(), value);
+}
+inline bool Type_Parse(
+    const ::std::string& name, Type* value) {
+  return ::google::protobuf::internal::ParseNamedEnum<Type>(
+    Type_descriptor(), name, value);
+}
 enum Phase {
   TRAIN = 0,
   TEST = 1
@@ -892,6 +914,25 @@ class BlobProto : public ::google::protobuf::Message /* @@protoc_insertion_point
   ::google::protobuf::RepeatedField< double >*
       mutable_double_diff();
 
+  // optional .caffe.Type raw_data_type = 10;
+  bool has_raw_data_type() const;
+  void clear_raw_data_type();
+  static const int kRawDataTypeFieldNumber = 10;
+  ::caffe::Type raw_data_type() const;
+  void set_raw_data_type(::caffe::Type value);
+
+  // optional bytes raw_data = 12 [packed = false];
+  bool has_raw_data() const;
+  void clear_raw_data();
+  static const int kRawDataFieldNumber = 12;
+  const ::std::string& raw_data() const;
+  void set_raw_data(const ::std::string& value);
+  void set_raw_data(const char* value);
+  void set_raw_data(const void* value, size_t size);
+  ::std::string* mutable_raw_data();
+  ::std::string* release_raw_data();
+  void set_allocated_raw_data(::std::string* raw_data);
+
   // optional int32 num = 1 [default = 0];
   bool has_num() const;
   void clear_num();
@@ -924,6 +965,10 @@ class BlobProto : public ::google::protobuf::Message /* @@protoc_insertion_point
  private:
   inline void set_has_shape();
   inline void clear_has_shape();
+  inline void set_has_raw_data_type();
+  inline void clear_has_raw_data_type();
+  inline void set_has_raw_data();
+  inline void clear_has_raw_data();
   inline void set_has_num();
   inline void clear_has_num();
   inline void set_has_channels();
@@ -944,7 +989,9 @@ class BlobProto : public ::google::protobuf::Message /* @@protoc_insertion_point
   mutable int _double_data_cached_byte_size_;
   ::google::protobuf::RepeatedField< double > double_diff_;
   mutable int _double_diff_cached_byte_size_;
+  ::google::protobuf::internal::ArenaStringPtr raw_data_;
   ::caffe::BlobShape* shape_;
+  int raw_data_type_;
   ::google::protobuf::int32 num_;
   ::google::protobuf::int32 channels_;
   ::google::protobuf::int32 height_;
@@ -12884,15 +12931,94 @@ BlobProto::mutable_double_diff() {
   return &double_diff_;
 }
 
+// optional .caffe.Type raw_data_type = 10;
+inline bool BlobProto::has_raw_data_type() const {
+  return (_has_bits_[0] & 0x00000020u) != 0;
+}
+inline void BlobProto::set_has_raw_data_type() {
+  _has_bits_[0] |= 0x00000020u;
+}
+inline void BlobProto::clear_has_raw_data_type() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+inline void BlobProto::clear_raw_data_type() {
+  raw_data_type_ = 0;
+  clear_has_raw_data_type();
+}
+inline ::caffe::Type BlobProto::raw_data_type() const {
+  // @@protoc_insertion_point(field_get:caffe.BlobProto.raw_data_type)
+  return static_cast< ::caffe::Type >(raw_data_type_);
+}
+inline void BlobProto::set_raw_data_type(::caffe::Type value) {
+  assert(::caffe::Type_IsValid(value));
+  set_has_raw_data_type();
+  raw_data_type_ = value;
+  // @@protoc_insertion_point(field_set:caffe.BlobProto.raw_data_type)
+}
+
+// optional bytes raw_data = 12 [packed = false];
+inline bool BlobProto::has_raw_data() const {
+  return (_has_bits_[0] & 0x00000040u) != 0;
+}
+inline void BlobProto::set_has_raw_data() {
+  _has_bits_[0] |= 0x00000040u;
+}
+inline void BlobProto::clear_has_raw_data() {
+  _has_bits_[0] &= ~0x00000040u;
+}
+inline void BlobProto::clear_raw_data() {
+  raw_data_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+  clear_has_raw_data();
+}
+inline const ::std::string& BlobProto::raw_data() const {
+  // @@protoc_insertion_point(field_get:caffe.BlobProto.raw_data)
+  return raw_data_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+inline void BlobProto::set_raw_data(const ::std::string& value) {
+  set_has_raw_data();
+  raw_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
+  // @@protoc_insertion_point(field_set:caffe.BlobProto.raw_data)
+}
+inline void BlobProto::set_raw_data(const char* value) {
+  set_has_raw_data();
+  raw_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
+  // @@protoc_insertion_point(field_set_char:caffe.BlobProto.raw_data)
+}
+inline void BlobProto::set_raw_data(const void* value, size_t size) {
+  set_has_raw_data();
+  raw_data_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
+      ::std::string(reinterpret_cast<const char*>(value), size));
+  // @@protoc_insertion_point(field_set_pointer:caffe.BlobProto.raw_data)
+}
+inline ::std::string* BlobProto::mutable_raw_data() {
+  set_has_raw_data();
+  // @@protoc_insertion_point(field_mutable:caffe.BlobProto.raw_data)
+  return raw_data_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+inline ::std::string* BlobProto::release_raw_data() {
+  // @@protoc_insertion_point(field_release:caffe.BlobProto.raw_data)
+  clear_has_raw_data();
+  return raw_data_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
+}
+inline void BlobProto::set_allocated_raw_data(::std::string* raw_data) {
+  if (raw_data != NULL) {
+    set_has_raw_data();
+  } else {
+    clear_has_raw_data();
+  }
+  raw_data_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), raw_data);
+  // @@protoc_insertion_point(field_set_allocated:caffe.BlobProto.raw_data)
+}
+
 // optional int32 num = 1 [default = 0];
 inline bool BlobProto::has_num() const {
-  return (_has_bits_[0] & 0x00000020u) != 0;
+  return (_has_bits_[0] & 0x00000080u) != 0;
 }
 inline void BlobProto::set_has_num() {
-  _has_bits_[0] |= 0x00000020u;
+  _has_bits_[0] |= 0x00000080u;
 }
 inline void BlobProto::clear_has_num() {
-  _has_bits_[0] &= ~0x00000020u;
+  _has_bits_[0] &= ~0x00000080u;
 }
 inline void BlobProto::clear_num() {
   num_ = 0;
@@ -12910,13 +13036,13 @@ inline void BlobProto::set_num(::google::protobuf::int32 value) {
 
 // optional int32 channels = 2 [default = 0];
 inline bool BlobProto::has_channels() const {
-  return (_has_bits_[0] & 0x00000040u) != 0;
+  return (_has_bits_[0] & 0x00000100u) != 0;
 }
 inline void BlobProto::set_has_channels() {
-  _has_bits_[0] |= 0x00000040u;
+  _has_bits_[0] |= 0x00000100u;
 }
 inline void BlobProto::clear_has_channels() {
-  _has_bits_[0] &= ~0x00000040u;
+  _has_bits_[0] &= ~0x00000100u;
 }
 inline void BlobProto::clear_channels() {
   channels_ = 0;
@@ -12934,13 +13060,13 @@ inline void BlobProto::set_channels(::google::protobuf::int32 value) {
 
 // optional int32 height = 3 [default = 0];
 inline bool BlobProto::has_height() const {
-  return (_has_bits_[0] & 0x00000080u) != 0;
+  return (_has_bits_[0] & 0x00000200u) != 0;
 }
 inline void BlobProto::set_has_height() {
-  _has_bits_[0] |= 0x00000080u;
+  _has_bits_[0] |= 0x00000200u;
 }
 inline void BlobProto::clear_has_height() {
-  _has_bits_[0] &= ~0x00000080u;
+  _has_bits_[0] &= ~0x00000200u;
 }
 inline void BlobProto::clear_height() {
   height_ = 0;
@@ -12958,13 +13084,13 @@ inline void BlobProto::set_height(::google::protobuf::int32 value) {
 
 // optional int32 width = 4 [default = 0];
 inline bool BlobProto::has_width() const {
-  return (_has_bits_[0] & 0x00000100u) != 0;
+  return (_has_bits_[0] & 0x00000400u) != 0;
 }
 inline void BlobProto::set_has_width() {
-  _has_bits_[0] |= 0x00000100u;
+  _has_bits_[0] |= 0x00000400u;
 }
 inline void BlobProto::clear_has_width() {
-  _has_bits_[0] &= ~0x00000100u;
+  _has_bits_[0] &= ~0x00000400u;
 }
 inline void BlobProto::clear_width() {
   width_ = 0;
@@ -28597,6 +28723,11 @@ template <>
 inline const EnumDescriptor* GetEnumDescriptor< ::caffe::V0LayerParameter_PoolMethod>() {
   return ::caffe::V0LayerParameter_PoolMethod_descriptor();
 }
+template <> struct is_proto_enum< ::caffe::Type> : ::google::protobuf::internal::true_type {};
+template <>
+inline const EnumDescriptor* GetEnumDescriptor< ::caffe::Type>() {
+  return ::caffe::Type_descriptor();
+}
 template <> struct is_proto_enum< ::caffe::Phase> : ::google::protobuf::internal::true_type {};
 template <>
 inline const EnumDescriptor* GetEnumDescriptor< ::caffe::Phase>() {
index 3d23fb4..abe4bef 100644 (file)
@@ -50,6 +50,16 @@ syntax = "proto2";
 
 package caffe;
 
+// NVIDIA Caffe extension (https://github.com/NVIDIA/caffe) used to store fp16 weights.
+// Math and storage types
+enum Type {
+  DOUBLE = 0;
+  FLOAT = 1;
+  FLOAT16 = 2;
+  INT = 3;  // math not supported
+  UINT = 4;  // math not supported
+}
+
 // Specifies the shape (dimensions) of a Blob.
 message BlobShape {
   repeated int64 dim = 1 [packed = true];
@@ -62,6 +72,11 @@ message BlobProto {
   repeated double double_data = 8 [packed = true];
   repeated double double_diff = 9 [packed = true];
 
+  // NVIDIA Caffe fields begin.
+  optional Type raw_data_type = 10;
+  optional bytes raw_data = 12 [packed = false];
+  // NVIDIA Caffe fields end.
+
   // 4D dimensions -- deprecated.  Use "shape" instead.
   optional int32 num = 1 [default = 0];
   optional int32 channels = 2 [default = 0];
index fd5335e..9923cf3 100644 (file)
@@ -225,13 +225,28 @@ public:
         blobShapeFromProto(pbBlob, shape);
 
         dstBlob.create((int)shape.size(), &shape[0], CV_32F);
-        CV_Assert(pbBlob.data_size() == (int)dstBlob.total());
-
-        CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
         float *dstData = dstBlob.ptr<float>();
+        if (pbBlob.data_size())
+        {
+            // Single precision floats.
+            CV_Assert(pbBlob.data_size() == (int)dstBlob.total());
+
+            CV_DbgAssert(pbBlob.GetDescriptor()->FindFieldByLowercaseName("data")->cpp_type() == FieldDescriptor::CPPTYPE_FLOAT);
 
-        for (int i = 0; i < pbBlob.data_size(); i++)
-            dstData[i] = pbBlob.data(i);
+            for (int i = 0; i < pbBlob.data_size(); i++)
+                dstData[i] = pbBlob.data(i);
+        }
+        else
+        {
+            // Half precision floats.
+            CV_Assert(pbBlob.raw_data_type() == caffe::FLOAT16);
+            std::string raw_data = pbBlob.raw_data();
+
+            CV_Assert(raw_data.size() / 2 == (int)dstBlob.total());
+
+            Mat halfs((int)shape.size(), &shape[0], CV_16SC1, (void*)raw_data.c_str());
+            convertFp16(halfs, dstBlob);
+        }
     }
 
     void extractBinaryLayerParms(const caffe::LayerParameter& layer, LayerParams& layerParams)
diff --git a/modules/dnn/src/caffe/caffe_shrinker.cpp b/modules/dnn/src/caffe/caffe_shrinker.cpp
new file mode 100644 (file)
index 0000000..f9c50db
--- /dev/null
@@ -0,0 +1,65 @@
+// This file is part of OpenCV project.
+// It is subject to the license terms in the LICENSE file found in the top-level directory
+// of this distribution and at http://opencv.org/license.html.
+//
+// Copyright (C) 2017, Intel Corporation, all rights reserved.
+// Third party copyrights are property of their respective owners.
+
+#include "../precomp.hpp"
+
+#ifdef HAVE_PROTOBUF
+#include <fstream>
+#include "caffe_io.hpp"
+#endif
+
+namespace cv { namespace dnn {
+CV__DNN_EXPERIMENTAL_NS_BEGIN
+
+#ifdef HAVE_PROTOBUF
+
+void shrinkCaffeModel(const String& src, const String& dst)
+{
+    CV_TRACE_FUNCTION();
+
+    caffe::NetParameter net;
+    ReadNetParamsFromBinaryFileOrDie(src.c_str(), &net);
+
+    for (int i = 0; i < net.layer_size(); ++i)
+    {
+        caffe::LayerParameter* lp = net.mutable_layer(i);
+        for (int j = 0; j < lp->blobs_size(); ++j)
+        {
+            caffe::BlobProto* blob = lp->mutable_blobs(j);
+            CV_Assert(blob->data_size() != 0);  // float32 array.
+
+            Mat floats(1, blob->data_size(), CV_32FC1, (void*)blob->data().data());
+            Mat halfs(1, blob->data_size(), CV_16SC1);
+            convertFp16(floats, halfs);  // Convert to float16.
+
+            blob->clear_data();  // Clear float32 data.
+
+            // Set float16 data.
+            blob->set_raw_data(halfs.data, halfs.total() * halfs.elemSize());
+            blob->set_raw_data_type(caffe::FLOAT16);
+        }
+    }
+    size_t msgSize = net.ByteSizeLong();
+    std::vector<uint8_t> output(msgSize);
+    net.SerializeWithCachedSizesToArray(&output[0]);
+
+    std::ofstream ofs(dst.c_str(), std::ios::binary);
+    ofs.write((const char*)&output[0], msgSize);
+    ofs.close();
+}
+
+#else
+
+void shrinkCaffeModel(const String& src, const String& dst)
+{
+    CV_Error(cv::Error::StsNotImplemented, "libprotobuf required to import data from Caffe models");
+}
+
+#endif  // HAVE_PROTOBUF
+
+CV__DNN_EXPERIMENTAL_NS_END
+}} // namespace
index 303e05f..fe1036c 100644 (file)
@@ -94,6 +94,7 @@ void initializeLayerFactory()
     CV_DNN_REGISTER_LAYER_CLASS(LPNormalize,    LPNormalizeLayer);
 
     CV_DNN_REGISTER_LAYER_CLASS(ReLU,           ReLULayer);
+    CV_DNN_REGISTER_LAYER_CLASS(ReLU6,          ReLU6Layer);
     CV_DNN_REGISTER_LAYER_CLASS(ChannelsPReLU,  ChannelsPReLULayer);
     CV_DNN_REGISTER_LAYER_CLASS(Sigmoid,        SigmoidLayer);
     CV_DNN_REGISTER_LAYER_CLASS(TanH,           TanHLayer);
index 9c929a0..dee3fbb 100644 (file)
@@ -248,6 +248,62 @@ struct ReLUFunctor
     int64 getFLOPSPerElement() const { return 1; }
 };
 
+struct ReLU6Functor
+{
+    typedef ReLU6Layer Layer;
+    float minValue, maxValue;
+
+    ReLU6Functor(float minValue_ = 0.0f, float maxValue_ = 6.0f)
+        : minValue(minValue_), maxValue(maxValue_)
+    {
+        CV_Assert(minValue <= maxValue);
+    }
+
+    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
+    {
+        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
+        {
+            int i = 0;
+#if CV_SIMD128
+            v_float32x4 minV = v_setall_f32(minValue), maxV = v_setall_f32(maxValue);
+            for( ; i <= len - 16; i += 16 )
+            {
+                v_float32x4 x0 = v_load(srcptr + i);
+                v_float32x4 x1 = v_load(srcptr + i + 4);
+                v_float32x4 x2 = v_load(srcptr + i + 8);
+                v_float32x4 x3 = v_load(srcptr + i + 12);
+                x0 = v_min(v_max(minV, x0), maxV);
+                x1 = v_min(v_max(minV, x1), maxV);
+                x2 = v_min(v_max(minV, x2), maxV);
+                x3 = v_min(v_max(minV, x3), maxV);
+                v_store(dstptr + i, x0);
+                v_store(dstptr + i + 4, x1);
+                v_store(dstptr + i + 8, x2);
+                v_store(dstptr + i + 12, x3);
+            }
+#endif
+            for( ; i < len; i++ )
+            {
+                float x = srcptr[i];
+                if (x >= minValue)
+                    dstptr[i] = x <= maxValue ? x : maxValue;
+                else
+                    dstptr[i] = minValue;
+            }
+        }
+    }
+
+#ifdef HAVE_HALIDE
+    void attachHalide(const Halide::Expr& input, Halide::Func& top)
+    {
+        Halide::Var x("x"), y("y"), c("c"), n("n");
+        top(x, y, c, n) = clamp(input, minValue, maxValue);
+    }
+#endif  // HAVE_HALIDE
+
+    int64 getFLOPSPerElement() const { return 2; }
+};
+
 struct TanHFunctor
 {
     typedef TanHLayer Layer;
@@ -517,6 +573,15 @@ Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
     return l;
 }
 
+Ptr<ReLU6Layer> ReLU6Layer::create(const LayerParams& params)
+{
+    float minValue = params.get<float>("min_value", 0.0f);
+    float maxValue = params.get<float>("max_value", 6.0f);
+    Ptr<ReLU6Layer> l(new ElementWiseLayer<ReLU6Functor>(ReLU6Functor(minValue, maxValue)));
+    l->setParamsFrom(params);
+    return l;
+}
+
 Ptr<TanHLayer> TanHLayer::create(const LayerParams& params)
 {
     Ptr<TanHLayer> l(new ElementWiseLayer<TanHFunctor>());
index 0797986..8e1f18e 100644 (file)
@@ -85,11 +85,38 @@ static Mat getTensorContent(const tensorflow::TensorProto &tensor)
     switch (tensor.dtype())
     {
         case tensorflow::DT_FLOAT:
-            return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(float), CV_32FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<float>& field = tensor.float_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_DOUBLE:
-            return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(double), CV_64FC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<double>& field = tensor.double_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_64FC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_INT32:
-            return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+        {
+            if (!content.empty())
+                return Mat(1, content.size() / sizeof(int32_t), CV_32SC1, (void*)content.c_str()).clone();
+            else
+            {
+                const RepeatedField<int32_t>& field = tensor.int_val();
+                CV_Assert(!field.empty());
+                return Mat(1, field.size(), CV_32SC1, (void*)field.data()).clone();
+            }
+        }
         case tensorflow::DT_HALF:
         {
             Mat halfs;
@@ -573,7 +600,7 @@ void TFImporter::populateNet(Net dstNet)
         if(layers_to_ignore.find(li) != layers_to_ignore.end())
             continue;
 
-        if (type == "Conv2D" || type == "SpaceToBatchND")
+        if (type == "Conv2D" || type == "SpaceToBatchND" || type == "DepthwiseConv2dNative")
         {
             // The first node of dilated convolution subgraph.
             // Extract input node, dilation rate and paddings.
@@ -621,7 +648,28 @@ void TFImporter::populateNet(Net dstNet)
             }
 
             kernelFromTensor(getConstBlob(layer, value_id), layerParams.blobs[0]);
-            const int* kshape = layerParams.blobs[0].size.p;
+            int* kshape = layerParams.blobs[0].size.p;
+            if (type == "DepthwiseConv2dNative")
+            {
+                const int chMultiplier = kshape[0];
+                const int inCh = kshape[1];
+                const int height = kshape[2];
+                const int width = kshape[3];
+
+                Mat copy = layerParams.blobs[0].clone();
+                float* src = (float*)copy.data;
+                float* dst = (float*)layerParams.blobs[0].data;
+                for (int i = 0; i < chMultiplier; ++i)
+                    for (int j = 0; j < inCh; ++j)
+                        for (int s = 0; s < height * width; ++s)
+                            {
+                                int src_i = (i * inCh + j) * height * width + s;
+                                int dst_i = (j * chMultiplier + i) * height* width + s;
+                                dst[dst_i] = src[src_i];
+                            }
+                kshape[0] = inCh * chMultiplier;
+                kshape[1] = 1;
+            }
             layerParams.set("kernel_h", kshape[2]);
             layerParams.set("kernel_w", kshape[3]);
             layerParams.set("num_output", kshape[0]);
@@ -689,6 +737,10 @@ void TFImporter::populateNet(Net dstNet)
             layerParams.blobs.resize(1);
 
             StrIntVector next_layers = getNextLayers(net, name, "BiasAdd");
+            if (next_layers.empty())
+            {
+                next_layers = getNextLayers(net, name, "Add");
+            }
             if (next_layers.size() == 1) {
                 layerParams.set("bias_term", true);
                 layerParams.blobs.resize(2);
@@ -840,20 +892,20 @@ void TFImporter::populateNet(Net dstNet)
             {
                 // Multiplication by constant.
                 CV_Assert(layer.input_size() == 2);
+                Mat scaleMat = getTensorContent(getConstBlob(layer, value_id));
+                CV_Assert(scaleMat.type() == CV_32FC1);
 
-                float scale;
-                if (!getConstBlob(layer, value_id).float_val().empty())
-                    scale = getConstBlob(layer, value_id).float_val()[0];
-                else
+                int id;
+                if (scaleMat.total() == 1)  // is a scalar.
                 {
-                    Mat scaleMat;
-                    blobFromTensor(getConstBlob(layer, value_id), scaleMat);
-                    CV_Assert(scaleMat.total() == 1 && scaleMat.type() == CV_32FC1);
-                    scale = scaleMat.at<float>(0, 0);
+                    layerParams.set("scale", scaleMat.at<float>(0));
+                    id = dstNet.addLayer(name, "Power", layerParams);
+                }
+                else  // is a vector
+                {
+                    layerParams.blobs.resize(1, scaleMat);
+                    id = dstNet.addLayer(name, "Scale", layerParams);
                 }
-                layerParams.set("scale", scale);
-
-                int id = dstNet.addLayer(name, "Power", layerParams);
                 layer_id[name] = id;
 
                 Pin inp0 = parsePin(layer.input(0));
@@ -1006,12 +1058,13 @@ void TFImporter::populateNet(Net dstNet)
         }
         else if (type == "Abs" || type == "Tanh" || type == "Sigmoid" ||
                  type == "Relu" || type == "Elu" || type == "Softmax" ||
-                 type == "Identity")
+                 type == "Identity" || type == "Relu6")
         {
             std::string dnnType = type;
             if (type == "Abs") dnnType = "AbsVal";
             else if (type == "Tanh") dnnType = "TanH";
             else if (type == "Relu") dnnType = "ReLU";
+            else if (type == "Relu6") dnnType = "ReLU6";
             else if (type == "Elu") dnnType = "ELU";
 
             int id = dstNet.addLayer(name, dnnType, layerParams);
index 7512225..b0741f6 100644 (file)
@@ -177,4 +177,46 @@ TEST(Reproducibility_SqueezeNet_v1_1, Accuracy)
     normAssert(ref, out);
 }
 
+TEST(Reproducibility_AlexNet_fp16, Accuracy)
+{
+    const float l1 = 1e-5;
+    const float lInf = 2e-4;
+
+    const string proto = findDataFile("dnn/bvlc_alexnet.prototxt", false);
+    const string model = findDataFile("dnn/bvlc_alexnet.caffemodel", false);
+
+    shrinkCaffeModel(model, "bvlc_alexnet.caffemodel_fp16");
+    Net net = readNetFromCaffe(proto, "bvlc_alexnet.caffemodel_fp16");
+
+    Mat sample = imread(findDataFile("dnn/grace_hopper_227.png", false));
+
+    net.setInput(blobFromImage(sample, 1, Size(227, 227)));
+    Mat out = net.forward();
+    Mat ref = blobFromNPY(findDataFile("dnn/caffe_alexnet_prob.npy", false));
+    normAssert(ref, out, "", l1, lInf);
+}
+
+TEST(Reproducibility_GoogLeNet_fp16, Accuracy)
+{
+    const float l1 = 1e-5;
+    const float lInf = 3e-3;
+
+    const string proto = findDataFile("dnn/bvlc_googlenet.prototxt", false);
+    const string model = findDataFile("dnn/bvlc_googlenet.caffemodel", false);
+
+    shrinkCaffeModel(model, "bvlc_googlenet.caffemodel_fp16");
+    Net net = readNetFromCaffe(proto, "bvlc_googlenet.caffemodel_fp16");
+
+    std::vector<Mat> inpMats;
+    inpMats.push_back( imread(_tf("googlenet_0.png")) );
+    inpMats.push_back( imread(_tf("googlenet_1.png")) );
+    ASSERT_TRUE(!inpMats[0].empty() && !inpMats[1].empty());
+
+    net.setInput(blobFromImages(inpMats), "data");
+    Mat out = net.forward("prob");
+
+    Mat ref = blobFromNPY(_tf("googlenet_prob.npy"));
+    normAssert(out, ref, "", l1, lInf);
+}
+
 }
index d145bdf..f296fc4 100644 (file)
@@ -91,11 +91,12 @@ static void runTensorFlowNet(const std::string& prefix,
     normAssert(target, output, "", l1, lInf);
 }
 
-TEST(Test_TensorFlow, single_conv)
+TEST(Test_TensorFlow, conv)
 {
     runTensorFlowNet("single_conv");
     runTensorFlowNet("atrous_conv2d_valid");
     runTensorFlowNet("atrous_conv2d_same");
+    runTensorFlowNet("depthwise_conv2d");
 }
 
 TEST(Test_TensorFlow, padding)
@@ -114,8 +115,9 @@ TEST(Test_TensorFlow, pad_and_concat)
     runTensorFlowNet("pad_and_concat");
 }
 
-TEST(Test_TensorFlow, fused_batch_norm)
+TEST(Test_TensorFlow, batch_norm)
 {
+    runTensorFlowNet("batch_norm");
     runTensorFlowNet("fused_batch_norm");
 }
 
@@ -131,6 +133,11 @@ TEST(Test_TensorFlow, deconvolution)
     runTensorFlowNet("deconvolution");
 }
 
+TEST(Test_TensorFlow, matmul)
+{
+    runTensorFlowNet("matmul");
+}
+
 TEST(Test_TensorFlow, fp16)
 {
     const float l1 = 1e-3;
index d486d26..69c1bd8 100644 (file)
@@ -1636,6 +1636,99 @@ public:
         return true;
     }
 
+    class TrainAutoBody : public ParallelLoopBody
+    {
+    public:
+        TrainAutoBody(const vector<SvmParams>& _parameters,
+                      const cv::Mat& _samples,
+                      const cv::Mat& _responses,
+                      const cv::Mat& _labels,
+                      const vector<int>& _sidx,
+                      bool _is_classification,
+                      int _k_fold,
+                      std::vector<double>& _result) :
+        parameters(_parameters), samples(_samples), responses(_responses), labels(_labels),
+        sidx(_sidx), is_classification(_is_classification), k_fold(_k_fold), result(_result)
+        {}
+
+        void operator()( const cv::Range& range ) const
+        {
+            int sample_count = samples.rows;
+            int var_count_ = samples.cols;
+            size_t sample_size = var_count_*samples.elemSize();
+
+            int test_sample_count = (sample_count + k_fold/2)/k_fold;
+            int train_sample_count = sample_count - test_sample_count;
+
+            // Use a local instance
+            cv::Ptr<SVMImpl> svm = makePtr<SVMImpl>();
+            svm->class_labels = labels;
+
+            int rtype = responses.type();
+
+            Mat temp_train_samples(train_sample_count, var_count_, CV_32F);
+            Mat temp_test_samples(test_sample_count, var_count_, CV_32F);
+            Mat temp_train_responses(train_sample_count, 1, rtype);
+            Mat temp_test_responses;
+
+            for( int p = range.start; p < range.end; p++ )
+            {
+                svm->setParams(parameters[p]);
+
+                double error = 0;
+                for( int k = 0; k < k_fold; k++ )
+                {
+                    int start = (k*sample_count + k_fold/2)/k_fold;
+                    for( int i = 0; i < train_sample_count; i++ )
+                    {
+                        int j = sidx[(i+start)%sample_count];
+                        memcpy(temp_train_samples.ptr(i), samples.ptr(j), sample_size);
+                        if( is_classification )
+                            temp_train_responses.at<int>(i) = responses.at<int>(j);
+                        else if( !responses.empty() )
+                            temp_train_responses.at<float>(i) = responses.at<float>(j);
+                    }
+
+                    // Train SVM on <train_size> samples
+                    if( !svm->do_train( temp_train_samples, temp_train_responses ))
+                        continue;
+
+                    for( int i = 0; i < test_sample_count; i++ )
+                    {
+                        int j = sidx[(i+start+train_sample_count) % sample_count];
+                        memcpy(temp_test_samples.ptr(i), samples.ptr(j), sample_size);
+                    }
+
+                    svm->predict(temp_test_samples, temp_test_responses, 0);
+                    for( int i = 0; i < test_sample_count; i++ )
+                    {
+                        float val = temp_test_responses.at<float>(i);
+                        int j = sidx[(i+start+train_sample_count) % sample_count];
+                        if( is_classification )
+                            error += (float)(val != responses.at<int>(j));
+                        else
+                        {
+                            val -= responses.at<float>(j);
+                            error += val*val;
+                        }
+                    }
+                }
+
+                result[p] = error;
+            }
+        }
+
+    private:
+        const vector<SvmParams>& parameters;
+        const cv::Mat& samples;
+        const cv::Mat& responses;
+        const cv::Mat& labels;
+        const vector<int>& sidx;
+        bool is_classification;
+        int k_fold;
+        std::vector<double>& result;
+    };
+
     bool trainAuto( const Ptr<TrainData>& data, int k_fold,
                     ParamGrid C_grid, ParamGrid gamma_grid, ParamGrid p_grid,
                     ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid,
@@ -1713,15 +1806,12 @@ public:
 
         int sample_count = samples.rows;
         var_count = samples.cols;
-        size_t sample_size = var_count*samples.elemSize();
 
         vector<int> sidx;
         setRangeVector(sidx, sample_count);
 
-        int i, j, k;
-
         // randomly permute training samples
-        for( i = 0; i < sample_count; i++ )
+        for( int i = 0; i < sample_count; i++ )
         {
             int i1 = rng.uniform(0, sample_count);
             int i2 = rng.uniform(0, sample_count);
@@ -1735,7 +1825,7 @@ public:
             // between the k_fold parts.
             vector<int> sidx0, sidx1;
 
-            for( i = 0; i < sample_count; i++ )
+            for( int i = 0; i < sample_count; i++ )
             {
                 if( responses.at<int>(sidx[i]) == 0 )
                     sidx0.push_back(sidx[i]);
@@ -1746,15 +1836,15 @@ public:
             int n0 = (int)sidx0.size(), n1 = (int)sidx1.size();
             int a0 = 0, a1 = 0;
             sidx.clear();
-            for( k = 0; k < k_fold; k++ )
+            for( int k = 0; k < k_fold; k++ )
             {
                 int b0 = ((k+1)*n0 + k_fold/2)/k_fold, b1 = ((k+1)*n1 + k_fold/2)/k_fold;
                 int a = (int)sidx.size(), b = a + (b0 - a0) + (b1 - a1);
-                for( i = a0; i < b0; i++ )
+                for( int i = a0; i < b0; i++ )
                     sidx.push_back(sidx0[i]);
-                for( i = a1; i < b1; i++ )
+                for( int i = a1; i < b1; i++ )
                     sidx.push_back(sidx1[i]);
-                for( i = 0; i < (b - a); i++ )
+                for( int i = 0; i < (b - a); i++ )
                 {
                     int i1 = rng.uniform(a, b);
                     int i2 = rng.uniform(a, b);
@@ -1764,23 +1854,12 @@ public:
             }
         }
 
-        int test_sample_count = (sample_count + k_fold/2)/k_fold;
-        int train_sample_count = sample_count - test_sample_count;
-
-        SvmParams best_params = params;
-        double min_error = FLT_MAX;
-
-        int rtype = responses.type();
-
-        Mat temp_train_samples(train_sample_count, var_count, CV_32F);
-        Mat temp_test_samples(test_sample_count, var_count, CV_32F);
-        Mat temp_train_responses(train_sample_count, 1, rtype);
-        Mat temp_test_responses;
-
         // If grid.minVal == grid.maxVal, this will allow one and only one pass through the loop with params.var = grid.minVal.
         #define FOR_IN_GRID(var, grid) \
             for( params.var = grid.minVal; params.var == grid.minVal || params.var < grid.maxVal; params.var = (grid.minVal == grid.maxVal) ? grid.maxVal + 1 : params.var * grid.logStep )
 
+        // Create the list of parameters to test
+        std::vector<SvmParams> parameters;
         FOR_IN_GRID(C, C_grid)
         FOR_IN_GRID(gamma, gamma_grid)
         FOR_IN_GRID(p, p_grid)
@@ -1788,51 +1867,23 @@ public:
         FOR_IN_GRID(coef0, coef_grid)
         FOR_IN_GRID(degree, degree_grid)
         {
-            // make sure we updated the kernel and other parameters
-            setParams(params);
-
-            double error = 0;
-            for( k = 0; k < k_fold; k++ )
-            {
-                int start = (k*sample_count + k_fold/2)/k_fold;
-                for( i = 0; i < train_sample_count; i++ )
-                {
-                    j = sidx[(i+start)%sample_count];
-                    memcpy(temp_train_samples.ptr(i), samples.ptr(j), sample_size);
-                    if( is_classification )
-                        temp_train_responses.at<int>(i) = responses.at<int>(j);
-                    else if( !responses.empty() )
-                        temp_train_responses.at<float>(i) = responses.at<float>(j);
-                }
+            parameters.push_back(params);
+        }
 
-                // Train SVM on <train_size> samples
-                if( !do_train( temp_train_samples, temp_train_responses ))
-                    continue;
+        std::vector<double> result(parameters.size());
+        TrainAutoBody invoker(parameters, samples, responses, class_labels, sidx,
+                              is_classification, k_fold, result);
+        parallel_for_(cv::Range(0,(int)parameters.size()), invoker);
 
-                for( i = 0; i < test_sample_count; i++ )
-                {
-                    j = sidx[(i+start+train_sample_count) % sample_count];
-                    memcpy(temp_test_samples.ptr(i), samples.ptr(j), sample_size);
-                }
-
-                predict(temp_test_samples, temp_test_responses, 0);
-                for( i = 0; i < test_sample_count; i++ )
-                {
-                    float val = temp_test_responses.at<float>(i);
-                    j = sidx[(i+start+train_sample_count) % sample_count];
-                    if( is_classification )
-                        error += (float)(val != responses.at<int>(j));
-                    else
-                    {
-                        val -= responses.at<float>(j);
-                        error += val*val;
-                    }
-                }
-            }
-            if( min_error > error )
+        // Extract the best parameters
+        SvmParams best_params = params;
+        double min_error = FLT_MAX;
+        for( int i = 0; i < (int)result.size(); i++ )
+        {
+            if( result[i] < min_error )
             {
-                min_error   = error;
-                best_params = params;
+                min_error   = result[i];
+                best_params = parameters[i];
             }
         }
 
index 5d229cf..be9bcf5 100644 (file)
@@ -353,26 +353,38 @@ void copy(const Mat& src, Mat& dst, const Mat& mask, bool invertMask)
         return;
     }
 
-    CV_Assert( src.size == mask.size && mask.type() == CV_8U );
+    int mcn = mask.channels();
+    CV_Assert( src.size == mask.size && mask.depth() == CV_8U
+               && (mcn == 1 || mcn == src.channels()) );
 
     const Mat *arrays[]={&src, &dst, &mask, 0};
     Mat planes[3];
 
     NAryMatIterator it(arrays, planes);
-    size_t j, k, elemSize = src.elemSize(), total = planes[0].total();
+    size_t j, k, elemSize = src.elemSize(), maskElemSize = mask.elemSize(), total = planes[0].total();
     size_t i, nplanes = it.nplanes;
+    size_t elemSize1 = src.elemSize1();
 
     for( i = 0; i < nplanes; i++, ++it)
     {
         const uchar* sptr = planes[0].ptr();
         uchar* dptr = planes[1].ptr();
         const uchar* mptr = planes[2].ptr();
-
-        for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize )
+        for( j = 0; j < total; j++, sptr += elemSize, dptr += elemSize, mptr += maskElemSize )
         {
-            if( (mptr[j] != 0) ^ invertMask )
-                for( k = 0; k < elemSize; k++ )
-                    dptr[k] = sptr[k];
+            if( mcn == 1)
+            {
+                if( (mptr[0] != 0) ^ invertMask )
+                    for( k = 0; k < elemSize; k++ )
+                        dptr[k] = sptr[k];
+            }
+            else
+            {
+                for( int c = 0; c < mcn; c++ )
+                    if( (mptr[c] != 0) ^ invertMask )
+                        for( k = 0; k < elemSize1; k++ )
+                            dptr[k + c * elemSize1] = sptr[k + c * elemSize1];
+            }
         }
     }
 }
@@ -414,25 +426,37 @@ void set(Mat& dst, const Scalar& gamma, const Mat& mask)
         return;
     }
 
-    CV_Assert( dst.size == mask.size && mask.type() == CV_8U );
+    int cn = dst.channels(), mcn = mask.channels();
+    CV_Assert( dst.size == mask.size && (mcn == 1 || mcn == cn) );
 
     const Mat *arrays[]={&dst, &mask, 0};
     Mat planes[2];
 
     NAryMatIterator it(arrays, planes);
-    size_t j, k, elemSize = dst.elemSize(), total = planes[0].total();
+    size_t j, k, elemSize = dst.elemSize(), maskElemSize = mask.elemSize(), total = planes[0].total();
     size_t i, nplanes = it.nplanes;
+    size_t elemSize1 = dst.elemSize1();
 
     for( i = 0; i < nplanes; i++, ++it)
     {
         uchar* dptr = planes[0].ptr();
         const uchar* mptr = planes[1].ptr();
 
-        for( j = 0; j < total; j++, dptr += elemSize )
+        for( j = 0; j < total; j++, dptr += elemSize, mptr += maskElemSize )
         {
-            if( mptr[j] )
-                for( k = 0; k < elemSize; k++ )
-                    dptr[k] = gptr[k];
+            if( mcn == 1)
+            {
+                if( mptr[0] )
+                    for( k = 0; k < elemSize; k++ )
+                        dptr[k] = gptr[k];
+            }
+            else
+            {
+                for( int c = 0; c < mcn; c++ )
+                    if( mptr[c] )
+                        for( k = 0; k < elemSize1; k++ )
+                            dptr[k + c * elemSize1] = gptr[k + c * elemSize1];
+            }
         }
     }
 }