[nnc] Support Clip and Reshape operations on caffe2 importer (#2751)
author Павел Ильютченко/AI Tools Lab/SRR/Engineer/삼성전자 <p.iliutchenk@samsung.com>
Thu, 10 Jan 2019 13:04:34 +0000 (16:04 +0300)
committerEfimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Thu, 10 Jan 2019 13:04:34 +0000 (16:04 +0300)
* Support GivenTensorInt64Fill const tensors
* Convert Clip operation to CappedRelu
* Support Reshape operation

Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
contrib/nnc/include/core/modelIR/DataType.h
contrib/nnc/include/core/modelIR/Scalar.h
contrib/nnc/passes/caffe2_frontend/caffe2_importer.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_importer.h
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.cpp
contrib/nnc/passes/caffe2_frontend/caffe2_op_creator.h
contrib/nnc/passes/caffe2_frontend/caffe2_op_types.h

index 22d6f90..d7c79c4 100644 (file)
@@ -23,7 +23,8 @@ namespace mir {
 enum class DTYPE {
   UNKNOWN,
   FLOAT32,
-  INT32
+  INT32,
+  INT64
 };
 
 } // namespace mir
index dba1e08..01f1444 100644 (file)
@@ -62,6 +62,8 @@ public:
       case DTYPE::FLOAT32:
       case DTYPE::INT32:
         return 4;
+      case DTYPE::INT64:
+        return 8;
     }
   }
   /**
index e2a5fa8..5a0e613 100644 (file)
@@ -122,10 +122,13 @@ void Caffe2Importer::collectUnsupportedOp(const OperatorDef& op) {
     case SupportedCaffe2OpType::constantFill:
     case SupportedCaffe2OpType::dropout:
     case SupportedCaffe2OpType::givenTensorFill:
+    case SupportedCaffe2OpType::givenTensorInt64Fill:
     case SupportedCaffe2OpType::relu:
     case SupportedCaffe2OpType::sigmoid:
     case SupportedCaffe2OpType::softmax:
     case SupportedCaffe2OpType::sum:
+    case SupportedCaffe2OpType::clip:
+    case SupportedCaffe2OpType::reshape:
       _opCreator->commonCheck(op, _problemsOpSet);
       break;
     default:
@@ -139,7 +142,8 @@ void Caffe2Importer::preloadAllTensors() {
     // All tensor values are stored in 'GivenTensorFill' and 'ConstantFill' operators, so skip rest
     auto opType = _operatorTypes.at(op.type());
     if ((opType == SupportedCaffe2OpType::givenTensorFill
-         || opType == SupportedCaffe2OpType::constantFill)
+         || opType == SupportedCaffe2OpType::constantFill
+         || opType == SupportedCaffe2OpType::givenTensorInt64Fill)
         && hasArgument(op.arg(), "values")) {
       _MIRTensors.insert(std::make_pair(op.output(0), createTensor(op)));
     }
@@ -165,6 +169,7 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
   switch (opType) {
     case SupportedCaffe2OpType::constantFill:
     case SupportedCaffe2OpType::givenTensorFill:
+    case SupportedCaffe2OpType::givenTensorInt64Fill:
       return;
     case SupportedCaffe2OpType::add:
       outputs = _opCreator->convertAdd(inputs, op, _MIRTensors);
@@ -205,6 +210,12 @@ void Caffe2Importer::createMIRNodesFromOp(const OperatorDef& op) {
     case SupportedCaffe2OpType::sum:
       outputs = _opCreator->convertSum(inputs);
       break;
+    case SupportedCaffe2OpType::clip:
+      outputs = _opCreator->convertClip(inputs, op);
+      break;
+    case SupportedCaffe2OpType::reshape:
+      outputs = _opCreator->convertReshape(inputs, op, _MIRTensors);
+      break;
     default:
       assert(false && "All unsupported types should have been found before this pass.");
   }
@@ -225,14 +236,36 @@ mir::TensorVariant Caffe2Importer::createTensor(const OperatorDef& op) {
   const auto& values = findArgumentByName(op.arg(), "values");
 
   // Create untyped tensor. Note, tensor contents will be *copied* here.
-  auto element_type = mir::DTYPE::FLOAT32;
-  size_t element_size = sizeof(float);
-  size_t data_size = values.floats().size() * element_size;
-  std::shared_ptr<char> data(new char[data_size], std::default_delete<char[]>());
-  memcpy(data.get(), values.floats().data(), data_size);
+  mir::DTYPE element_type(mir::DTYPE::UNKNOWN);
+  const auto opType(_operatorTypes.at(op.type()));
+  size_t element_size = 0;
+  size_t data_size = 0;
+  char* data_ptr = nullptr;
+  // if values are floats
+  if (values.floats().size() > 0) {
+    element_type = mir::DTYPE::FLOAT32;
+    element_size = sizeof(float);
+    data_size = values.floats().size() * element_size;
+    data_ptr = new char[data_size];
+    memcpy(data_ptr, values.floats().data(), data_size);
+  }
+  // if values are ints
+  if (values.ints().size() > 0) {
+    if (opType == SupportedCaffe2OpType::givenTensorInt64Fill) {
+      element_size = sizeof(int64_t);
+      element_type = mir::DTYPE::INT64;
+    } else {
+      element_size = sizeof(int32_t);
+      element_type = mir::DTYPE::INT32;
+    }
 
+    data_size = values.ints().size() * element_size;
+    data_ptr = new char[data_size];
+    memcpy(data_ptr, values.ints().data(), data_size);
+  }
+  std::shared_ptr<char> data(data_ptr, std::default_delete<char[]>());
   Shape tensor_shape = ShapeHelper::createShape(
-          shape.ints(), static_cast<size_t>(shape.ints().size()));
+      shape.ints(), static_cast<size_t>(shape.ints().size()));
 
   return mir::TensorVariant(tensor_shape, data, element_type, element_size);
 }
@@ -242,30 +275,13 @@ std::vector<mir::IODescriptor> Caffe2Importer::getInputMIROps(const OperatorDef&
   // so choose caffe2 inputs, which are 'real' inputs
   std::vector<mir::IODescriptor> inputs;
   SupportedCaffe2OpType opType = _operatorTypes.at(op.type());
-  switch (opType) {
-    case SupportedCaffe2OpType::givenTensorFill:
-    case SupportedCaffe2OpType::constantFill:
-      break;
-    case SupportedCaffe2OpType::add:
-    case SupportedCaffe2OpType::averagePool:
-    case SupportedCaffe2OpType::conv:
-    case SupportedCaffe2OpType::dropout:
-    case SupportedCaffe2OpType::FC:
-    case SupportedCaffe2OpType::maxPool:
-    case SupportedCaffe2OpType::mul:
-    case SupportedCaffe2OpType::relu:
-    case SupportedCaffe2OpType::sigmoid:
-    case SupportedCaffe2OpType::softmax:
-    case SupportedCaffe2OpType::spatialBN:
-      inputs.push_back(_blobNameToIODescriptor[op.input(0)]);
-      break;
-    case SupportedCaffe2OpType::sum:
-    case SupportedCaffe2OpType::concat:
-      for (auto& i : op.input())
+  if (opType != SupportedCaffe2OpType::givenTensorFill &&
+      opType != SupportedCaffe2OpType::constantFill &&
+      opType != SupportedCaffe2OpType::givenTensorInt64Fill)
+  {
+    for (auto& i : op.input())
+      if (_blobNameToIODescriptor.find(i) != _blobNameToIODescriptor.end())
         inputs.push_back(_blobNameToIODescriptor[i]);
-      break;
-    default:
-      assert(false && "All unsupported types should have been found before this pass.");
   }
 
   return inputs;
@@ -280,21 +296,24 @@ void Caffe2Importer::setGraphOutputs() {
 }
 
 const std::map<std::string, SupportedCaffe2OpType> Caffe2Importer::_operatorTypes = {
-        {"Add",             SupportedCaffe2OpType::add},
-        {"AveragePool",     SupportedCaffe2OpType::averagePool},
-        {"Conv",            SupportedCaffe2OpType::conv},
-        {"Concat",          SupportedCaffe2OpType::concat},
-        {"ConstantFill",    SupportedCaffe2OpType::constantFill},
-        {"Dropout",         SupportedCaffe2OpType::dropout},
-        {"FC",              SupportedCaffe2OpType::FC},
-        {"GivenTensorFill", SupportedCaffe2OpType::givenTensorFill},
-        {"MaxPool",         SupportedCaffe2OpType::maxPool},
-        {"Mul",             SupportedCaffe2OpType::mul},
-        {"Relu",            SupportedCaffe2OpType::relu},
-        {"Sigmoid",         SupportedCaffe2OpType::sigmoid},
-        {"Softmax",         SupportedCaffe2OpType::softmax},
-        {"SpatialBN",       SupportedCaffe2OpType::spatialBN},
-        {"Sum",             SupportedCaffe2OpType::sum}
+{"Add",                     SupportedCaffe2OpType::add},
+{"AveragePool",             SupportedCaffe2OpType::averagePool},
+{"Conv",                    SupportedCaffe2OpType::conv},
+{"Concat",                  SupportedCaffe2OpType::concat},
+{"ConstantFill",            SupportedCaffe2OpType::constantFill},
+{"Dropout",                 SupportedCaffe2OpType::dropout},
+{"FC",                      SupportedCaffe2OpType::FC},
+{"GivenTensorFill",         SupportedCaffe2OpType::givenTensorFill},
+{"MaxPool",                 SupportedCaffe2OpType::maxPool},
+{"Mul",                     SupportedCaffe2OpType::mul},
+{"Relu",                    SupportedCaffe2OpType::relu},
+{"Sigmoid",                 SupportedCaffe2OpType::sigmoid},
+{"Softmax",                 SupportedCaffe2OpType::softmax},
+{"SpatialBN",               SupportedCaffe2OpType::spatialBN},
+{"Sum",                     SupportedCaffe2OpType::sum},
+{"Clip",                    SupportedCaffe2OpType::clip},
+{"Reshape",                 SupportedCaffe2OpType::reshape},
+{"GivenTensorInt64Fill",    SupportedCaffe2OpType::givenTensorInt64Fill},
 };
 
 } // namespace nnc
index c01de54..8720442 100644 (file)
@@ -66,7 +66,7 @@ private:
 
   // This map maps caffe2 operators names to MIR operators
   // that correspond to previous caffe2 operators
-  std::map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
+  std::unordered_map<std::string, mir::IODescriptor> _blobNameToIODescriptor;
   mir::Operation* _lastMIROp = nullptr;
 
   std::map<std::string, mir::TensorVariant> _MIRTensors;
index 478ec6b..ee92ff9 100644 (file)
@@ -61,8 +61,30 @@ using nnc::mir::transposeTensor;
 
 static std::pair<std::vector<int32_t>, std::vector<int32_t>>
 getPadding(const ::caffe2::OperatorDef& op) {
+
+  if (hasArgument(op.arg(), "pads")) {
+    // pads order: t l b r
+    auto pads_arg = findArgumentByName(op.arg(), "pads");
+
+    std::vector<int32_t> paddings;
+    for (const auto& pad : pads_arg.ints())
+      paddings.push_back(static_cast<int32_t>(pad));
+
+    assert(paddings.size() == 4);
+
+    int32_t pad_t = paddings[0];
+    int32_t pad_l = paddings[1];
+    int32_t pad_b = paddings[2];
+    int32_t pad_r = paddings[3];
+
+    std::vector<int32_t> padding_before{pad_t, pad_l};
+    std::vector<int32_t> padding_after{pad_b, pad_r};
+    return {padding_before, padding_after};
+  }
+
   bool has_custom_pad = hasArgument(op.arg(), "pad_l") || hasArgument(op.arg(), "pad_r")
                         || hasArgument(op.arg(), "pad_t") || hasArgument(op.arg(), "pad_b");
+
   if (has_custom_pad) {
     int32_t pad_l = getSingleArgument(op, "pad_l", 0);
     int32_t pad_t = getSingleArgument(op, "pad_t", 0);
@@ -78,13 +100,35 @@ getPadding(const ::caffe2::OperatorDef& op) {
   return {{pad, pad}, {pad, pad}};
 };
 
+static std::vector<int32_t>
+getStrides(const ::caffe2::OperatorDef& op) {
+  std::vector<int32_t> strides;
+
+  if (hasArgument(op.arg(), "stride")) {
+    int stride = getSingleArgument(op, "stride", 1);
+    strides = {stride, stride};
+  }
+
+  if (hasArgument(op.arg(), "strides")) {
+    // strides order: h w
+    auto strides_arg = findArgumentByName(op.arg(), "strides");
+    for (const auto& s : strides_arg.ints())
+      strides.push_back(s);
+  }
+
+  assert(!strides.empty() && "Strides not found");
+
+  return strides;
+}
+
 static Shape getWindowShape(const ::caffe2::OperatorDef& op,
                             const std::vector<IODescriptor>& inputs) {
   int is_global_pooling = getSingleArgument(op, "global_pooling", 0);
-  bool has_custom_kernel_size = hasArgument(op.arg(), "kernel_h")
-                                || hasArgument(op.arg(), "kernel_w");
+  bool has_custom_kernel_size = hasArgument(op.arg(), "kernel_h") ||
+                                hasArgument(op.arg(), "kernel_w");
+  bool has_custom_kernels_size = hasArgument(op.arg(), "kernels");
 
-  int kernel_h, kernel_w;
+  int kernel_h(0), kernel_w(0);
   if (is_global_pooling) {
     auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
     assert(input_shape.rank() == 4 && "getWindowShape() inputs must be of rank 4");
@@ -95,10 +139,21 @@ static Shape getWindowShape(const ::caffe2::OperatorDef& op,
       kernel_h = getSingleArgument(op, "kernel_h", 0);
       kernel_w = getSingleArgument(op, "kernel_w", 0);
     } else {
-      kernel_h = kernel_w = getSingleArgument(op, "kernel", 0);
+      if (has_custom_kernels_size) {
+        // kernels order: h w
+        std::vector<int32_t> kernels;
+        auto kernels_arg = findArgumentByName(op.arg(), "kernels");
+        for (const auto& ker : kernels_arg.ints())
+          kernels.push_back(static_cast<int32_t>(ker));
+        assert(kernels.size() == 2);
+        kernel_h = kernels[0];
+        kernel_w = kernels[1];
+      } else {
+        kernel_h = kernel_w = getSingleArgument(op, "kernel", 0);
+      }
     }
   }
-  return Shape({kernel_h, kernel_w});
+  return Shape{kernel_h, kernel_w};
 }
 
 mir::IODescriptor Caffe2OpCreator::convertCaffeToMIR(const mir::IODescriptor& arg) {
@@ -200,14 +255,20 @@ Caffe2OpCreator::convertAdd(const std::vector<mir::IODescriptor>& inputs,
                             const ::caffe2::OperatorDef& op,
                             const MIRTensors& mir_tensors) {
   const auto& input_shape = inputs[0].op->getOutputShape(inputs[0].index);
-  const auto& addend = mir_tensors.at(op.input(1));
 
-  assert(addend.getShape().rank() == 1 && "Only 1-rank addend is supported");
-  assert(addend.getShape().numElements() == input_shape.dim(1)
-         && "Only addend size equal to number of input channels is supported");
 
-  // TODO: replace with elementwise op, when broadcating will be added in elementwise op
-  auto add = createOp<ops::BiasAddOp>(convertCaffeToMIR(inputs[0]), addend);
+  std::vector<mir::IODescriptor> add_input;
+  for (const auto& i : inputs)
+    add_input.push_back(convertCaffeToMIR(i.op->getOutput(0)));
+
+  // check mir tensors contain operand
+  if (mir_tensors.find(op.input(1)) != mir_tensors.end()) {
+    auto next_input = createOp<ops::ConstantOp>(mir_tensors.at(op.input(1)));
+    add_input.push_back(next_input[0].getOutput(0));
+  }
+
+  auto add = createOp<ops::ElementwiseOp>(add_input, ops::ElementwiseOp::OpType::add);
+
   return {convertMIRToCaffe(add->getOutput(0))};
 }
 
@@ -216,8 +277,7 @@ Caffe2OpCreator::convertAveragePool(const std::vector<IODescriptor>& inputs,
                                     const OperatorDef& op) {
   Shape window_shape = getWindowShape(op, inputs);
 
-  int stride = getSingleArgument(op, "stride", 1);
-  Shape strides = Shape({stride, stride});
+  Shape strides(getStrides(op));
 
   ops::PoolOp::PoolingType pool_type = ops::PoolOp::PoolingType::AVG;
   ops::PoolOp::BorderType border_type = ops::PoolOp::BorderType::EMPTY;
@@ -235,8 +295,8 @@ Caffe2OpCreator::convertAveragePool(const std::vector<IODescriptor>& inputs,
 std::vector<IODescriptor> Caffe2OpCreator::convertConv(const std::vector<IODescriptor>& inputs,
                                                        const ::caffe2::OperatorDef& op,
                                                        const MIRTensors& mir_tensors) {
-  int stride = getSingleArgument(op, "stride", 1);
-  Shape stride_shape = Shape({stride, stride});
+  // dilation order: h w (not used)
+  Shape stride_shape(getStrides(op));
 
   std::vector<int32_t> pad_before, pad_after;
   std::tie(pad_before, pad_after) = getPadding(op);
@@ -306,9 +366,7 @@ Caffe2OpCreator::convertFullyConnected(const std::vector<IODescriptor>& inputs,
 std::vector<IODescriptor> Caffe2OpCreator::convertMaxPool(const std::vector<IODescriptor>& inputs,
                                                           const OperatorDef& op) {
   Shape window_shape = getWindowShape(op, inputs);
-
-  int stride = getSingleArgument(op, "stride", 1);
-  Shape strides = Shape({stride, stride});
+  Shape strides(getStrides(op));
 
   ops::PoolOp::PoolingType pool_type = ops::PoolOp::PoolingType::MAX;
   ops::PoolOp::BorderType border_type = ops::PoolOp::BorderType::EMPTY;
@@ -397,6 +455,42 @@ std::vector<IODescriptor> Caffe2OpCreator::convertSum(const std::vector<IODescri
   return {op->getOutput(0)};
 }
 
+std::vector<mir::IODescriptor>
+Caffe2OpCreator::convertClip(const std::vector<mir::IODescriptor>& inputs,
+                             const ::caffe2::OperatorDef& op) {
+
+  float max = getSingleArgument(op, "max", float(0));
+  float min = getSingleArgument(op, "min", float(0));
+
+  assert(max > 0.0 && min == 0.0 && "Support only if clip is CappedRelu");
+  auto cap_relu = createOp<ops::CappedReluOp>(inputs[0], max);
+
+  return {cap_relu->getOutput(0)};
+}
+
+
+std::vector<mir::IODescriptor>
+Caffe2OpCreator::convertReshape(const std::vector<mir::IODescriptor>& inputs,
+                                const ::caffe2::OperatorDef& op,
+                                const MIRTensors& mir_tensors) {
+  // Check new shape input
+  assert(mir_tensors.find(op.input(1)) != mir_tensors.end());
+  const auto& shape_tensor = mir_tensors.at(op.input(1));
+
+  Tensor<int64_t> out_shape_tensor(shape_tensor);
+
+  ShapeRange range(out_shape_tensor.getShape());
+  std::vector<int32_t> shape_vec;
+  for (const auto& index: range) {
+    shape_vec.push_back(static_cast<int32_t>(out_shape_tensor.at(index)));
+  }
+  Shape out_shape(shape_vec);
+
+  auto reshape = createOp<ops::ReshapeOp>(inputs[0], out_shape);
+
+  return {reshape->getOutput(0)};
+}
+
 std::vector<IODescriptor>
 Caffe2OpCreator::createInput(const std::string& input_name, const mir::Shape& input_shape) {
   // TODO For now we only support convolutional networks with one element per batch.
index 5bbbf9d..ec2bbc5 100644 (file)
@@ -18,7 +18,7 @@
 #define NNCC_CAFFE2_OP_CREATOR_H
 
 #include <set>
-#include <map>
+#include <unordered_map>
 #include <vector>
 #include <memory>
 
@@ -92,6 +92,13 @@ public:
 
   std::vector<mir::IODescriptor> convertSum(const std::vector<mir::IODescriptor>&);
 
+  std::vector<mir::IODescriptor> convertClip(const std::vector<mir::IODescriptor>&,
+                                             const ::caffe2::OperatorDef&);
+
+  std::vector<mir::IODescriptor> convertReshape(const std::vector<mir::IODescriptor>&,
+                                                const ::caffe2::OperatorDef&, const MIRTensors&);
+
+
 private:
   Graph* _graph = nullptr;
 
index 414bd87..6613cda 100644 (file)
@@ -22,19 +22,22 @@ namespace nnc {
 enum class SupportedCaffe2OpType {
   add,
   averagePool,
+  clip,
   concat,
   conv,
   constantFill,
   dropout,
   FC,
   givenTensorFill,
+  givenTensorInt64Fill,
   maxPool,
   mul,
   relu,
+  reshape,
   sigmoid,
   softmax,
   spatialBN,
-  sum
+  sum,
 };
 
 }  // namespace nnc