Refactored tflite frontend and fixed coding style (#1885)
author Ivan Vagin/AI Tools Lab /SRR/Engineer/삼성전자 <ivan.vagin@samsung.com>
Wed, 24 Oct 2018 17:54:33 +0000 (20:54 +0300)
committer Роман Михайлович Русяев/AI Tools Lab /SRR/Staff Engineer/삼성전자 <r.rusyaev@samsung.com>
Wed, 24 Oct 2018 17:54:33 +0000 (20:54 +0300)
Refactored tflite frontend and fixed coding style

Signed-off-by: Ivan Vagin <ivan.vagin@samsung.com>
29 files changed:
contrib/nnc/examples/tflite_frontend/sanity_check.cpp
contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp
contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
contrib/nnc/passes/caffe_frontend/caffe_importer.h
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.h
contrib/nnc/passes/caffe_frontend/proto_reader.cpp
contrib/nnc/passes/caffe_frontend/proto_reader.h
contrib/nnc/passes/tflite_frontend/CMakeLists.txt
contrib/nnc/passes/tflite_frontend/schema.h [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.h
contrib/nnc/passes/tflite_frontend/tflite_importer.inline.cpp [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h
contrib/nnc/passes/tflite_frontend/tflite_visitor.h [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_walker.cpp [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_walker.h [deleted file]
contrib/nnc/tests/import/tflite.cpp
contrib/nnc/unittests/CMakeLists.txt
contrib/nnc/unittests/tflite_frontend/CMakeLists.txt [new file with mode: 0644]
contrib/nnc/unittests/tflite_frontend/test_data/unsupported.tflite [new file with mode: 0644]
contrib/nnc/unittests/tflite_frontend/unsupportedTfliteModel.cpp [new file with mode: 0644]

index 7ab0989..edd3dd5 100644 (file)
@@ -28,13 +28,11 @@ using namespace nnc;
 using namespace nnc::mir;
 using namespace nnc::cli;
 
-enum Format {FormatDot, FormatDump};
-
 int main(int argc, const char **argv) {
   cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
   std::string model = cli::inputFile;
 
-  nnc::tflite::v_dev::TfliteImporter importer{model};
+  nnc::TfliteImporter importer{model};
 
   try {
     importer.import();
index 6291900..5a257ec 100644 (file)
 namespace nnc {
 
 PassData CaffeFrontend::run(PassData) {
-  nnc::CaffeImporter importer{cli::inputFile};
+  CaffeImporter importer{cli::inputFile};
 
   importer.import();
 
-  return reinterpret_cast<mir::Graph *>(importer.createIR());
+  return importer.createIR();
 }
 
 } // namespace nnc
index b9f2c9e..3f58b40 100644 (file)
@@ -34,6 +34,7 @@ namespace nnc {
 using VariableOp = nnc::mir::ops::VariableOp;
 using nnc::mir::Shape;
 using nnc::mir::transposeTensor;
+using namespace ::caffe;
 
 void CaffeImporter::import() {
   GOOGLE_PROTOBUF_VERIFY_VERSION;
@@ -45,7 +46,7 @@ void CaffeImporter::import() {
   collectUnsupportedLayers();
 }
 
-Graph *CaffeImporter::createIR() {
+Graph* CaffeImporter::createIR() {
 
   for (int i = 0; i < _net->layer_size(); ++i)
     createMIRNodesFromLayer(_net->layer(i));
@@ -71,14 +72,14 @@ void CaffeImporter::collectUnsupportedLayers() {
 }
 
 void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& lp) {
-  auto inputs = getInputMIROps(lp);
+  auto inputs = getPrecedingMIROps(lp);
   auto params = createOpParams(lp);
 
   std::vector<INode::Ref> outputs;
-  INode *prev;
-  CaffeOpType opType = _operatorTypes.at(lp.type());
+  INode* prev;
+  CaffeOpType op_type = _operatorTypes.at(lp.type());
 
-  switch (opType) {
+  switch (op_type) {
     case CaffeOpType::input:
       processInputLayer(lp);
       break;
@@ -134,10 +135,10 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
     return;
   }
 
-  CaffeOpType opType = it->second;
+  CaffeOpType op_type = it->second;
   std::vector<std::shared_ptr<IrTensor>> params;
 
-  switch (opType) {
+  switch (op_type) {
     case CaffeOpType::input:
     case CaffeOpType::softmax:
     case CaffeOpType::scale:
@@ -171,187 +172,183 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
   }
 }
 
-void CaffeImporter::createGraphInputs(const std::vector<std::string> &names,
-                                      const std::vector<Shape> &shapes) {
-    assert(names.size() == shapes.size());
-
-    for (size_t i = 0; i < names.size(); ++i) {
-        auto node = _graph->create<VariableOp>(names[i]);
-        _opsForBlobsTheyOutput[names[i]] = node;
-
-        Shape inputShape = shapes[i];
-        // WARNING! Temporary solution! Assuming that every 4D input will be used for a convolution,
-        // so we change every 4D input from Caffe NCHW to Model IR HWC (batch is cut off earlier).
-        // TODO: Implement a more consistent way of handling shapes within the model.
-        if (shapes[i].rank() == 3) {
-            const Shape &sh = shapes[i];
-            inputShape = Shape{sh.dim(1), sh.dim(2), sh.dim(0)};
-        }
-        // WARNING! Temporary solution!
-
-        node->getOperation()->setOutputShape(0, inputShape);
+void CaffeImporter::createGraphInputs(const std::vector<std::string>& names,
+                                      const std::vector<Shape>& shapes) {
+  assert(names.size() == shapes.size());
+
+  for (size_t i = 0; i < names.size(); ++i) {
+    auto node = _graph->create<VariableOp>(names[i]);
+    _opsForBlobsTheyOutput[names[i]] = node;
+
+    Shape input_shape = shapes[i];
+    // WARNING! Temporary solution! Assuming that every 4D input will be used for a convolution,
+    // so we change every 4D input from Caffe NCHW to Model IR HWC (batch is cut off earlier).
+    // TODO: Implement a more consistent way of handling shapes within the model.
+    if (shapes[i].rank() == 3) {
+      const Shape& sh = shapes[i];
+      input_shape = Shape{sh.dim(1), sh.dim(2), sh.dim(0)};
     }
+    // WARNING! Temporary solution!
+
+    node->getOperation()->setOutputShape(0, input_shape);
+  }
 }
 
 void CaffeImporter::processDeprecatedInput() {
-    if (_net->input_dim_size() != 0 || _net->input_shape_size() != 0)
-        throw PassException("Deprecated Caffe input types are not supported");
+  if (_net->input_dim_size() != 0 || _net->input_shape_size() != 0)
+    throw PassException("Deprecated Caffe input types are not supported");
 }
 
 void CaffeImporter::processInputLayer(const LayerParameter& lp) {
-    std::vector<std::string> inputNames;
-    for (const auto &name  : lp.top())
-        inputNames.push_back(name);
+  std::vector<std::string> input_names;
+  for (const auto& name  : lp.top())
+    input_names.push_back(name);
 
-    for (const auto &shape : lp.input_param().shape()) {
-        Shape sh = ShapeHelper::createShape(shape.dim(), shape.dim_size());
-        _inputShapes.push_back(ShapeHelper::cutOffBatchDim(sh));
-    }
+  for (const auto& shape : lp.input_param().shape()) {
+    Shape sh = ShapeHelper::createShape(shape.dim(), shape.dim_size());
+    _inputShapes.push_back(ShapeHelper::cutOffBatchDim(sh));
+  }
 
-    if (!_inputShapes.empty())
-        createGraphInputs(inputNames, _inputShapes);
+  if (!_inputShapes.empty())
+    createGraphInputs(input_names, _inputShapes);
 }
 
-std::shared_ptr<IrTensor> CaffeImporter::createTensor(const BlobProto &bp) {
-    auto type = IrTensor::DTYPE::FLOAT;
-    size_t elementSize;
-
-    const char *srcData;
-    size_t bufferSize;
-
-    if (bp.data_size() != 0) {
-        assert(bp.double_data_size() == 0);
-        elementSize = sizeof(float);
-        bufferSize = bp.data_size() * elementSize;
-        srcData = reinterpret_cast<const char *>(bp.data().data());
-    }
-    else if (bp.double_data_size() != 0) {
-        elementSize = sizeof(double);
-        bufferSize = bp.double_data_size() * elementSize;
-        srcData = reinterpret_cast<const char *>(bp.double_data().data());
-    }
-    else {
-        throw PassException("No data in Caffe BlobProto, investigate");
-    }
+std::shared_ptr<IrTensor> CaffeImporter::createTensor(const BlobProto& bp) {
+  auto type = IrTensor::DTYPE::FLOAT;
+  size_t element_size;
+
+  const char* src_data;
+  size_t buffer_size;
+
+  if (bp.data_size() != 0) {
+    assert(bp.double_data_size() == 0);
+    element_size = sizeof(float);
+    buffer_size = bp.data_size() * element_size;
+    src_data = reinterpret_cast<const char*>(bp.data().data());
+  } else if (bp.double_data_size() != 0) {
+    element_size = sizeof(double);
+    buffer_size = bp.double_data_size() * element_size;
+    src_data = reinterpret_cast<const char*>(bp.double_data().data());
+  } else {
+    throw PassException("No data in Caffe BlobProto, investigate");
+  }
 
-    // Create untyped tensor. Note, tensor contents will be *copied* here.
-    std::shared_ptr<char> tensorBufferCopy(new char[bufferSize],
+  // Create untyped tensor. Note, tensor contents will be *copied* here.
+  std::shared_ptr<char> tensor_buffer_copy(new char[buffer_size],
                                            std::default_delete<char[]>());
 
-    char *dstData = tensorBufferCopy.get();
-    memcpy(dstData, srcData, bufferSize);
+  char* dst_data = tensor_buffer_copy.get();
+  memcpy(dst_data, src_data, buffer_size);
 
-    Shape tensorShape = ShapeHelper::createShape(
-        bp.shape().dim(), static_cast<size_t>(bp.shape().dim_size()));
+  Shape tensor_shape = ShapeHelper::createShape(
+          bp.shape().dim(), static_cast<size_t>(bp.shape().dim_size()));
 
-    auto tensor = std::make_shared<IrTensor>(tensorShape, tensorBufferCopy, type, elementSize);
+  auto tensor = std::make_shared<IrTensor>(tensor_shape, tensor_buffer_copy, type, element_size);
 
-    return tensor;
+  return tensor;
 }
 
-std::vector<INode::Ref> CaffeImporter::getInputMIROps(const LayerParameter &lp) {
-    std::vector<INode::Ref> inputs;
+std::vector<INode::Ref> CaffeImporter::getPrecedingMIROps(const LayerParameter& lp) {
+  std::vector<INode::Ref> inputs;
 
-    for (const auto &inputBlobName : lp.bottom())
-        inputs.push_back(_opsForBlobsTheyOutput[inputBlobName]);
+  for (const auto& input_blob_name : lp.bottom())
+    inputs.push_back(_opsForBlobsTheyOutput[input_blob_name]);
 
-    return inputs;
+  return inputs;
 }
 
-std::vector<std::shared_ptr<IrTensor>> CaffeImporter::createOpParams(const LayerParameter &lp) {
-    std::vector<std::shared_ptr<IrTensor>> params;
+std::vector<std::shared_ptr<IrTensor>> CaffeImporter::createOpParams(const LayerParameter& lp) {
+  std::vector<std::shared_ptr<IrTensor>> params;
 
-    for (const auto &blob : lp.blobs()) {
+  for (const auto& blob : lp.blobs()) {
 
-        std::shared_ptr<IrTensor> tensor = createTensor(blob);
+    std::shared_ptr<IrTensor> tensor = createTensor(blob);
 
-        if (lp.has_convolution_param() && blob.shape().dim_size() == 4) {
-            // TODO support non default channel axis
-            assert(lp.convolution_param().axis() == 1 && "assuming channel axis number set to default");
-            params.emplace_back(transposeTensor<2, 3, 1, 0>(tensor));
-        }
-        else if (lp.has_inner_product_param() && blob.shape().dim_size() == 2) {
-            params.emplace_back(transposeTensor<1, 0>(tensor));
-        }
-        else {
-            params.push_back(tensor);
-        }
+    if (lp.has_convolution_param() && blob.shape().dim_size() == 4) {
+      // TODO support non default channel axis
+      assert(lp.convolution_param().axis() == 1 && "assuming channel axis number set to default");
+      params.emplace_back(transposeTensor<2, 3, 1, 0>(tensor));
+    } else if (lp.has_inner_product_param() && blob.shape().dim_size() == 2) {
+      params.emplace_back(transposeTensor<1, 0>(tensor));
+    } else {
+      params.push_back(tensor);
     }
+  }
 
-    return params;
+  return params;
 }
 
 void CaffeImporter::setGraphOutputs() {
-    for (auto &outputIdx : _graphOutputs)
-        _graph->markOutput(outputIdx);
+  for (auto& output_idx : _graphOutputs)
+    _graph->markOutput(output_idx);
 }
 
 void CaffeImporter::setIrNodeNames() {
-    for (auto &item : _opsForBlobsTheyOutput)
-        item.second->setName(item.first);
+  for (auto& item : _opsForBlobsTheyOutput)
+    item.second->setName(item.first);
 }
 
 const std::map<std::string, CaffeOpType> CaffeImporter::_operatorTypes = {
-    {"AbsVal", CaffeOpType::absVal},
-    {"Accuracy", CaffeOpType::accuracy},
-    {"ArgMax", CaffeOpType::argMax},
-    {"BatchNorm", CaffeOpType::batchNorm},
-    {"BatchReindex", CaffeOpType::batchReindex},
-    {"Bias", CaffeOpType::bias},
-    {"BNLL", CaffeOpType::BNLL},
-    {"Clip", CaffeOpType::clip},
-    {"Concat", CaffeOpType::concat},
-    {"ContrastiveLoss", CaffeOpType::contrastiveLoss},
-    {"Convolution", CaffeOpType::convolution},
-    {"Crop", CaffeOpType::crop},
-    {"Data", CaffeOpType::data},
-    {"Deconvolution", CaffeOpType::deconvolution},
-    {"Dropout", CaffeOpType::dropout},
-    {"DummyData", CaffeOpType::dummyData},
-    {"Eltwise", CaffeOpType::eltwise},
-    {"ELU", CaffeOpType::ELU},
-    {"Embed", CaffeOpType::embed},
-    {"EuclidianLoss", CaffeOpType::euclidianLoss},
-    {"Exp", CaffeOpType::exp},
-    {"Filter", CaffeOpType::filter},
-    {"Flatten", CaffeOpType::flatten},
-    {"HDF5Data", CaffeOpType::HDF5Data},
-    {"HDF5Output", CaffeOpType::HDF5Output},
-    {"HingeLoss", CaffeOpType::hingeLoss},
-    {"Im2Col", CaffeOpType::im2Col},
-    {"ImageData", CaffeOpType::imageData},
-    {"InfogainLoss", CaffeOpType::infogainLoss},
-    {"InnerProduct", CaffeOpType::innerProduct},
-    {"Input", CaffeOpType::input},
-    {"Log", CaffeOpType::log},
-    {"LRN", CaffeOpType::LRN},
-    {"LSTM", CaffeOpType::LSTM},
-    {"MemoryData", CaffeOpType::memoryData},
-    {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
-    {"MVN", CaffeOpType::MVN},
-    {"Parameter", CaffeOpType::parameter},
-    {"Pooling", CaffeOpType::pooling},
-    {"Power", CaffeOpType::power},
-    {"PReLU", CaffeOpType::PReLU},
-    {"Python", CaffeOpType::python},
-    {"Recurrent", CaffeOpType::recurrent},
-    {"Reduction", CaffeOpType::reduction},
-    {"ReLU", CaffeOpType::ReLU},
-    {"Reshape", CaffeOpType::reshape},
-    {"RNN", CaffeOpType::RNN},
-    {"Scale", CaffeOpType::scale},
-    {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
-    {"Sigmoid", CaffeOpType::sigmoid},
-    {"Silence", CaffeOpType::silence},
-    {"Softmax", CaffeOpType::softmax},
-    {"SoftmaxLoss", CaffeOpType::softmaxLoss},
-    {"SPP", CaffeOpType::SPP},
-    {"Split", CaffeOpType::split},
-    {"Slice", CaffeOpType::slice},
-    {"Tanh", CaffeOpType::tanh},
-    {"Threshold", CaffeOpType::threshold},
-    {"Tile", CaffeOpType::tile},
-    {"WindowData", CaffeOpType::windowData}
+        {"AbsVal",                  CaffeOpType::absVal},
+        {"Accuracy",                CaffeOpType::accuracy},
+        {"ArgMax",                  CaffeOpType::argMax},
+        {"BatchNorm",               CaffeOpType::batchNorm},
+        {"BatchReindex",            CaffeOpType::batchReindex},
+        {"Bias",                    CaffeOpType::bias},
+        {"BNLL",                    CaffeOpType::BNLL},
+        {"Clip",                    CaffeOpType::clip},
+        {"Concat",                  CaffeOpType::concat},
+        {"ContrastiveLoss",         CaffeOpType::contrastiveLoss},
+        {"Convolution",             CaffeOpType::convolution},
+        {"Crop",                    CaffeOpType::crop},
+        {"Data",                    CaffeOpType::data},
+        {"Deconvolution",           CaffeOpType::deconvolution},
+        {"Dropout",                 CaffeOpType::dropout},
+        {"DummyData",               CaffeOpType::dummyData},
+        {"Eltwise",                 CaffeOpType::eltwise},
+        {"ELU",                     CaffeOpType::ELU},
+        {"Embed",                   CaffeOpType::embed},
+        {"EuclidianLoss",           CaffeOpType::euclidianLoss},
+        {"Exp",                     CaffeOpType::exp},
+        {"Filter",                  CaffeOpType::filter},
+        {"Flatten",                 CaffeOpType::flatten},
+        {"HDF5Data",                CaffeOpType::HDF5Data},
+        {"HDF5Output",              CaffeOpType::HDF5Output},
+        {"HingeLoss",               CaffeOpType::hingeLoss},
+        {"Im2Col",                  CaffeOpType::im2Col},
+        {"ImageData",               CaffeOpType::imageData},
+        {"InfogainLoss",            CaffeOpType::infogainLoss},
+        {"InnerProduct",            CaffeOpType::innerProduct},
+        {"Input",                   CaffeOpType::input},
+        {"Log",                     CaffeOpType::log},
+        {"LRN",                     CaffeOpType::LRN},
+        {"LSTM",                    CaffeOpType::LSTM},
+        {"MemoryData",              CaffeOpType::memoryData},
+        {"MultinomialLogisticLoss", CaffeOpType::multinomialLogisticLoss},
+        {"MVN",                     CaffeOpType::MVN},
+        {"Parameter",               CaffeOpType::parameter},
+        {"Pooling",                 CaffeOpType::pooling},
+        {"Power",                   CaffeOpType::power},
+        {"PReLU",                   CaffeOpType::PReLU},
+        {"Python",                  CaffeOpType::python},
+        {"Recurrent",               CaffeOpType::recurrent},
+        {"Reduction",               CaffeOpType::reduction},
+        {"ReLU",                    CaffeOpType::ReLU},
+        {"Reshape",                 CaffeOpType::reshape},
+        {"RNN",                     CaffeOpType::RNN},
+        {"Scale",                   CaffeOpType::scale},
+        {"SigmoidCrossEntropyLoss", CaffeOpType::sigmoidCrossEntropyLoss},
+        {"Sigmoid",                 CaffeOpType::sigmoid},
+        {"Silence",                 CaffeOpType::silence},
+        {"Softmax",                 CaffeOpType::softmax},
+        {"SoftmaxLoss",             CaffeOpType::softmaxLoss},
+        {"SPP",                     CaffeOpType::SPP},
+        {"Split",                   CaffeOpType::split},
+        {"Slice",                   CaffeOpType::slice},
+        {"Tanh",                    CaffeOpType::tanh},
+        {"Threshold",               CaffeOpType::threshold},
+        {"Tile",                    CaffeOpType::tile},
+        {"WindowData",              CaffeOpType::windowData}
 };
 
 } // namespace nnc
index 0fba57b..474c816 100644 (file)
@@ -30,8 +30,6 @@
 
 namespace nnc {
 
-using namespace ::caffe;
-
 class CaffeImporter : public NNImporter {
 public:
   explicit CaffeImporter(std::string filename) : _modelFilename(std::move(filename)),
@@ -48,11 +46,11 @@ public:
   * @brief Create MIR graph from caffe model, must be called after 'import' method
   * @return MIR graph, corresponding to processed caffe model
   */
-  Graph *createIR() override;
+  Graph* createIR() override;
 
 private:
   std::string _modelFilename;
-  std::unique_ptr<NetParameter> _net;
+  std::unique_ptr<::caffe::NetParameter> _net;
   mir::Graph* _graph;
   OpCreator _opCreator;
 
@@ -70,6 +68,7 @@ private:
   * @brief Mark output MIR nodes
   */
   void setGraphOutputs();
+
   /**
   * @brief Set MIR node names
   */
@@ -82,29 +81,35 @@ private:
   void collectUnsupportedLayers();
 
   /**
-  * @brief Creating MIR node from single caffe layer
+  * @brief Create MIR node from single caffe layer
   */
-  void createMIRNodesFromLayer(const LayerParameter& lp);
+  void createMIRNodesFromLayer(const ::caffe::LayerParameter& lp);
+
   /**
-  * @brief Collecting unsupported parts of caffe layer
+  * @brief Collect unsupported parts of caffe layer
   */
-  void collectUnsupportedOp(const LayerParameter& lp);
+  void collectUnsupportedOp(const ::caffe::LayerParameter& lp);
+
   /**
-  * @brief Creates MIR tensor from caffe blob
+  * @brief Create MIR tensor from caffe blob
   */
   std::shared_ptr<mir::TensorVariant> createTensor(const ::caffe::BlobProto&);
+
   /**
-  * @brief Returns MIR ops, under given caffe layer
+  * @brief Return MIR ops, under given caffe layer
   */
-  std::vector<mir::INode::Ref> getInputMIROps(const ::caffe::LayerParameter&);
+  std::vector<mir::INode::Ref> getPrecedingMIROps(const ::caffe::LayerParameter&);
+
   /**
-  * @brief Prepares Caffe layer parameters for Model IR operation creator.
+  * @brief Prepare Caffe layer parameters for Model IR operation creator.
   */
   std::vector<std::shared_ptr<mir::TensorVariant>> createOpParams(const ::caffe::LayerParameter&);
 
-  void createGraphInputs(const std::vector<std::string> &names,
-                         const std::vector<mir::Shape> &shapes);
+  void createGraphInputs(const std::vector<std::string>& names,
+                         const std::vector<mir::Shape>& shapes);
+
   void processInputLayer(const ::caffe::LayerParameter&);
+
   void processDeprecatedInput();
 };
 
index 4896a1a..ef62ca1 100644 (file)
@@ -44,6 +44,7 @@
 namespace nnc {
 
 using namespace mir;
+using namespace ::caffe;
 
 template <typename OptsType>
 static inline bool has2DStride(const OptsType& opts) {
@@ -53,10 +54,9 @@ static inline bool has2DStride(const OptsType& opts) {
   return opts.has_stride_h();
 }
 
-static inline Shape getStrideFromOneValue(bool hasStride, uint32_t stride) {
-  if (hasStride) {
+static inline Shape getStrideFromOneValue(bool has_stride, uint32_t stride) {
+  if (has_stride)
     return Shape{static_cast<int32_t>(stride), static_cast<int32_t>(stride), 1};
-  }
   else
     return Shape{1, 1, 1};
 }
@@ -82,35 +82,38 @@ static inline Shape getStride(const OptsType& opts) {
  * @todo Currently, stride_h and stride_w options take precedence if they are present,
  * but maybe it is not correct logic. Check how it really is done.
  */
-__attribute__ ((unused)) static Shape getConvStride(const ConvolutionParameter& opts) {
+static Shape getConvStride(const ConvolutionParameter& opts) {
   if (has2DStride(opts))
     return getStrideFromTwoValues(opts.stride_h(), opts.stride_w());
   else
     return getStride(opts);
 }
 
-__attribute__ ((unused)) static Shape getPoolStride(const PoolingParameter& opts) {
+static Shape getPoolStride(const PoolingParameter& opts) {
   if (has2DStride(opts))
     return getStrideFromTwoValues(opts.stride_h(), opts.stride_w());
   else
     return getStrideFromOneValue(opts.has_stride(), opts.stride());
 }
 
-__attribute__ ((unused)) static Shape getPoolWindowShape(const PoolingParameter &opts) {
+static Shape getPoolWindowShape(const PoolingParameter& opts) {
   if (opts.has_kernel_h() != opts.has_kernel_w())
     throw PassException("Pool layer has only 1 out of 2 kernel dimensions, investigate");
 
   if (opts.has_kernel_h()) {
-    return Shape{static_cast<int32_t>(opts.kernel_h()), static_cast<int32_t>(opts.kernel_w()), 1};
-  }
-  else if (opts.has_kernel_size()) {
-    return Shape{static_cast<int32_t>(opts.kernel_size()), static_cast<int32_t>(opts.kernel_size()), 1};
-  }
-  else
+    return Shape{static_cast<int32_t>(opts.kernel_h()),
+                 static_cast<int32_t>(opts.kernel_w()),
+                 1};
+  } else if (opts.has_kernel_size()) {
+    return Shape{static_cast<int32_t>(opts.kernel_size()),
+                 static_cast<int32_t>(opts.kernel_size()),
+                 1};
+  } else {
     throw PassException("Pooling layer doesn't have kernel size data, investigate");
+  }
 }
 
-__attribute__ ((unused)) static ops::PoolOp::PoolingType getPoolingType(const PoolingParameter& opts) {
+static ops::PoolOp::PoolingType getPoolingType(const PoolingParameter& opts) {
   using PoolingType = ops::PoolOp::PoolingType;
 
   if (opts.pool() == PoolingParameter::MAX)
@@ -119,7 +122,7 @@ __attribute__ ((unused)) static ops::PoolOp::PoolingType getPoolingType(const Po
     return PoolingType::AVG;
   else
     throw PassException("Unsupported pooling type: " +
-                          PoolingParameter::PoolMethod_Name(opts.pool()));
+                        PoolingParameter::PoolMethod_Name(opts.pool()));
 }
 
 /**
@@ -128,7 +131,7 @@ __attribute__ ((unused)) static ops::PoolOp::PoolingType getPoolingType(const Po
  * @todo Decide how to process axis in general.
  */
 template <typename OptsType>
-__attribute__ ((unused)) static int getAxisValue(const OptsType& opts) {
+static int getAxisValue(const OptsType& opts) {
   // -1 represents last one dimension
   int axis = -1;
   if (opts.has_axis()) {
@@ -139,7 +142,7 @@ __attribute__ ((unused)) static int getAxisValue(const OptsType& opts) {
                    "so make sure import works correctly." << std::endl;
     else if (axis != 1 && axis != -1)
       throw PassException("Softmax/Concat layer axis param is not 1 or -1, which implies"
-                            "unsupported NN architecture.");
+                          "unsupported NN architecture.");
   }
 
   // axis 1 represents channels in caffe, in Model ir it is second dimension for now
@@ -160,68 +163,71 @@ __attribute__ ((unused)) static int getAxisValue(const OptsType& opts) {
  * @param foldedKernel original grouped kernel
  * @return unfolded kernel, compatible with ordinary conv2D operation
  */
-static std::shared_ptr<IrTensor> fixGroupedKernel(int groups, std::shared_ptr<IrTensor> foldedKernel) {
-  const int kernelInChanNum = 2;
-  const int kernelOutChanNum = 3;
+static std::shared_ptr<IrTensor>
+fixGroupedKernel(int groups, std::shared_ptr<IrTensor> folded_kernel) {
+  const int kernel_in_chan_num = 2;
+  const int kernel_out_chan_num = 3;
 
-  const Shape &kernelShape = foldedKernel->getShape();
-  auto kernelInChannels = kernelShape.dim(kernelInChanNum);
-  auto kernelOutChannels = kernelShape.dim(kernelOutChanNum);
-  auto inChannels = kernelInChannels * groups;
+  const Shape& kernel_shape = folded_kernel->getShape();
+  auto kernel_in_channels = kernel_shape.dim(kernel_in_chan_num);
+  auto kernel_out_channels = kernel_shape.dim(kernel_out_chan_num);
+  auto in_channels = kernel_in_channels * groups;
 
   // Original kernel has shape [H, W, inputChannels/groups, outputChannels]
   // here creates unfolded kernel with shape [H, W, inputChannels, outputChannels]
-  Shape unfoldKernelShape(kernelShape);
-  unfoldKernelShape.dim(kernelInChanNum) = inChannels;
-  auto bufferSize = unfoldKernelShape.numElements() * foldedKernel->getElementSize();
-  std::shared_ptr<char> buffer(new char[bufferSize], std::default_delete<char[]>());
-  size_t dataSize = foldedKernel->getElementSize();
-  std::shared_ptr<IrTensor> unfoldKernel =
-          std::make_shared<IrTensor>(unfoldKernelShape, buffer, foldedKernel->getDataType(), dataSize);
-
-  int inGroupSize = kernelInChannels;
-  int outGroupSize = kernelOutChannels / groups;
-  assert(kernelOutChannels % groups == 0);
+  Shape unfold_kernel_shape(kernel_shape);
+  unfold_kernel_shape.dim(kernel_in_chan_num) = in_channels;
+  auto buffer_size = unfold_kernel_shape.numElements() * folded_kernel->getElementSize();
+  std::shared_ptr<char> buffer(new char[buffer_size], std::default_delete<char[]>());
+  size_t data_size = folded_kernel->getElementSize();
+  std::shared_ptr<IrTensor> unfold_kernel =
+          std::make_shared<IrTensor>(unfold_kernel_shape, buffer, folded_kernel->getDataType(),
+                                     data_size);
+
+  int in_group_size = kernel_in_channels;
+  int out_group_size = kernel_out_channels / groups;
+  assert(kernel_out_channels % groups == 0);
 
   // Iterate over "unfolded" kernel Shape and insert appropriate values into result kernel
-  for (const mir::Index &idx: mir::ShapeRange(unfoldKernelShape)) {
-    auto inGroupNo = idx.at(kernelInChanNum) / inGroupSize;
-    auto outGroupNo = idx.at(kernelOutChanNum) / outGroupSize;
+  for (const mir::Index& idx: mir::ShapeRange(unfold_kernel_shape)) {
+    auto in_group_no = idx.at(kernel_in_chan_num) / in_group_size;
+    auto out_group_no = idx.at(kernel_out_chan_num) / out_group_size;
     // check that input channel group fits output channel group
-    if (inGroupNo == outGroupNo) {
+    if (in_group_no == out_group_no) {
       // compute index in original kernel that corresponds output index
-      mir::Index foldedIdx(idx);
-      foldedIdx.at(kernelInChanNum) %= inGroupSize;
+      mir::Index folded_idx(idx);
+      folded_idx.at(kernel_in_chan_num) %= in_group_size;
 
-      std::copy(foldedKernel->at(foldedIdx), foldedKernel->at(foldedIdx) + dataSize, unfoldKernel->at(idx));
-    }
-    else {
+      std::copy(folded_kernel->at(folded_idx), folded_kernel->at(folded_idx) + data_size,
+                unfold_kernel->at(idx));
+    } else {
       // fill element of output kernel with zero element
-      assert(foldedKernel->getDataType() == IrTensor::DTYPE::FLOAT && "unsupported data type, add appropriate zero element creation");
-      float *elem = reinterpret_cast<float *>(unfoldKernel->at(idx));
+      assert(folded_kernel->getDataType() == IrTensor::DTYPE::FLOAT &&
+             "unsupported data type, add appropriate zero element creation");
+      float* elem = reinterpret_cast<float*>(unfold_kernel->at(idx));
       *elem = 0.0f;
     }
   }
-  return unfoldKernel;
+  return unfold_kernel;
 }
 
 void OpCreator::checkConv2D(const caffe::ConvolutionParameter& opts,
-                            std::set<std::string> &problemsOpSet) {
+                            std::set<std::string>& problems_op_set) {
   assert(opts.stride_size() <= 2);
 
   if (opts.pad_size() != 0 && (opts.has_pad_h() || opts.has_pad_w()))
-    problemsOpSet.insert("Conv2D: Conflicting padding properties");
+    problems_op_set.insert("Conv2D: Conflicting padding properties");
 
   if (opts.pad_size() > 2)
-    problemsOpSet.insert("Conv2D: Unsupported number of pads");
+    problems_op_set.insert("Conv2D: Unsupported number of pads");
 }
 
 std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
                                                 const caffe::ConvolutionParameter& opts) {
-  ops::PaddingType padType = ops::PaddingType::Custom;
-  Shape strideShape = getConvStride(opts);
+  ops::PaddingType pad_type = ops::PaddingType::Custom;
+  Shape stride_shape = getConvStride(opts);
 
-  std::shared_ptr<IrTensor> unfoldedTensor = params[0];
+  std::shared_ptr<IrTensor> unfolded_tensor = params[0];
   std::vector<INode::Ref> outputs;
   auto in_group_size = params[0]->getShape().dim(2);
   auto out_channels = params[0]->getShape().dim(3);
@@ -232,18 +238,18 @@ std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams par
     // TODO handle properly kernel with layer multiplier
     std::shared_ptr<IrTensor> transposed_tensor = mir::transposeTensor<0, 1, 3, 2>(params[0]);
     outputs = createOp<ops::DepthwiseConv2DOp>(inputs, std::move(*transposed_tensor),
-                                               strideShape, padType);
+                                               stride_shape, pad_type);
   } else {
     if (num_groups != 1) {
       // first we need to convert kernel of grouped convolution to appropriate ordinary kernel
-      unfoldedTensor = fixGroupedKernel(opts.group(), params[0]);
+      unfolded_tensor = fixGroupedKernel(opts.group(), params[0]);
     }
-    outputs = createOp<ops::Conv2DOp>(inputs, std::move(*unfoldedTensor),
-                                      strideShape, padType);
+    outputs = createOp<ops::Conv2DOp>(inputs, std::move(*unfolded_tensor),
+                                      stride_shape, pad_type);
   }
 
   // Set pads
-  auto *op = static_cast<ops::Conv2DOp *>(outputs[0]->getOperation());
+  auto* op = static_cast<ops::Conv2DOp*>(outputs[0]->getOperation());
 
   int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
   int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
@@ -261,8 +267,8 @@ std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams par
     return outputs;
 }
 
-void OpCreator::checkFullyConnected(const caffe::InnerProductParameter &opts,
-                                    std::set<std::string> &problemsOpSet) {
+void OpCreator::checkFullyConnected(const caffe::InnerProductParameter& opts,
+                                    std::set<std::string>& problemsOpSet) {
   if (opts.has_axis() && opts.axis() != 1)
     problemsOpSet.insert("Fully Connected: layer axis param is not supported yet");
 
@@ -278,38 +284,38 @@ void OpCreator::checkFullyConnected(const caffe::InnerProductParameter &opts,
  * implement it correctly.
  * @todo Support axis and transpose parameters as needed.
  */
-std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps &inputs, InputParams &params,
-                                                        const caffe::InnerProductParameter &opts) {
+std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
+                                                        const caffe::InnerProductParameter& opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
   // It is needed because Caffe InnerProduct layer takes NCHW input and flattens the CHW part.
   auto outputs = createOp<ops::ReshapeOp>(inputs);
-  int32_t fcInputSize = static_cast<int32_t>(
-                                 params[0]->getShape().numElements()) / opts.num_output();
-  outputs[0]->getOperation()->setOutputShape(0, {1, fcInputSize});
+  int32_t fc_input_size = static_cast<int32_t>(
+                                  params[0]->getShape().numElements()) / opts.num_output();
+  outputs[0]->getOperation()->setOutputShape(0, {1, fc_input_size});
 
-  auto fcOutputs = createOp<ops::FullyConnectedOp>(outputs, std::move(*params[0]));
+  auto fc_outputs = createOp<ops::FullyConnectedOp>(outputs, std::move(*params[0]));
 
   // bias_term is optional (so might not be present) and defaults to true
   if (!opts.has_bias_term() || opts.bias_term())
-    return createOp<ops::BiasAddOp>(fcOutputs, std::move(*params[1]));
+    return createOp<ops::BiasAddOp>(fc_outputs, std::move(*params[1]));
   else
-    return fcOutputs;
+    return fc_outputs;
 }
 
 std::vector<INode::Ref> OpCreator::createConcat(InputOps inputs, InputParams params,
                                                 const caffe::ConcatParameter& opts) {
-  (void)params;
+  (void) params;
 
   return createOp<ops::ConcatOp>(inputs, inputs.size(), getAxisValue(opts));
 }
 
-void OpCreator::checkPool(const caffe::PoolingParameter &opts,
-                          std::set<std::string> &problemsOpSet) {
+void OpCreator::checkPool(const caffe::PoolingParameter& opts,
+                          std::set<std::string>& problemsOpSet) {
   if (opts.has_global_pooling() && opts.global_pooling())
     problemsOpSet.insert("Pooling: pooling layer global_pooling param is not supported yet");
 
-  ops::PoolOp::PoolingType poolType = getPoolingType(opts);
-  if (poolType != ops::PoolOp::PoolingType::AVG && poolType != ops::PoolOp::PoolingType::MAX)
+  ops::PoolOp::PoolingType pool_type = getPoolingType(opts);
+  if (pool_type != ops::PoolOp::PoolingType::AVG && pool_type != ops::PoolOp::PoolingType::MAX)
     problemsOpSet.insert("Pooling: unsupported pooling type");
 
   if (opts.has_pad() && (opts.has_pad_h() || opts.has_pad_w()))
@@ -318,29 +324,30 @@ void OpCreator::checkPool(const caffe::PoolingParameter &opts,
 
 std::vector<INode::Ref> OpCreator::createPool(InputOps inputs, InputParams params,
                                               const caffe::PoolingParameter& opts) {
-  (void)params;
+  (void) params;
 
-  Shape windowShape = getPoolWindowShape(opts);
-  ops::PoolOp::PoolingType poolType = getPoolingType(opts);
-  ops::PaddingType padType = ops::PaddingType::Custom;
+  Shape window_shape = getPoolWindowShape(opts);
+  ops::PoolOp::PoolingType pool_type = getPoolingType(opts);
+  ops::PaddingType pad_type = ops::PaddingType::Custom;
   Shape stride = getPoolStride(opts);
-  ops::PoolOp::BorderType borderType;
-  switch (poolType) {
+  ops::PoolOp::BorderType border_type;
+  switch (pool_type) {
     case ops::PoolOp::PoolingType::AVG:
-      borderType = ops::PoolOp::BorderType::ZEROFILLED;
+      border_type = ops::PoolOp::BorderType::ZEROFILLED;
       break;
     case ops::PoolOp::PoolingType::MAX:
-      borderType = ops::PoolOp::BorderType::EMPTY;
+      border_type = ops::PoolOp::BorderType::EMPTY;
       break;
     default:
       // This check performed in checkPool()
       assert(false);
   }
 
-  auto pooling = createOp<ops::PoolOp>(inputs, windowShape, stride, poolType, padType, borderType);
+  auto pooling = createOp<ops::PoolOp>(inputs, window_shape, stride, pool_type,
+                                       pad_type, border_type);
 
   // Set pads
-  auto op = static_cast<ops::PoolOp *>(pooling[0]->getOperation());
+  auto op = static_cast<ops::PoolOp*>(pooling[0]->getOperation());
   int pad_h = opts.has_pad_h() ? opts.pad_h() : 0;
   int pad_w = opts.has_pad_w() ? opts.pad_w() : 0;
   if (opts.has_pad())
@@ -354,13 +361,13 @@ std::vector<INode::Ref> OpCreator::createPool(InputOps inputs, InputParams param
 
 std::vector<INode::Ref> OpCreator::createSoftmax(InputOps inputs, InputParams params,
                                                  const caffe::SoftmaxParameter& opts) {
-  (void)params;
+  (void) params;
 
   return createOp<ops::SoftmaxOp>(inputs, getAxisValue(opts));
 }
 
-void OpCreator::checkReshape(const caffe::ReshapeParameter &opts,
-                             std::set<std::string> &problemsOpSet) {
+void OpCreator::checkReshape(const caffe::ReshapeParameter& opts,
+                             std::set<std::string>& problemsOpSet) {
   if (opts.has_axis() || opts.has_num_axes())
     problemsOpSet.insert("Reshape layer axis and num_axes params are not supported yet");
 
@@ -382,30 +389,31 @@ void OpCreator::checkReshape(const caffe::ReshapeParameter &opts,
  */
 std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams params,
                                                  const caffe::ReshapeParameter& opts) {
-  (void)params;
+  (void) params;
 
   auto outputs = createOp<ops::ReshapeOp>(inputs);
 
-  Shape newShape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
+  Shape new_shape = ShapeHelper::createShape(opts.shape().dim(), opts.shape().dim_size());
 
-  outputs[0]->getOperation()->setOutputShape(0, newShape);
+  outputs[0]->getOperation()->setOutputShape(0, new_shape);
   return outputs;
 }
 
-void OpCreator::checkRelu(const caffe::ReLUParameter &opts,
-                          std::set<std::string> &problemsOpSet) {
+void OpCreator::checkRelu(const caffe::ReLUParameter& opts,
+                          std::set<std::string>& problems_op_set) {
   if (opts.has_negative_slope())
-    problemsOpSet.insert("ReLU layer negative_slope param is not supported yet.");
+    problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
 }
 
 std::vector<INode::Ref> OpCreator::createRelu(InputOps inputs, InputParams params,
                                               const caffe::ReLUParameter& opts) {
-  (void)params;
+  (void) params;
 
   return createOp<ops::ReluOp>(inputs);
 }
 
-std::vector<INode::Ref> OpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter& opts) {
+std::vector<INode::Ref>
+OpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter& opts) {
   auto outputs = createOp<ops::ScaleOp>(inputs, std::move(*params[0]));
   // bias_term is optional (so might not be present) and defaults to true
   if (!opts.has_bias_term() || opts.bias_term())
@@ -414,49 +422,53 @@ std::vector<INode::Ref> OpCreator::createScale(InputOps inputs, InputParams para
     return outputs;
 }
 
-void OpCreator::checkBatchNorm(const caffe::BatchNormParameter &opts, InputParams params,
-                               std::set<std::string> &problemsOpSet) {
+void OpCreator::checkBatchNorm(const caffe::BatchNormParameter& opts, InputParams params,
+                               std::set<std::string>& problems_op_set) {
   // Check that last blob(with scaleFactor) containing only one number
   if (params[2]->getShape().rank() != 1 && params[2]->getShape().dim(0) != 1)
-    problemsOpSet.insert("Unexpected shape of scale parameter in batch norm");
+    problems_op_set.insert("Unexpected shape of scale parameter in batch norm");
 }
 
-std::vector<INode::Ref> OpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts) {
-  const float MAFRAC_DEF =  0.999f;
-  const float EPS_DEF =  1e-5f;
+std::vector<INode::Ref>
+OpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts) {
+  const float MAFRAC_DEF = 0.999f;
+  const float EPS_DEF = 1e-5f;
   // optional params may be left out, so we fill them with defalt values (lifted from caffe docs)
-  float moving_average_fraction = (opts.has_moving_average_fraction()) ? opts.moving_average_fraction() : MAFRAC_DEF;
-  (void)moving_average_fraction;
+  float moving_average_fraction = MAFRAC_DEF;
+  if (opts.has_moving_average_fraction())
+    moving_average_fraction = opts.moving_average_fraction();
+  (void) moving_average_fraction;
   float eps = (opts.has_eps()) ? opts.eps() : EPS_DEF;
 
-  float scaleFactor = *reinterpret_cast<float *>(params[2]->at(mir::Index{0}));
+  float scale_factor = *reinterpret_cast<float*>(params[2]->at(mir::Index{0}));
   // Code below is taken from cpu caffe implementation:
   // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/batch_norm_layer.cpp#L100
-  if (scaleFactor != 0.0f)
-    scaleFactor = 1.0f / scaleFactor;
+  if (scale_factor != 0.0f)
+    scale_factor = 1.0f / scale_factor;
 
   // create bias argument from mean:
   // multiply elements of mean by scaleFactor and get opposite numbers
   // to subtract mean from input via biasAdd operation
-  Tensor<float> biasData(*params[0]);
+  Tensor<float> bias_data(*params[0]);
 
-  for (Index idx: ShapeRange(biasData.getShape()))
-    biasData.at(idx) *= -scaleFactor;
-  auto meanOutputs = createOp<ops::BiasAddOp>(inputs, std::move(*params[0]));
+  for (Index idx: ShapeRange(bias_data.getShape()))
+    bias_data.at(idx) *= -scale_factor;
+  auto mean_outputs = createOp<ops::BiasAddOp>(inputs, std::move(*params[0]));
 
   // create scale argument from variance:
   // multiply elements of variance by scaleFactor and
   // normalize biased input using scale operation
-  Tensor<float> scaleData(*params[1]);
-  for (Index idx: ShapeRange(scaleData.getShape()))
-    scaleData.at(idx) = 1.0f/std::sqrt(scaleData.at(idx)*scaleFactor + eps);
-  auto varianceOutputs = createOp<ops::ScaleOp>(meanOutputs, std::move(*params[1]));
+  Tensor<float> scale_data(*params[1]);
+  for (Index idx: ShapeRange(scale_data.getShape()))
+    scale_data.at(idx) = 1.0f / std::sqrt(scale_data.at(idx) * scale_factor + eps);
+  auto variance_outputs = createOp<ops::ScaleOp>(mean_outputs, std::move(*params[1]));
 
-  return varianceOutputs;
+  return variance_outputs;
 }
 
-std::vector<INode::Ref> OpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts) {
-  (void)params;
+std::vector<INode::Ref>
+OpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts) {
+  (void) params;
   const float DROPOUT_RATIO_DEF = 0.5f;
   // optional params may be left out, so we fill them with defalt values (lifted from caffe docs)
   float dropot_ratio = (opts.has_dropout_ratio()) ? opts.dropout_ratio() : DROPOUT_RATIO_DEF;
index 28f92c0..ad743dc 100644 (file)
@@ -32,8 +32,6 @@
 
 namespace nnc {
 
-using namespace ::caffe;
-
 using nnc::mir::Graph;
 using nnc::mir::INode;
 using IrTensor = nnc::mir::TensorVariant;
@@ -41,50 +39,66 @@ using nnc::mir::Shape;
 
 class OpCreator {
 public:
-    using InputOps = std::vector<INode::Ref>&;
-    using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
-
-    explicit OpCreator(Graph* g) : graph(g) {};
-
-    std::vector<INode::Ref> createConv2D(InputOps inputs, InputParams params, const ConvolutionParameter& opts);
-    std::vector<INode::Ref> createFullyConnected(InputOps inputs, InputParams params, const InnerProductParameter& opts);
-    std::vector<INode::Ref> createConcat(InputOps inputs, InputParams params, const ConcatParameter& opts);
-    std::vector<INode::Ref> createPool(InputOps inputs, InputParams params, const PoolingParameter& opts);
-    std::vector<INode::Ref> createSoftmax(InputOps inputs, InputParams params, const SoftmaxParameter& opts);
-    std::vector<INode::Ref> createReshape(InputOps inputs, InputParams params, const ReshapeParameter& opts);
-    std::vector<INode::Ref> createRelu(InputOps inputs, InputParams params, const ReLUParameter& opts);
-    std::vector<INode::Ref> createScale(InputOps inputs, InputParams params, const ScaleParameter& opts);
-    std::vector<INode::Ref> createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts);
-    std::vector<INode::Ref> createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts);
-
-    void checkConv2D(const caffe::ConvolutionParameter& opts, std::set<std::string> &unsupportedOpSet);
-    void checkFullyConnected(const caffe::InnerProductParameter &opts, std::set<std::string> &problemsOpSet);
-    void checkPool(const caffe::PoolingParameter &opts, std::set<std::string> &problemsOpSet);
-    void checkReshape(const caffe::ReshapeParameter &opts, std::set<std::string> &problemsOpSet);
-    void checkRelu(const caffe::ReLUParameter &opts, std::set<std::string> &problemsOpSet);
-    void checkBatchNorm(const caffe::BatchNormParameter &opts, InputParams params,
-                        std::set<std::string> &problemsOpSet);
+  using InputOps = std::vector<INode::Ref>&;
+  using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
+
+  explicit OpCreator(Graph* g) : graph(g) {};
+
+  std::vector<INode::Ref> createConv2D(InputOps, InputParams,
+                                       const ::caffe::ConvolutionParameter&);
+
+  std::vector<INode::Ref> createFullyConnected(InputOps, InputParams,
+                                               const ::caffe::InnerProductParameter&);
+
+  std::vector<INode::Ref> createConcat(InputOps, InputParams, const ::caffe::ConcatParameter&);
+
+  std::vector<INode::Ref> createPool(InputOps, InputParams, const ::caffe::PoolingParameter&);
+
+  std::vector<INode::Ref> createSoftmax(InputOps, InputParams, const ::caffe::SoftmaxParameter&);
+
+  std::vector<INode::Ref> createReshape(InputOps, InputParams, const ::caffe::ReshapeParameter&);
+
+  std::vector<INode::Ref> createRelu(InputOps, InputParams, const ::caffe::ReLUParameter&);
+
+  std::vector<INode::Ref> createScale(InputOps, InputParams, const ::caffe::ScaleParameter&);
+
+  std::vector<INode::Ref> createBatchNorm(InputOps, InputParams,
+                                          const ::caffe::BatchNormParameter&);
+
+  std::vector<INode::Ref> createDropout(InputOps, InputParams, const ::caffe::DropoutParameter&);
+
+  void checkConv2D(const caffe::ConvolutionParameter&, std::set<std::string>&);
+
+  void checkFullyConnected(const caffe::InnerProductParameter&, std::set<std::string>&);
+
+  void checkPool(const caffe::PoolingParameter&, std::set<std::string>&);
+
+  void checkReshape(const caffe::ReshapeParameter&, std::set<std::string>&);
+
+  void checkRelu(const caffe::ReLUParameter&, std::set<std::string>&);
+
+  void checkBatchNorm(const caffe::BatchNormParameter&, InputParams, std::set<std::string>&);
 
 private:
-    Graph* graph = nullptr;
+  Graph* graph = nullptr;
 
-    void connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs);
+  void connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs);
 
-    template <typename OpType, typename ...Types>
-    std::vector<INode::Ref> createOp(std::vector<INode::Ref>& inputs, Types&&... args);
+  template <typename OpType, typename ...Types>
+  std::vector<INode::Ref> createOp(std::vector<INode::Ref>& inputs, Types&& ... args);
 };
 
-template<typename OpType, typename ...Types>
-std::vector<INode::Ref> OpCreator::createOp(std::vector<INode::Ref>& inputs, Types&&... args) {
-    std::vector<INode::Ref> outputs;
+template <typename OpType, typename ...Types>
+std::vector<INode::Ref> OpCreator::createOp(std::vector<INode::Ref>& inputs, Types&& ... args) {
+  std::vector<INode::Ref> outputs;
 
-    // TODO: set operation names
-    auto op = graph->create<OpType>("", std::forward<Types>(args)...);
+  // TODO: set operation names
+  auto op = graph->create<OpType>("", std::forward<Types>(args)...);
 
-    connectInputs(op, inputs);
-    outputs.push_back(op);
+  connectInputs(op, inputs);
+  outputs.push_back(op);
 
-    return outputs;
+  return outputs;
 }
 
 } // namespace nnc
index 8718401..3a366b9 100644 (file)
@@ -29,37 +29,37 @@ const int protoBytesLimit = INT_MAX;
 const int protoBytesWarningLimit = 1024 * 1024 * 512;
 
 bool readProtoFromTextFile(const char* filename, ::caffe::NetParameter* proto) {
-    int fd = open(filename, O_RDONLY);
-    if (fd == -1) {
-        std::cout << "File not found: " << filename << std::endl;
-        return false;
-    }
+  int fd = open(filename, O_RDONLY);
+  if (fd == -1) {
+    std::cout << "File not found: " << filename << std::endl;
+    return false;
+  }
 
-    FileInputStream input{fd};
+  FileInputStream input{fd};
 
-    bool success = google::protobuf::TextFormat::Parse(&input, proto);
+  bool success = google::protobuf::TextFormat::Parse(&input, proto);
 
-    close(fd);
+  close(fd);
 
-    return success;
+  return success;
 }
 
 bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto) {
-    int fd = open(filename, O_RDONLY);
-    if (fd == -1) {
-        std::cout << "File not found: " << filename << std::endl;
-        return false;
-    }
+  int fd = open(filename, O_RDONLY);
+  if (fd == -1) {
+    std::cout << "File not found: " << filename << std::endl;
+    return false;
+  }
 
-    FileInputStream raw_input{fd};
-    CodedInputStream coded_input{&raw_input};
-    coded_input.SetTotalBytesLimit(protoBytesLimit, protoBytesWarningLimit);
+  FileInputStream raw_input{fd};
+  CodedInputStream coded_input{&raw_input};
+  coded_input.SetTotalBytesLimit(protoBytesLimit, protoBytesWarningLimit);
 
-    bool success = proto->ParseFromCodedStream(&coded_input);
+  bool success = proto->ParseFromCodedStream(&coded_input);
 
-    close(fd);
+  close(fd);
 
-    return success;
+  return success;
 }
 
 } // namespace nnc
index c3810bb..de397ad 100644 (file)
@@ -28,6 +28,7 @@ using google::protobuf::io::ZeroCopyInputStream;
 using google::protobuf::io::CodedInputStream;
 
 bool readProtoFromTextFile(const char* filename, ::caffe::NetParameter* proto);
+
 bool readProtoFromBinaryFile(const char* filename, ::caffe::NetParameter* proto);
 
 } // namespace nnc
index a9c09ed..4213e10 100644 (file)
@@ -22,12 +22,9 @@ target_include_directories(tflite_schema PUBLIC ${FB_GEN_INCLUDE_DIRS})
 # TFLITE importer #
 ###################
 
-set(tflite_importer_sources tflite_walker.cpp
-                            tflite_dump_visitor.cpp
-                            tflite_ir_visitor.cpp
-                            tflite_op_creator.cpp
-        tflite_importer.cpp
-        tflite_frontend.cpp)
+set(tflite_importer_sources tflite_op_creator.cpp
+                            tflite_importer.cpp
+                            tflite_frontend.cpp)
 file(GLOB tflite_importer_headers *.h)
 
 set(tflite_import tflite_import)
diff --git a/contrib/nnc/passes/tflite_frontend/schema.h b/contrib/nnc/passes/tflite_frontend/schema.h
deleted file mode 100644 (file)
index e7c0bf6..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_SCHEMA_H
-#define NNCC_SCHEMA_H
-
-#define tflite v_dev_tflite
-#include "schema_generated.h"
-#undef tflite
-
-#endif // NNCC_SCHEMA_H
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp b/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.cpp
deleted file mode 100644 (file)
index 853d538..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <iostream>
-
-#include "tflite_dump_visitor.h"
-
-using std::cout;
-using std::endl;
-
-static std::ostream &operator<<(std::ostream &os, const flatbuffers::Vector<int32_t> *v);
-static std::ostream &operator<<(std::ostream &os, Padding pad);
-static std::ostream &operator<<(std::ostream &os, ActivationFunctionType act);
-
-namespace nnc
-{
-namespace tflite
-{
-
-void DumpVisitor::visit(const Model *m)
-{
-  cout << "[Model version]: " << m->version() << endl;
-  cout << "[Model description]: " << m->description()->data() << endl;
-  cout << "[Model info]: " << m->subgraphs()->size() << " subgraphs" << endl;
-  cout << "[Model info]: " << m->buffers()->size() << " buffers" << endl;
-}
-
-void DumpVisitor::visit(const SubGraph *s)
-{
-  cout << "[Subgraph]: \"" << (s->name() ? s->name()->c_str() : "\0") << "\"" << endl;
-  cout << "[Subgraph inputs]: " << s->inputs() << endl;
-  cout << "[Subgraph outputs]: " << s->outputs() << endl;
-  cout << "[Subgraph info]: " << s->tensors()->size() << " tensors" << endl;
-}
-
-void DumpVisitor::visit(const Buffer *b)
-{
-  cout << "[Buffer " << bufferCnt++ << "]: size: " << (b->data() ? b->data()->size() : 0) << endl;
-}
-
-void DumpVisitor::visit(const Tensor *t)
-{
-  cout << "[Tensor " << tensorCnt++ << "]: \"" << t->name()->data() << "\"" << endl;
-  cout << "  [Tensor shape]: " << t->shape() << endl;
-  cout << "  [Tensor buffer]: " << t->buffer() << endl;
-}
-
-void DumpVisitor::visit(const Operator *op)
-{
-  cout << "[Operator]: " << opNames[op->opcode_index()] << endl;
-  cout << "  [Operator inputs]: " << op->inputs() << endl;
-  cout << "  [Operator outputs]: " << op->outputs() << endl;
-
-  switch (op->builtin_options_type())
-  {
-  case BuiltinOptions::BuiltinOptions_Conv2DOptions:
-  {
-    const Conv2DOptions *opts = op->builtin_options_as<Conv2DOptions>();
-    cout << "  [Padding]: " << opts->padding() << endl;
-    cout << "  [Strides]: " << opts->stride_w() << ", " << opts->stride_h() << endl;
-    cout << "  [Activation]: " << opts->fused_activation_function() << endl;
-    break;
-  }
-  case BuiltinOptions::BuiltinOptions_DepthwiseConv2DOptions:
-  {
-    const DepthwiseConv2DOptions *opts = op->builtin_options_as<DepthwiseConv2DOptions>();
-    cout << "  [Padding]: " << opts->padding() << endl;
-    cout << "  [Strides]: " << opts->stride_w() << ", " << opts->stride_h() << endl;
-    cout << "  [Activation]: " << opts->fused_activation_function() << endl;
-    cout << "  [DepthMultiplier]: " << opts->depth_multiplier() << endl;
-    break;
-  }
-  case BuiltinOptions::BuiltinOptions_ReshapeOptions:
-  {
-    const ReshapeOptions *opts = op->builtin_options_as<ReshapeOptions>();
-    cout << "  [New shape]: " << opts->new_shape() << endl;
-    break;
-  }
-  default:
-    break;
-  }
-}
-
-void DumpVisitor::visit(const OperatorCode *oc)
-{
-  opNames.push_back(EnumNamesBuiltinOperator()[oc->builtin_code()]);
-  cout << "[Model operator]: " << opNames.back() << endl;
-}
-
-} // namespace tflite
-} // namespace nnc
-
-static std::ostream &operator<<(std::ostream &os, const flatbuffers::Vector<int32_t> *v)
-{
-  for (size_t i = 0; i < v->size(); ++i)
-  {
-    if (i != 0)
-      os << ", ";
-    os << (*v)[i];
-  }
-
-  return os;
-}
-
-static std::ostream &operator<<(std::ostream &os, Padding pad)
-{
-  os << EnumNamesPadding()[pad];
-  return os;
-}
-
-static std::ostream &operator<<(std::ostream &os, ActivationFunctionType act)
-{
-  os << EnumNamesActivationFunctionType()[act];
-  return os;
-}
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h b/contrib/nnc/passes/tflite_frontend/tflite_dump_visitor.h
deleted file mode 100644 (file)
index bb86de6..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_TFLITE_DUMP_VISITOR_H
-#define NNCC_TFLITE_DUMP_VISITOR_H
-
-#include <vector>
-
-#include "tflite_visitor.h"
-#include "schema.h"
-
-using namespace v_dev_tflite;
-
-namespace nnc
-{
-namespace tflite
-{
-
-class DumpVisitor : public Visitor
-{
-public:
-  void visit(const Model *) override;
-  void visit(const SubGraph *) override;
-  void visit(const Tensor *) override;
-  void visit(const OperatorCode *) override;
-  void visit(const Operator *) override;
-  void visit(const Buffer *) override;
-
-private:
-  // TODO: add counter reset mechanism or restructure the code
-  int tensorCnt = 0;
-  int bufferCnt = 0;
-
-  std::vector<const char *> opNames;
-};
-
-} // namespace tflite
-} // namespace nnc
-
-#endif // NNCC_TFLITE_DUMP_VISITOR_H
index b0c1c21..947030f 100644 (file)
@@ -28,7 +28,7 @@
 namespace nnc {
 
 PassData TFLiteFrontend::run(PassData) {
-  nnc::tflite::v_dev::TfliteImporter importer{cli::inputFile};
+  TfliteImporter importer{cli::inputFile};
 
   importer.import();
 
index 412b226..126fdf5 100644 (file)
  * limitations under the License.
  */
 
-#include <iostream>
-
 #include "tflite_importer.h"
-#include "tflite_ir_visitor.h"
-#include "tflite_dump_visitor.h"
-#include "tflite_walker.h"
+#include "tflite_op_creator.h"
+
+using namespace ::tflite;
+
+namespace nnc {
+
+TfliteImporter::TfliteImporter(std::string filename) : _filename(filename) {
+  _modelRaw.reset(new ModelAllocation{std::move(filename)});
+  _graph = new Graph();
+  _opCreator.reset(new OpCreator(_graph));
+}
+
+void TfliteImporter::importUnpacked() {
+  import();
+
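+  // UnPack() copies the flatbuffer contents into the object API representation (ModelT).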
+  _model.reset(_modelPacked->UnPack());
+}
+
+void TfliteImporter::import() {
+  const void* model_buffer = _modelRaw->getDataPnt();
+
+  if (!model_buffer)
+    throw PassException("Could not load model: " + _filename + "\n");
+
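+  // Verify the buffer before interpreting it as a TFLite model.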
+  auto verifier = flatbuffers::Verifier(reinterpret_cast<const uint8_t*>(model_buffer),
+                                        _modelRaw->getNumBytes());
+
+  if (!VerifyModelBuffer(verifier))
+    throw PassException("Could not load model: " + _filename + "\n");
+
+  _modelPacked = GetModel(_modelRaw->getDataPnt());
+
+  _opcodes = _modelPacked->operator_codes();
+  _buffers = _modelPacked->buffers();
+  collectUnsupportedOps();
+}
+
+void TfliteImporter::collectUnsupportedOps() {
+  for (auto sub_graph: *(_modelPacked->subgraphs()))
+    for (auto op: *(sub_graph->operators()))
+      processUnsupportedOp(op);
+
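+  // Report all detected problems in a single exception instead of failing on the first one.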
+  if (!_problemsOpSet.empty()) {
+    std::string msg("Detected problems:\n");
+    for (const auto& problem_str : _problemsOpSet)
+      msg.append(problem_str + "\n");
+    throw PassException(msg);
+  }
+}
+
+void TfliteImporter::processUnsupportedOp(const Operator* op) {
+  unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
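+  // Validate the options of supported operators; any other opcode is reported as unsupported.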
+  switch (opcode) {
+    case BuiltinOperator_MAX_POOL_2D:
+    case BuiltinOperator_AVERAGE_POOL_2D:
+      _opCreator->checkPool(op->builtin_options_as<Pool2DOptions>(), _problemsOpSet);
+      break;
+    case BuiltinOperator_CONCATENATION:
+      _opCreator->checkConcat(op->builtin_options_as<ConcatenationOptions>(), _problemsOpSet);
+      break;
+    case BuiltinOperator_CONV_2D:
+      _opCreator->checkConv2D(op->builtin_options_as<Conv2DOptions>(), _problemsOpSet);
+      break;
+    case BuiltinOperator_DEPTHWISE_CONV_2D:
+      _opCreator->checkDepthConv2D(op->builtin_options_as<DepthwiseConv2DOptions>(),
+                                   _problemsOpSet);
+      break;
+    case BuiltinOperator_FULLY_CONNECTED:
+      _opCreator->checkFullyConnected(op->builtin_options_as<FullyConnectedOptions>(),
+                                      _problemsOpSet);
+      break;
+    case BuiltinOperator_SOFTMAX:
+    case BuiltinOperator_RESHAPE:
+      // No checks
+      break;
+    default:
+      _problemsOpSet.insert(std::string(EnumNamesBuiltinOperator()[opcode])
+                            + ": unsupported operator");
+  }
+}
+
+Graph* TfliteImporter::createIR() {
+  walkGraphAndCreateMIR();
+  return _graph;
+}
+
+void TfliteImporter::walkGraphAndCreateMIR() {
+  walkModel(_modelPacked);
+  setIrNodeNames();
+  setGraphOutputs();
+}
+
+void TfliteImporter::walkModel(const Model* m) {
+  for (auto sub_graph: *(m->subgraphs()))
+    walkSubGraph(sub_graph);
+}
+
+void TfliteImporter::walkSubGraph(const SubGraph* s) {
+  _tensors = s->tensors();
+
+  _graphInputs.assign(s->inputs()->begin(), s->inputs()->end());
+  _graphOutputs.assign(s->outputs()->begin(), s->outputs()->end());
+
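+  // Create a VariableOp for every graph input tensor and register it as the tensor's producer.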
+  for (auto i : *s->inputs()) {
+    const Tensor* t = (*s->tensors())[i];
+    auto node = _graph->create<mir::ops::VariableOp>(t->name()->c_str());
+    _opsForTensorsTheyOutput[i] = node;
+
+    Shape inputShape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
+    // So far we assume that if the first dimension is equal to 1,
+    // then it is the batch dimension and should be ignored
+    ShapeHelper::cutOffBatchDim(inputShape);
+    node->getOperation()->setOutputShape(0, inputShape);
+  }
+
+  for (auto op: *(s->operators()))
+    walkOperator(op);
+}
+
+void TfliteImporter::walkOperator(const Operator* op) {
+  auto inputs = getPrecedingMIROps(op);
+  auto params = createOpParams(op);
+
+  std::vector<INode::Ref> outputs;
+
+  unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
+  switch (opcode) {
+    case BuiltinOperator_CONV_2D:
+      outputs = _opCreator->createConv2D(inputs, params, op->builtin_options_as<Conv2DOptions>());
+      break;
+    case BuiltinOperator_DEPTHWISE_CONV_2D:
+      outputs = _opCreator->createDepthConv2D(inputs, params,
+                                              op->builtin_options_as<DepthwiseConv2DOptions>());
+      break;
+    case BuiltinOperator_MAX_POOL_2D:
+      outputs = _opCreator->createMaxPool(inputs, params, op->builtin_options_as<Pool2DOptions>());
+      break;
+    case BuiltinOperator_AVERAGE_POOL_2D:
+      outputs = _opCreator->createAvgPool(inputs, params, op->builtin_options_as<Pool2DOptions>());
+      break;
+    case BuiltinOperator_CONCATENATION:
+      outputs = _opCreator->createConcat(inputs, params,
+                                         op->builtin_options_as<ConcatenationOptions>());
+      break;
+    case BuiltinOperator_RESHAPE:
+      outputs = _opCreator->createReshape(inputs, params, op->builtin_options_as<ReshapeOptions>());
+      break;
+    case BuiltinOperator_FULLY_CONNECTED:
+      outputs = _opCreator->createFullyConnected(inputs, params,
+                                                 op->builtin_options_as<FullyConnectedOptions>());
+      break;
+    case BuiltinOperator_SOFTMAX:
+      outputs = _opCreator->createSoftmax(inputs, params, op->builtin_options_as<SoftmaxOptions>());
+      break;
+    default:
+      assert(false && "All unsupported types should have been found before this pass.");
+  }
+
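+  // Remember the MIR node producing each output tensor so later operators can find their inputs.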
+  assert(op->outputs()->size() == outputs.size());
+  for (size_t i = 0; i < op->outputs()->size(); ++i)
+    _opsForTensorsTheyOutput[(*(op->outputs()))[i]] = outputs[i];
+}
+
+std::vector<INode::Ref> TfliteImporter::getPrecedingMIROps(const Operator* op) {
+  std::vector<INode::Ref> inputsForOp;
+
+  try {
+    for (auto i : *(op->inputs())) {
+      int buffer_idx = (*_tensors)[i]->buffer();
+      if ((*_buffers)[buffer_idx]->data() == nullptr) {
+        // By this point every non-constant input of "op" should already have a corresponding
+        // Model IR node that produces it. This holds because the TFLite format lists
+        // operators in execution order.
+        inputsForOp.push_back(_opsForTensorsTheyOutput.at(i));
+      }
+    }
+  }
+  catch (const std::out_of_range& e) {
+    throw PassException("Found a TFLite operator with an input tensor for which "
+                        "a corresponding Model IR node that outputs it was not created.");
+  }
+
+  return inputsForOp;
+}
+
+std::vector<std::shared_ptr<IrTensor>> TfliteImporter::createOpParams(const Operator* op) {
+  std::vector<std::shared_ptr<IrTensor>> params_for_op;
+
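+  // Inputs backed by non-empty buffers are constants (weights, biases) and become parameters.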
+  for (auto i : *(op->inputs())) {
+    const Tensor* t = (*_tensors)[i];
+    const Buffer* b = (*_buffers)[t->buffer()];
+    if (b->data() != nullptr) {
+      std::shared_ptr<IrTensor> tensor = createTensor(t, b);
+
+      unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
+
+      if ((opcode == BuiltinOperator_CONV_2D || opcode == BuiltinOperator_DEPTHWISE_CONV_2D)
+          && t->shape()->size() == 4) {
+        // Change dimension indices [0, 1, 2, 3] to [1, 2, 3, 0].
+        // This is needed because TFLite convolution weights are stored as NHWC, and we use HWCN.
+        // TODO: Currently this is only used by the interpreter and shape inference,
+        // don't forget to change this if tensor shape processing architecture changes.
+        params_for_op.emplace_back(mir::transposeTensor<1, 2, 3, 0>(tensor));
+      } else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2) {
+        params_for_op.emplace_back(mir::transposeTensor<1, 0>(tensor));
+      } else {
+        params_for_op.push_back(tensor);
+      }
+    }
+  }
+
+  return params_for_op;
+}
+
+std::shared_ptr<IrTensor> TfliteImporter::createTensor(const Tensor* t, const Buffer* b) {
+  // Create TensorVariant by copying the tensor buffer contents.
+  // Another option is to copy the data in a TensorVariant constructor.
+  std::shared_ptr<char> tensor_buffer_copy(new char[b->data()->size()],
+                                           [](char* d) { delete[] d; });
+  std::copy(b->data()->begin(), b->data()->end(), tensor_buffer_copy.get());
+
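+  // Map the TFLite element type to the MIR element size and data type.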
+  size_t elementSize;
+  IrTensor::DTYPE type;
+  switch (t->type()) {
+    case TensorType_UINT8:
+      elementSize = sizeof(uint8_t);
+      type = IrTensor::DTYPE::INT;
+      break;
+    case TensorType_FLOAT16:
+      elementSize = sizeof(uint16_t);
+      type = IrTensor::DTYPE::FLOAT;
+      break;
+    case TensorType_INT32:
+      elementSize = sizeof(uint32_t);
+      type = IrTensor::DTYPE::INT;
+      break;
+    case TensorType_FLOAT32:
+      elementSize = sizeof(uint32_t);
+      type = IrTensor::DTYPE::FLOAT;
+      break;
+    case TensorType_INT64:
+      elementSize = sizeof(uint64_t);
+      type = IrTensor::DTYPE::INT;
+      break;
+    default:
+      throw PassException(
+              std::string("Encountered unsupported tensor type ") +
+              EnumNamesTensorType()[t->type()]);
+  }
+
+  Shape tensor_shape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
 
-namespace nnc
-{
-namespace tflite
-{
-namespace v_dev
-{
+  return std::make_shared<IrTensor>(tensor_shape, tensor_buffer_copy, type, elementSize);
+}
 
-using namespace ::v_dev_tflite;
+void TfliteImporter::setGraphOutputs() {
+  // Marking nodes as output nodes.
+  for (auto output_idx : _graphOutputs)
+    _graph->markOutput(_opsForTensorsTheyOutput[output_idx]);
+}
 
-#include "tflite_importer.inline.cpp"
+void TfliteImporter::setIrNodeNames() {
+  // Setting names of the nodes.
+  // Note: the importer changes the computation graph (for example, TFLite Conv2D
+  // turns into IR Conv2D->BiasAdd->ReLU), so not all of the nodes will have names.
+  for (auto& item : _opsForTensorsTheyOutput)
+    item.second->setName((*_tensors)[item.first]->name()->c_str());
+}
 
-} // namespace v_dev
-} // namespace tflite
-} // namespace nnc
+}  // namespace nnc
index a712c40..4eae5e3 100644 (file)
 #ifndef NNCC_TFLITE_IMPORTER_H
 #define NNCC_TFLITE_IMPORTER_H
 
+#include <set>
 #include <memory>
 #include <string>
 
-#include "schema.h"
+#include "schema_generated.h"
+#include "pass/PassException.h"
 #include "passes/common_frontend/nn_importer.h"
 #include "passes/common_frontend/model_allocation.h"
-#include "pass/PassException.h"
 
-namespace nnc
-{
-namespace tflite
-{
-namespace v_dev
-{
+#include "core/modelIR/graph.h"
+#include "core/modelIR/ir_node.h"
+#include "core/modelIR/TensorUtil.h"
+#include "core/modelIR/TensorVariant.h"
+
+#include "tflite_op_creator.h"
+
+namespace nnc {
+
+class TfliteImporter : NNImporter {
+public:
+  explicit TfliteImporter(std::string filename);
+
+  /**
+  * @brief Import the model from file; must be called before the 'createIR' method
+  * @throw PassException if the model could not be parsed or contains operations NNC does not support
+  */
+  void import() override;
+
+  /**
+  * @brief Create MIR graph from the TFLite model; must be called after the 'import' method
+  * @return MIR graph corresponding to the processed TFLite model
+  */
+  mir::Graph* createIR() override;
+
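+  /**
+  * @brief Import the model and additionally unpack it into the flatbuffers object API (ModelT)
+  */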
+  void importUnpacked();
+
+private:
+  std::string _filename;
+  std::unique_ptr<ModelAllocation> _modelRaw;
+  std::unique_ptr<::tflite::ModelT> _model;
+  const ::tflite::Model* _modelPacked = nullptr;
+
+  Graph* _graph = nullptr;
+  std::unique_ptr<OpCreator> _opCreator;
+
+  const flatbuffers::Vector<flatbuffers::Offset<::tflite::OperatorCode>>* _opcodes = nullptr;
+  const flatbuffers::Vector<flatbuffers::Offset<::tflite::Tensor>>* _tensors = nullptr;
+  const flatbuffers::Vector<flatbuffers::Offset<::tflite::Buffer>>* _buffers = nullptr;
+
+  std::vector<int32_t> _graphInputs;
+  std::vector<int32_t> _graphOutputs;
+
+  // Maps indices of TFLite tensors to the MIR operations/nodes
+  // that produce these tensors as output.
+  std::map<int, INode::Ref> _opsForTensorsTheyOutput;
+
+  std::set<std::string> _problemsOpSet;
+
+  /**
+  * @brief Walk the TFLite graph and create the MIR graph
+  */
+  void walkGraphAndCreateMIR();
+
+  void walkModel(const ::tflite::Model*);
+
+  void walkSubGraph(const ::tflite::SubGraph*);
+
+  void walkOperator(const ::tflite::Operator*);
+
+  /**
+  * @brief Walk the TFLite graph and collect operators unsupported by NNC
+  * @throw PassException with a message listing the detected problems
+  */
+  void collectUnsupportedOps();
+
+  void processUnsupportedOp(const ::tflite::Operator* op);
+
+  /**
+  * @brief Mark output MIR nodes
+  */
+  void setGraphOutputs();
+
+  /**
+  * @brief Set MIR node names
+  */
+  void setIrNodeNames();
+
+  /**
+  * @brief Prepare data for creating an MIR node/operation.
+  */
+  std::vector<std::shared_ptr<IrTensor>> createOpParams(const ::tflite::Operator* op);
+
+  /**
+  * @brief Return the MIR ops preceding the given TFLite operator
+  */
+  std::vector<INode::Ref> getPrecedingMIROps(const ::tflite::Operator* op);
+
+  std::shared_ptr<IrTensor> createTensor(const ::tflite::Tensor* t, const ::tflite::Buffer* b);
 
-using namespace ::v_dev_tflite;
-#include "tflite_importer.inline.h"
+  std::shared_ptr<IrTensor> convertTensorForConv(std::shared_ptr<IrTensor>);
+};
 
-} // namespace v_dev
-} // namespace tflite
-} // namespace nnc
+}  // namespace nnc
 
-#endif // NNCC_TFLITE_IMPORTER_H
+#endif  // NNCC_TFLITE_IMPORTER_H
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.cpp
deleted file mode 100644 (file)
index 27b6dbb..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-TfliteImporter::TfliteImporter(std::string filename)
-{
-  _modelFilename = filename;
-  modelRaw.reset(new ModelAllocation{std::move(filename)});
-}
-
-void TfliteImporter::importUnpacked()
-{
-  import();
-
-  model.reset(modelPacked->UnPack());
-}
-
-void TfliteImporter::import()
-{
-  const void *modelBuffer = modelRaw->getDataPnt();
-
-  if (!modelBuffer)
-    throw PassException("Could not load model: " + _modelFilename+ "\n");
-
-  auto verifier = flatbuffers::Verifier(reinterpret_cast<const uint8_t *>(modelBuffer),
-                                        modelRaw->getNumBytes());
-
-  if (!VerifyModelBuffer(verifier))
-    throw PassException("Could not load model: " + _modelFilename + "\n");
-
-  modelPacked = GetModel(modelRaw->getDataPnt());
-}
-
-Graph *TfliteImporter::createIR()
-{
-  IrVisitor irCreator{};
-  ModelWalker walker{std::vector<Visitor *>{&irCreator}};
-
-  walker.walk(modelPacked);
-  irCreator.setIrNodeNames();
-  irCreator.setGraphOutputs();
-
-  return irCreator.getGraph();
-}
-
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h b/contrib/nnc/passes/tflite_frontend/tflite_importer.inline.h
deleted file mode 100644 (file)
index 6bb1191..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-class TfliteImporter : NNImporter {
-public:
-  explicit TfliteImporter(std::string filename);
-
-  void import() override;
-  mir::Graph *createIR() override;
-
-  void importUnpacked();
-
-protected:
-  std::string _modelFilename;
-  std::unique_ptr<ModelAllocation> modelRaw;
-  std::unique_ptr<ModelT> model;
-  const Model *modelPacked = nullptr;
-};
-
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp b/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.cpp
deleted file mode 100644 (file)
index 192c182..0000000
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdexcept>
-#include <algorithm>
-
-#include "schema.h"
-#include "pass/PassException.h"
-#include "core/modelIR/Shape.h"
-#include "core/modelIR/Index.h"
-#include "core/modelIR/TensorUtil.h"
-#include "core/modelIR/operations/variable_op.h"
-
-#include "passes/common_frontend/shape_helper.h"
-#include "tflite_ir_visitor.h"
-#include "tflite_op_creator.h"
-
-
-namespace nnc
-{
-namespace tflite
-{
-
-using nnc::mir::Index;
-using VariableOp = nnc::mir::ops::VariableOp;
-using nnc::mir::Shape;
-using nnc::mir::transposeTensor;
-
-IrVisitor::IrVisitor()
-{
-  // Note: this object will later be deleted in the PluginInstance destructor.
-  // TODO: make this a smart pointer. Note that it requires changing the NNImporter interface,
-  //       because currently it returns a void*.
-  graph = new Graph();
-  opCreator.reset(new OpCreator(graph));
-}
-
-void IrVisitor::visit(const Model *m)
-{
-  opcodes = m->operator_codes();
-  buffers = m->buffers();
-}
-
-void IrVisitor::visit(const SubGraph *s)
-{
-  tensors = s->tensors();
-
-  graphInputs.assign(s->inputs()->begin(), s->inputs()->end());
-  graphOutputs.assign(s->outputs()->begin(), s->outputs()->end());
-
-  for (auto i : *s->inputs())
-  {
-    const Tensor *t = (*s->tensors())[i];
-    auto node = graph->create<VariableOp>(t->name()->c_str());
-    opsForTensorsTheyOutput[i] = node;
-
-    Shape inputShape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
-    // So far we assume that if the first dimension is equal to 1,
-    // then it is the batch dimension and should be ignored
-    ShapeHelper::cutOffBatchDim(inputShape);
-    node->getOperation()->setOutputShape(0, inputShape);
-  }
-}
-
-void IrVisitor::visit(const Operator *op)
-{
-  auto inputs = createOpInputs(op);
-  auto params = createOpParams(op);
-
-  std::vector<INode::Ref> outputs;
-
-  unsigned int opcode = (*opcodes)[op->opcode_index()]->builtin_code();
-  // TODO: support other NN operator types
-  switch (opcode)
-  {
-  case BuiltinOperator_CONV_2D:
-    outputs = opCreator->createConv2D(inputs, params, op->builtin_options_as<Conv2DOptions>());
-    break;
-  case BuiltinOperator_DEPTHWISE_CONV_2D:
-    outputs = opCreator->createDepthConv2D(inputs, params,
-                                          op->builtin_options_as<DepthwiseConv2DOptions>());
-    break;
-  case BuiltinOperator_MAX_POOL_2D:
-    outputs = opCreator->createMaxPool(inputs, params, op->builtin_options_as<Pool2DOptions>());
-    break;
-  case BuiltinOperator_AVERAGE_POOL_2D:
-    outputs = opCreator->createAvgPool(inputs, params, op->builtin_options_as<Pool2DOptions>());
-    break;
-  case BuiltinOperator_CONCATENATION:
-    outputs = opCreator->createConcat(inputs, params, op->builtin_options_as<ConcatenationOptions>());
-    break;
-  case BuiltinOperator_RESHAPE:
-    outputs = opCreator->createReshape(inputs, params, op->builtin_options_as<ReshapeOptions>());
-    break;
-  case BuiltinOperator_FULLY_CONNECTED:
-    outputs = opCreator->createFullyConnected(inputs, params, op->builtin_options_as<FullyConnectedOptions>());
-    break;
-  case BuiltinOperator_SOFTMAX:
-    outputs = opCreator->createSoftmax(inputs, params, op->builtin_options_as<SoftmaxOptions>());
-    break;
-  default:
-    throw PassException(
-            std::string("Encountered unsupported TFLite operator: ") +
-            EnumNamesBuiltinOperator()[opcode]);
-  }
-
-  for (size_t i = 0; i < op->outputs()->size(); ++i)
-  {
-    opsForTensorsTheyOutput[(*(op->outputs()))[i]] = outputs[i];
-  }
-}
-
-void IrVisitor::visit(const Tensor *) {}
-void IrVisitor::visit(const OperatorCode *) {}
-void IrVisitor::visit(const Buffer *) {}
-
-std::vector<INode::Ref> IrVisitor::createOpInputs(const Operator *op)
-{
-  std::vector<INode::Ref> inputsForOp;
-
-  try
-  {
-    for (auto i : *(op->inputs()))
-    {
-      int bufferIdx = (*tensors)[i]->buffer();
-      if ((*buffers)[bufferIdx]->data() == nullptr)
-      {
-        // By this point every input for the operation "op" should have corresponding
-        // Model IR operations that output its inputs. This assumption is provided by the fact
-        // that TFLite format specifies all operations in the execution order.
-        inputsForOp.push_back(opsForTensorsTheyOutput.at(i));
-      }
-    }
-  }
-  catch (const std::out_of_range &e)
-  {
-    throw PassException("Found a TFLite operator with an input tensor for which "
-                        "a corresponding Model IR node that outputs it was not created.");
-  }
-
-  return inputsForOp;
-}
-
-std::vector<std::shared_ptr<IrTensor>> IrVisitor::createOpParams(const Operator *op)
-{
-  std::vector<std::shared_ptr<IrTensor>> paramsForOp;
-
-  for (auto i : *(op->inputs()))
-  {
-    const Tensor *t = (*tensors)[i];
-    const Buffer *b = (*buffers)[t->buffer()];
-    if (b->data() != nullptr)
-    {
-      std::shared_ptr<IrTensor> tensor = createTensor(t, b);
-
-      unsigned int opcode = (*opcodes)[op->opcode_index()]->builtin_code();
-
-      if ((opcode == BuiltinOperator_CONV_2D || opcode == BuiltinOperator_DEPTHWISE_CONV_2D)
-          && t->shape()->size() == 4)
-      {
-        // Change dimension indices [0, 1, 2, 3] to [1, 2, 3, 0].
-        // This is needed because TFLite convolution weights are stored as NHWC, and we use HWCN.
-        // TODO: Currently this is only used by the interpreter and shape inference,
-        // don't forget to change this if tensor shape processing architecture changes.
-        paramsForOp.emplace_back(transposeTensor<1, 2, 3, 0>(tensor));
-      }
-      else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2)
-      {
-        paramsForOp.emplace_back(transposeTensor<1, 0>(tensor));
-      }
-      else
-      {
-        paramsForOp.push_back(tensor);
-      }
-    }
-  }
-
-  return paramsForOp;
-}
-
-std::shared_ptr<IrTensor> IrVisitor::createTensor(const Tensor *t, const Buffer *b)
-{
-  // Create TensorVariant by copying the tensor buffer contents.
-  // Another option is to copy the data in a TensorVariant constructor.
-  std::shared_ptr<char> tensorBufferCopy(new char[b->data()->size()], [](char *d) { delete[] d; });
-  std::copy(b->data()->begin(), b->data()->end(), tensorBufferCopy.get());
-
-  size_t elementSize;
-  IrTensor::DTYPE type;
-  switch (t->type())
-  {
-    case TensorType_UINT8:
-      elementSize = sizeof(uint8_t);
-      type = IrTensor::DTYPE::INT;
-      break;
-    case TensorType_FLOAT16:
-      elementSize = sizeof(uint16_t);
-      type = IrTensor::DTYPE::FLOAT;
-      break;
-    case TensorType_INT32:
-      elementSize = sizeof(uint32_t);
-      type = IrTensor::DTYPE::INT;
-      break;
-    case TensorType_FLOAT32:
-      elementSize = sizeof(uint32_t);
-      type = IrTensor::DTYPE::FLOAT;
-      break;
-    case TensorType_INT64:
-      elementSize = sizeof(uint64_t);
-      type = IrTensor::DTYPE::INT;
-      break;
-    default:
-      throw PassException(
-              std::string("Encountered unsupported tensor type ") +
-              EnumNamesTensorType()[t->type()]);
-  }
-
-  Shape tensorShape = ShapeHelper::createShape(*t->shape(), t->shape()->size());
-
-  return std::make_shared<IrTensor>(tensorShape, tensorBufferCopy, type, elementSize);
-}
-
-Graph *IrVisitor::getGraph() { return graph; }
-
-void IrVisitor::setGraphOutputs()
-{
-  // Marking nodes as output nodes.
-  for (auto outputIdx : graphOutputs)
-  {
-    graph->markOutput(opsForTensorsTheyOutput[outputIdx]);
-  }
-}
-
-void IrVisitor::setIrNodeNames()
-{
-  // Setting names of the nodes.
-  // Note: we change the computation graph, (for example, TFLite Conv2D
-  // turns into IR Conv2D->BiasAdd->ReLU), so not all of the nodes will have names.
-  for (auto &item : opsForTensorsTheyOutput)
-  {
-    item.second->setName((*tensors)[item.first]->name()->c_str());
-  }
-}
-
-} // namespace tflite
-} // namespace nnc
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h b/contrib/nnc/passes/tflite_frontend/tflite_ir_visitor.h
deleted file mode 100644 (file)
index 397d666..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_TFLITE_IR_VISITOR_H
-#define NNCC_TFLITE_IR_VISITOR_H
-
-#include "flatbuffers/flatbuffers.h"
-
-#include <map>
-#include <vector>
-#include <memory>
-
-#include "core/modelIR/graph.h"
-#include "core/modelIR/ir_node.h"
-#include "core/modelIR/TensorVariant.h"
-
-#include "schema.h"
-#include "tflite_visitor.h"
-#include "tflite_op_creator.h"
-
-namespace nnc
-{
-namespace tflite
-{
-
-using namespace v_dev_tflite;
-
-using nnc::mir::Graph;
-using nnc::mir::INode;
-using IrTensor = nnc::mir::TensorVariant;
-
-class IrVisitor : public Visitor
-{
-public:
-  IrVisitor();
-
-  void visit(const Model *) override;
-  void visit(const SubGraph *) override;
-  void visit(const Tensor *) override;
-  void visit(const OperatorCode *) override;
-  void visit(const Operator *) override;
-  void visit(const Buffer *) override;
-
-  Graph *getGraph();
-  void setGraphOutputs();
-  void setIrNodeNames();
-
-private:
-  Graph *graph = nullptr;
-  std::unique_ptr<OpCreator> opCreator;
-
-  const flatbuffers::Vector<flatbuffers::Offset<OperatorCode>> *opcodes = nullptr;
-  const flatbuffers::Vector<flatbuffers::Offset<Tensor>> *tensors = nullptr;
-  const flatbuffers::Vector<flatbuffers::Offset<Buffer>> *buffers = nullptr;
-
-  std::vector<int32_t> graphInputs;
-  std::vector<int32_t> graphOutputs;
-
-  // This map maps indices of the tensors in TFLite to operations/nodes in the IR graph
-  // that correspond to operations having these tensors as output.
-  std::map<int, INode::Ref> opsForTensorsTheyOutput;
-
-  // These two methods prepare data for creating an IR node/operation.
-  std::vector<std::shared_ptr<IrTensor>> createOpParams(const Operator *op);
-  std::vector<INode::Ref> createOpInputs(const Operator *op);
-
-  std::shared_ptr<IrTensor> createTensor(const Tensor *t, const Buffer *b);
-  std::shared_ptr<IrTensor> convertTensorForConv(std::shared_ptr<IrTensor>);
-};
-
-} // namespace tflite
-} // namespace nnc
-
-#endif // NNCC_TFLITE_IR_VISITOR_H
index 97507a4..72a0def 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include "tflite_op_creator.h"
+#include "schema_generated.h"
 
 #include "core/modelIR/operations/concat_op.h"
 #include "core/modelIR/operations/conv_2d_op.h"
 #include "pass/PassException.h"
 
 using namespace nnc::mir;
+using namespace ::tflite;
 
-namespace nnc
-{
-namespace tflite
-{
+namespace nnc {
+
+void OpCreator::checkConv2D(const Conv2DOptions* opts, std::set<std::string>& problems_op_set) {
+  checkActivationType(opts->fused_activation_function(), problems_op_set);
+}
 
 std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
-                                                const Conv2DOptions *opts)
-{
+                                                const Conv2DOptions* opts) {
   auto outputs = createOp<ops::Conv2DOp>(inputs, ActivationFunctionType_NONE, std::move(*params[0]),
                                          Shape{static_cast<int32_t>(opts->stride_h()),
                                                static_cast<int32_t>(opts->stride_w()), 1},
@@ -46,9 +48,13 @@ std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams par
                                   std::move(*params[1]));
 }
 
+void OpCreator::checkDepthConv2D(const DepthwiseConv2DOptions* opts,
+                                 std::set<std::string>& problems_op_set) {
+  checkActivationType(opts->fused_activation_function(), problems_op_set);
+}
+
 std::vector<INode::Ref> OpCreator::createDepthConv2D(InputOps inputs, InputParams params,
-                                                     const DepthwiseConv2DOptions *opts)
-{
+                                                     const DepthwiseConv2DOptions* opts) {
   auto outputs = createOp<ops::DepthwiseConv2DOp>(
           inputs, ActivationFunctionType_NONE, std::move(*params[0]),
           Shape{static_cast<int32_t>(opts->stride_h()),
@@ -58,17 +64,24 @@ std::vector<INode::Ref> OpCreator::createDepthConv2D(InputOps inputs, InputParam
                                   std::move(*params[1]));
 }
 
+void OpCreator::checkConcat(const ConcatenationOptions* opts,
+                            std::set<std::string>& problems_op_set) {
+  checkActivationType(opts->fused_activation_function(), problems_op_set);
+}
+
 std::vector<INode::Ref> OpCreator::createConcat(InputOps inputs, InputParams params,
-                                                const ConcatenationOptions *opts)
-{
+                                                const ConcatenationOptions* opts) {
   // Decrementing axis to account for the unnecessary batch dimension
   return createOp<ops::ConcatOp>(inputs, opts->fused_activation_function(), inputs.size(),
                                  opts->axis() - 1);
 }
 
+void OpCreator::checkPool(const Pool2DOptions* opts, std::set<std::string>& problems_op_set) {
+  checkActivationType(opts->fused_activation_function(), problems_op_set);
+}
+
 std::vector<INode::Ref> OpCreator::createMaxPool(InputOps inputs, InputParams params,
-                                                 const Pool2DOptions *opts)
-{
+                                                 const Pool2DOptions* opts) {
   return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
                                Shape{static_cast<int32_t>(opts->filter_height()),
                                      static_cast<int32_t>(opts->filter_width()), 1},
@@ -79,8 +92,7 @@ std::vector<INode::Ref> OpCreator::createMaxPool(InputOps inputs, InputParams pa
 }
 
 std::vector<INode::Ref> OpCreator::createAvgPool(InputOps inputs, InputParams params,
-                                                 const Pool2DOptions *opts)
-{
+                                                 const Pool2DOptions* opts) {
   return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
                                Shape{static_cast<int32_t>(opts->filter_height()),
                                      static_cast<int32_t>(opts->filter_width()), 1},
@@ -91,15 +103,13 @@ std::vector<INode::Ref> OpCreator::createAvgPool(InputOps inputs, InputParams pa
 }
 
 std::vector<INode::Ref> OpCreator::createSoftmax(InputOps inputs, InputParams params,
-                                                 const SoftmaxOptions *opts)
-{
+                                                 const SoftmaxOptions* opts) {
   // -1 represents last one dimension
   return createOp<ops::SoftmaxOp>(inputs, ActivationFunctionType_NONE, -1);
 }
 
 std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams params,
-                                                 const ReshapeOptions *opts)
-{
+                                                 const ReshapeOptions* opts) {
   auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
 
   // TODO: we should also support "-1" values in new_shape, which means that correct
@@ -110,27 +120,39 @@ std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams pa
   return outputs;
 }
 
-std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps &inputs, InputParams &params,
-                                                        const FullyConnectedOptions *opts)
-{
+void OpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
+                                    std::set<std::string>& problems_op_set) {
+  checkActivationType(opts->fused_activation_function(), problems_op_set);
+}
+
+std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
+                                                        const FullyConnectedOptions* opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
   auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
   int32_t fcInputSize = params[0]->getShape().dim(0);
   outputs[0]->getOperation()->setOutputShape(0, {1, fcInputSize});
 
-  auto fcOutputs = createOp<ops::FullyConnectedOp>(outputs, ActivationFunctionType_NONE, std::move(*params[0]));
-  return createOp<ops::BiasAddOp>(fcOutputs, opts->fused_activation_function(), std::move(*params[1]));
+  auto fc_outputs = createOp<ops::FullyConnectedOp>(outputs, ActivationFunctionType_NONE,
+                                                    std::move(*params[0]));
+  return createOp<ops::BiasAddOp>(fc_outputs, opts->fused_activation_function(),
+                                  std::move(*params[1]));
 }
 
-INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activationType)
-{
+void OpCreator::checkActivationType(ActivationFunctionType activation_type,
+                                    std::set<std::string>& problems_op_set) {
+  if (activation_type != ActivationFunctionType_NONE
+      && activation_type != ActivationFunctionType_RELU
+      && activation_type != ActivationFunctionType_RELU6)
+    problems_op_set.insert(std::string("Unsupported activation type: ")
+                           + EnumNamesActivationFunctionType()[activation_type]);
+}
+
+INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activation_type) {
   INode::Ref activation;
 
-  if (activationType != ActivationFunctionType_NONE)
-  {
+  if (activation_type != ActivationFunctionType_NONE) {
     // TODO: process other activation types
-    switch (activationType)
-    {
+    switch (activation_type) {
       case ActivationFunctionType_RELU:
         activation = graph->create<ops::ReluOp>("");
         break;
@@ -138,30 +160,23 @@ INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionTyp
         activation = graph->create<ops::CappedReluOp>("", 6);
         break;
       default:
-        throw PassException(std::string("Encountered unsupported NN activation type: ") +
-                            EnumNamesActivationFunctionType()[activationType]);
+        assert(false && "Unsupported activation types must be detected before this pass");
     }
 
     assert(input->getOperation()->getNumOutputs() == 1);
     activation->connectInputTo(0, input->getOutput(0));
     return activation;
-  }
-  else
-  {
+  } else {
     return input;
   }
 }
 
-void OpCreator::connectInputs(INode::Ref op, std::vector<INode::Ref> &inputs)
-{
+void OpCreator::connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs) {
   // TODO: this part doesn't support the situation where an operator takes as input
   // some tensor that is not the 0th output of some other operator
   assert(inputs.size() == op->getOperation()->getNumInputs());
   for (size_t i = 0; i < inputs.size(); ++i)
-  {
     op->connectInputTo(i, inputs[i]->getOutput(0));
-  }
 }
 
-} // namespace tflite
 } // namespace nnc
index 5cb5da9..9321ddc 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef NNCC_TFLITE_OP_CREATOR_H
 #define NNCC_TFLITE_OP_CREATOR_H
 
+#include <set>
 #include <map>
 #include <vector>
 #include <memory>
 
 #include "core/modelIR/operations/common.h"
 
-#include "schema.h"
+#include "schema_generated.h"
 #include "passes/common_frontend/shape_helper.h"
 
-namespace nnc
-{
-namespace tflite
-{
+namespace nnc {
 
-using namespace v_dev_tflite;
+namespace ops = mir::ops;
+using mir::Graph;
+using mir::INode;
+using IrTensor = mir::TensorVariant;
+using mir::Shape;
 
-namespace ops = nnc::mir::ops;
-using nnc::mir::Graph;
-using nnc::mir::INode;
-using IrTensor = nnc::mir::TensorVariant;
-using nnc::mir::Shape;
-
-class OpCreator
-{
+class OpCreator {
 public:
-  using InputOps = std::vector<INode::Ref> &;
-  using InputParams = std::vector<std::shared_ptr<IrTensor>> &;
-
-  explicit OpCreator(Graph *g) : graph(g) {};
-
-  std::vector<INode::Ref> createConv2D(InputOps inputs, InputParams params,
-                                       const Conv2DOptions *opts);
-  std::vector<INode::Ref> createDepthConv2D(InputOps inputs, InputParams params,
-                                            const DepthwiseConv2DOptions *opts);
-  std::vector<INode::Ref> createConcat(InputOps inputs, InputParams params,
-                                       const ConcatenationOptions *opts);
-  std::vector<INode::Ref> createMaxPool(InputOps inputs, InputParams params,
-                                        const Pool2DOptions *opts);
-  std::vector<INode::Ref> createAvgPool(InputOps inputs, InputParams params,
-                                        const Pool2DOptions *opts);
-  std::vector<INode::Ref> createSoftmax(InputOps inputs, InputParams params,
-                                        const SoftmaxOptions *opts);
-  std::vector<INode::Ref> createReshape(InputOps inputs, InputParams params,
-                                        const ReshapeOptions *opts);
-  std::vector<INode::Ref> createFullyConnected(InputOps inputs, InputParams params,
-                                               const FullyConnectedOptions *opts);
+  using InputOps = std::vector<INode::Ref>&;
+  using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
+
+  explicit OpCreator(Graph* g) : graph(g) {};
+
+  std::vector<INode::Ref> createConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
+
+  std::vector<INode::Ref> createDepthConv2D(InputOps, InputParams,
+                                            const ::tflite::DepthwiseConv2DOptions*);
+
+  std::vector<INode::Ref> createConcat(InputOps, InputParams,
+                                       const ::tflite::ConcatenationOptions*);
+
+  std::vector<INode::Ref> createMaxPool(InputOps, InputParams, const ::tflite::Pool2DOptions*);
+
+  std::vector<INode::Ref> createAvgPool(InputOps, InputParams, const ::tflite::Pool2DOptions*);
+
+  std::vector<INode::Ref> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
+
+  std::vector<INode::Ref> createReshape(InputOps, InputParams, const ::tflite::ReshapeOptions*);
+
+  std::vector<INode::Ref> createFullyConnected(InputOps, InputParams,
+                                               const ::tflite::FullyConnectedOptions*);
+
+  void checkPool(const ::tflite::Pool2DOptions*, std::set<std::string>&);
+
+  void checkConcat(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
+
+  void checkConv2D(const ::tflite::Conv2DOptions*, std::set<std::string>&);
+
+  void checkDepthConv2D(const ::tflite::DepthwiseConv2DOptions*, std::set<std::string>&);
+
+  void checkFullyConnected(const ::tflite::FullyConnectedOptions*, std::set<std::string>&);
 
 private:
-  Graph *graph = nullptr;
+  Graph* graph = nullptr;
+
+  std::map<::tflite::Padding, ops::PaddingType> paddingMap = {
+          {::tflite::Padding_SAME,  ops::PaddingType::Same},
+          {::tflite::Padding_VALID, ops::PaddingType::Valid}};
+
+  void checkActivationType(::tflite::ActivationFunctionType, std::set<std::string>&);
 
-  std::map<Padding, ops::PaddingType> paddingMap = {{Padding_SAME, ops::PaddingType::Same},
-                                                    {Padding_VALID, ops::PaddingType::Valid}};
+  INode::Ref addFusedActivation(INode::Ref input, ::tflite::ActivationFunctionType activationType);
 
-  INode::Ref addFusedActivation(INode::Ref input, ActivationFunctionType activationType);
-  void connectInputs(INode::Ref op, std::vector<INode::Ref> &inputs);
+  void connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs);
 
-  template <typename OpType, typename... Types>
-  std::vector<INode::Ref> createOp(std::vector<INode::Ref> &inputs,
-                                   ActivationFunctionType activation, Types &&... args);
+  template<typename OpType, typename... Types>
+  std::vector<INode::Ref> createOp(std::vector<INode::Ref>& inputs,
+                                   ::tflite::ActivationFunctionType activation, Types&& ... args);
 };
 
-template <typename OpType, typename... Types>
-std::vector<INode::Ref> OpCreator::createOp(std::vector<INode::Ref> &inputs,
-                                            ActivationFunctionType activation, Types &&... args)
-{
+template<typename OpType, typename... Types>
+std::vector<INode::Ref> OpCreator::createOp(
+        std::vector<INode::Ref>& inputs,
+        ::tflite::ActivationFunctionType activation, Types&& ... args) {
   std::vector<INode::Ref> outputs;
 
   // TODO: how to name operations? in Tensorflow tensors get names, not operations
@@ -99,7 +110,6 @@ std::vector<INode::Ref> OpCreator::createOp(std::vector<INode::Ref> &inputs,
   return outputs;
 }
 
-} // namespace tflite
 } // namespace nnc
 
 #endif // NNCC_TFLITE_OP_CREATOR_H
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_visitor.h b/contrib/nnc/passes/tflite_frontend/tflite_visitor.h
deleted file mode 100644 (file)
index 1792c1d..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_TFLITE_VISITOR_H
-#define NNCC_TFLITE_VISITOR_H
-
-#include "schema.h"
-
-using namespace v_dev_tflite;
-
-namespace nnc
-{
-namespace tflite
-{
-
-class Visitor
-{
-public:
-  virtual void visit(const Model *) = 0;
-  virtual void visit(const SubGraph *) = 0;
-  virtual void visit(const Tensor *) = 0;
-  virtual void visit(const OperatorCode *) = 0;
-  virtual void visit(const Operator *) = 0;
-  virtual void visit(const Buffer *) = 0;
-};
-
-} // namespace tflite
-} // namespace nnc
-
-#endif // NNCC_TFLITE_VISITOR_H
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_walker.cpp b/contrib/nnc/passes/tflite_frontend/tflite_walker.cpp
deleted file mode 100644 (file)
index 20f1c5a..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "tflite_walker.h"
-
-namespace nnc
-{
-namespace tflite
-{
-
-void ModelWalker::walkContents(const Model *m)
-{
-  walkVector(m->operator_codes());
-  walkVector(m->buffers());
-  walkVector(m->subgraphs());
-}
-
-void ModelWalker::walkContents(const SubGraph *s)
-{
-  walkVector(s->tensors());
-  walkVector(s->operators());
-}
-
-void ModelWalker::walkContents(const Tensor *t) {}
-void ModelWalker::walkContents(const Buffer *b) {}
-void ModelWalker::walkContents(const OperatorCode *oc) {}
-void ModelWalker::walkContents(const Operator *) {}
-
-} // namespace tflite
-} // namespace nnc
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_walker.h b/contrib/nnc/passes/tflite_frontend/tflite_walker.h
deleted file mode 100644 (file)
index df29e9d..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_TFLITE_WALKER_H
-#define NNCC_TFLITE_WALKER_H
-
-#include "flatbuffers/flatbuffers.h"
-
-#include <vector>
-#include <memory>
-
-#include "schema.h"
-#include "tflite_visitor.h"
-
-using namespace v_dev_tflite;
-
-namespace nnc
-{
-namespace tflite
-{
-
-class ModelWalker
-{
-public:
-  explicit ModelWalker(std::vector<Visitor *> actions) : actions(std::move(actions)) {};
-
-  template <typename T> void walk(T *elem);
-  template <typename T> void performActions(T *elem);
-  template <typename T> void walkVector(const flatbuffers::Vector<flatbuffers::Offset<T>> *v);
-
-private:
-  void walkContents(const Model *);
-  void walkContents(const Tensor *);
-  void walkContents(const OperatorCode *);
-  void walkContents(const Operator *);
-  void walkContents(const SubGraph *);
-  void walkContents(const Buffer *);
-
-  std::vector<Visitor *> actions;
-};
-
-template <typename T> void ModelWalker::walk(T *elem)
-{
-  performActions(elem);
-  walkContents(elem);
-}
-
-template <typename T> void ModelWalker::performActions(T *elem)
-{
-  for (auto action : actions)
-  {
-    action->visit(elem);
-  }
-}
-
-template <typename T>
-void ModelWalker::walkVector(const flatbuffers::Vector<flatbuffers::Offset<T>> *v)
-{
-  for (auto it = v->begin(); it != v->end(); ++it)
-  {
-    walk(*it);
-  }
-}
-
-} // namespace tflite
-} // namespace nnc
-
-#endif // NNCC_TFLITE_WALKER_H
index 62426cc..2fe6f8b 100644 (file)
@@ -32,7 +32,7 @@ int main(int argc, const char **argv)
   cli::CommandLine::getParser()->parseCommandLine(argc, argv);
   std::string modelName = cli::inputFile;
 
-  nnc::tflite::v_dev::TfliteImporter importer{modelName};
+  nnc::TfliteImporter importer{modelName};
 
   importer.import();
 
index d587200..93be98a 100644 (file)
@@ -17,3 +17,4 @@ add_subdirectory(core)
 add_subdirectory(soft_backend)
 add_subdirectory(support)
 add_subdirectory(caffe_frontend)
+add_subdirectory(tflite_frontend)
diff --git a/contrib/nnc/unittests/tflite_frontend/CMakeLists.txt b/contrib/nnc/unittests/tflite_frontend/CMakeLists.txt
new file mode 100644 (file)
index 0000000..632686f
--- /dev/null
@@ -0,0 +1,9 @@
+file(GLOB_RECURSE TESTS "*.cpp")
+
+add_definitions(-DTFLITE_TEST_DIR="${CMAKE_CURRENT_SOURCE_DIR}/test_data/")
+
+add_nnc_unit_test(nnc_tflite_frontend_test ${TESTS} ${OPTIONS_SRC})
+if (TARGET nnc_tflite_frontend_test)
+    nncc_target_link_libraries(nnc_tflite_frontend_test tflite_import nnc_support nnc_core)
+    target_include_directories(nnc_tflite_frontend_test PRIVATE ${NNC_TFLITE_FRONTEND_DIR})
+endif()
diff --git a/contrib/nnc/unittests/tflite_frontend/test_data/unsupported.tflite b/contrib/nnc/unittests/tflite_frontend/test_data/unsupported.tflite
new file mode 100644 (file)
index 0000000..496c0e2
Binary files /dev/null and b/contrib/nnc/unittests/tflite_frontend/test_data/unsupported.tflite differ
diff --git a/contrib/nnc/unittests/tflite_frontend/unsupportedTfliteModel.cpp b/contrib/nnc/unittests/tflite_frontend/unsupportedTfliteModel.cpp
new file mode 100644 (file)
index 0000000..c89d158
--- /dev/null
@@ -0,0 +1,27 @@
+#include "tflite_importer.h"
+#include "gtest/gtest.h"
+#include "pass/PassException.h"
+#include <string>
+#include <iostream>
+
+const char *ErrorMsg = "Detected problems:\n"
+                       "ADD: unsupported operator\n"
+                       "TANH: unsupported operator\n";
+
+// When adding support for new layers, change the model, not the test
+TEST(TFLITE_IMPORT_UNSUPPORTED, ImportModelWithUnsupportedLayers) {
+  std::string filename = std::string(TFLITE_TEST_DIR) + "unsupported.tflite";
+  std::cout << filename << "\n";
+
+  nnc::TfliteImporter importer{filename};
+  try {
+    importer.import();
+    importer.createIR();
+  }
+  catch (nnc::PassException &e) {
+    ASSERT_EQ(std::string(ErrorMsg), e.what());
+    return;
+  }
+
+  FAIL();
+}