[nnc] Remove Model IR after all passes (#2036)
authorРоман Михайлович Русяев/AI Tools Lab /SRR/Staff Engineer/삼성전자 <r.rusyaev@samsung.com>
Tue, 30 Oct 2018 09:05:33 +0000 (12:05 +0300)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Tue, 30 Oct 2018 09:05:33 +0000 (12:05 +0300)
* add `cleanup` method in `Pass` class
* remove redundant frontend classes
* free all pass data in `PassManager` destructor
* replace `OpCreator` with CaffeOpCreator/TFLiteOpCreator to avoid violation of ODR
* free resources in unit tests

Signed-off-by: Roman Rusyaev <r.rusyaev@samsung.com>
24 files changed:
contrib/nnc/driver/Driver.cpp
contrib/nnc/examples/caffe_frontend/model_dump.cpp
contrib/nnc/examples/tflite_frontend/sanity_check.cpp
contrib/nnc/include/pass/Pass.h
contrib/nnc/include/pass/PassData.h
contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h [deleted file]
contrib/nnc/include/passes/caffe_frontend/caffe_importer.h [moved from contrib/nnc/passes/caffe_frontend/caffe_importer.h with 91% similarity]
contrib/nnc/include/passes/caffe_frontend/caffe_op_creator.h [moved from contrib/nnc/passes/caffe_frontend/caffe_op_creator.h with 95% similarity]
contrib/nnc/include/passes/caffe_frontend/caffe_op_types.h [moved from contrib/nnc/passes/caffe_frontend/caffe_op_types.h with 100% similarity]
contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h [deleted file]
contrib/nnc/include/passes/tflite_frontend/tflite_importer.h [moved from contrib/nnc/passes/tflite_frontend/tflite_importer.h with 91% similarity]
contrib/nnc/include/passes/tflite_frontend/tflite_op_creator.h [moved from contrib/nnc/passes/tflite_frontend/tflite_op_creator.h with 96% similarity]
contrib/nnc/pass/PassManager.cpp
contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp [deleted file]
contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/tflite_frontend/CMakeLists.txt
contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp [deleted file]
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/tests/import/caffe.cpp
contrib/nnc/tests/import/tflite.cpp
contrib/nnc/unittests/caffe_frontend/unsupportedCaffeModel.cpp
contrib/nnc/unittests/tflite_frontend/unsupportedTfliteModel.cpp

index f3180fd..cd57065 100644 (file)
@@ -16,8 +16,8 @@
 
 #include "pass/PassData.h"
 
-#include "passes/caffe_frontend/CaffeFrontend.h"
-#include "passes/tflite_frontend/TfliteFrontend.h"
+#include "passes/caffe_frontend/caffe_importer.h"
+#include "passes/tflite_frontend/tflite_importer.h"
 #include "passes/interpreter/InterpreterPass.h"
 #include "passes/soft_backend/CPPGenerator.h"
 #include "passes/acl_soft_backend/AclCppGenerator.h"
@@ -43,6 +43,9 @@ void Driver::runPasses() {
     pass_data = pass->run(pass_data);
   }
 
+  // NOTE. Now we destroy data of all passes when PassManager is destroyed.
+  // In future to reduce memory consumption we can destroy it when passes are being performed
+
 } // runPasses
 
 /**
@@ -66,11 +69,11 @@ void Driver::registerFrontendPass() {
 
   if (cli::caffeFrontend) {
 #ifdef NNC_FRONTEND_CAFFE_ENABLED
-    pass = std::move(std::unique_ptr<Pass>(new CaffeFrontend()));
+    pass = std::move(std::unique_ptr<Pass>(new CaffeImporter(cli::inputFile)));
 #endif // NNC_FRONTEND_CAFFE_ENABLED
   } else if (cli::tflFrontend) {
 #ifdef NNC_FRONTEND_TFLITE_ENABLED
-    pass = std::move(std::unique_ptr<Pass>(new TFLiteFrontend()));
+    pass = std::move(std::unique_ptr<Pass>(new TfliteImporter(cli::inputFile)));
 #endif // NNC_FRONTEND_TFLITE_ENABLED
   } else {
     throw DriverException("one of the following options must be defined: '"
index 2963abd..968f5e9 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "support/CommandLine.h"
 #include "option/Options.h"
-#include "caffe_importer.h"
+#include "passes/caffe_frontend/caffe_importer.h"
 #include "core/modelIR/graph.h"
 #include "core/modelIR/IrDotDumper.h"
 #include "core/modelIR/ShapeInference.h"
index edd3dd5..15c96ec 100644 (file)
@@ -19,7 +19,7 @@
 #include "support/CommandLine.h"
 #include "pass/PassException.h"
 #include "option/Options.h"
-#include "tflite_importer.h"
+#include "passes/tflite_frontend/tflite_importer.h"
 #include "core/modelIR/graph.h"
 #include "core/modelIR/IrDotDumper.h"
 #include "core/modelIR/ShapeInference.h"
index 170de75..1c6ecfd 100644 (file)
@@ -37,6 +37,11 @@ public:
    */
   virtual PassData run(PassData data) = 0;
 
+  /**
+   * @brief clean compiler pass data
+   */
+  virtual void cleanup() {};
+
   virtual ~Pass() = default;
 };
 
index 2191957..e109460 100644 (file)
@@ -30,14 +30,13 @@ namespace nnc
 class PassData
 {
 public:
-  PassData(const PassData &) = default;
-
   PassData(std::nullptr_t data) { _dataContainer.unknown = data; _dataType = PDT::UNKNOWN; }
 
   /**
    * @brief Implicit conversion from Graph* to PassData
    */
   /* implicit */ PassData(mir::Graph *graph) { _dataContainer.graph = graph; _dataType = PDT::GRAPH; }
+
   /**
    * @brief Implicit conversion from PassData to Graph*
    */
diff --git a/contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h b/contrib/nnc/include/passes/caffe_frontend/CaffeFrontend.h
deleted file mode 100644 (file)
index c35e28b..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_CAFFEFRONTEND_H
-#define NNCC_CAFFEFRONTEND_H
-
-#include "pass/Pass.h"
-#include "pass/PassData.h"
-
-
-namespace nnc {
-
-/**
- * @brief class represent frontend of caffe NN framework
- */
-class CaffeFrontend : public Pass {
-public:
-  PassData run(PassData) override;
-
-};
-
-} // namespace nnc
-
-#endif //NNCC_CAFFEFRONTEND_H
 #include "caffe/proto/caffe.pb.h"
 
 #include "passes/common_frontend/nn_importer.h"
+/// @todo caffe_op_creator.h and caffe_op_types.h aren't interface files.
+/// We need to remove them and use here forward declarations
 #include "caffe_op_creator.h"
 #include "caffe_op_types.h"
 
+#include "pass/Pass.h"
+#include "pass/PassData.h"
+
 namespace nnc {
 
-class CaffeImporter : public NNImporter {
+class CaffeImporter : public NNImporter, public Pass {
 public:
   explicit CaffeImporter(std::string filename) : _modelFilename(std::move(filename)),
                                                  _graph(new mir::Graph()),
@@ -48,11 +53,14 @@ public:
   */
   Graph* createIR() override;
 
+  PassData run(PassData) override;
+  void cleanup() override;
+
 private:
   std::string _modelFilename;
   std::unique_ptr<::caffe::NetParameter> _net;
   mir::Graph* _graph;
-  OpCreator _opCreator;
+  CaffeOpCreator _opCreator;
 
   std::vector<mir::Shape> _inputShapes;
 
@@ -37,12 +37,12 @@ using nnc::mir::INode;
 using IrTensor = nnc::mir::TensorVariant;
 using nnc::mir::Shape;
 
-class OpCreator {
+class CaffeOpCreator {
 public:
   using InputOps = std::vector<INode::Ref>&;
   using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
 
-  explicit OpCreator(Graph* g) : graph(g) {};
+  explicit CaffeOpCreator(Graph* g) : graph(g) {};
 
   std::vector<INode::Ref> createConv2D(InputOps, InputParams,
                                        const ::caffe::ConvolutionParameter&);
@@ -101,7 +101,7 @@ private:
 };
 
 template <typename OpType, typename ...Types>
-std::vector<INode::Ref> OpCreator::createOp(std::vector<INode::Ref>& inputs, Types&& ... args) {
+std::vector<INode::Ref> CaffeOpCreator::createOp(std::vector<INode::Ref>& inputs, Types&& ... args) {
   std::vector<INode::Ref> outputs;
 
   // TODO: set operation names
diff --git a/contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h b/contrib/nnc/include/passes/tflite_frontend/TfliteFrontend.h
deleted file mode 100644 (file)
index 894fa00..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNCC_TFLITEFRONTEND_H
-#define NNCC_TFLITEFRONTEND_H
-
-#include "pass/Pass.h"
-#include "pass/PassData.h"
-
-
-namespace nnc {
-
-/**
- * @brief class represent frontend of tensorflow lite NN framework
- */
-class TFLiteFrontend : public Pass {
-public:
-  PassData run(PassData) override;
-
-};
-
-} // namespace nnc
-
-#endif //NNCC_TFLITEFRONTEND_H
@@ -22,7 +22,9 @@
 #include <string>
 
 #include "schema_generated.h"
+#include "pass/Pass.h"
 #include "pass/PassException.h"
+#include "pass/PassData.h"
 #include "passes/common_frontend/nn_importer.h"
 #include "passes/common_frontend/model_allocation.h"
 
 #include "core/modelIR/TensorUtil.h"
 #include "core/modelIR/TensorVariant.h"
 
+/// @todo tflite_op_creator.h isn't an interface file.
+/// We need to remove it and use here forward declarations
 #include "tflite_op_creator.h"
 
 namespace nnc {
 
-class TfliteImporter : NNImporter {
+class TfliteImporter : public NNImporter, public Pass {
 public:
   explicit TfliteImporter(std::string filename);
 
@@ -53,6 +57,9 @@ public:
 
   void importUnpacked();
 
+  PassData run(PassData) override;
+  void cleanup() override;
+
 private:
   std::string _filename;
   std::unique_ptr<ModelAllocation> _modelRaw;
@@ -60,7 +67,7 @@ private:
   const ::tflite::Model* _modelPacked = nullptr;
 
   Graph* _graph = nullptr;
-  std::unique_ptr<OpCreator> _opCreator;
+  std::unique_ptr<TFLiteOpCreator> _opCreator;
 
   const flatbuffers::Vector<flatbuffers::Offset<::tflite::OperatorCode>>* _opcodes = nullptr;
   const flatbuffers::Vector<flatbuffers::Offset<::tflite::Tensor>>* _tensors = nullptr;
@@ -41,12 +41,12 @@ using mir::INode;
 using IrTensor = mir::TensorVariant;
 using mir::Shape;
 
-class OpCreator {
+class TFLiteOpCreator {
 public:
   using InputOps = std::vector<INode::Ref>&;
   using InputParams = std::vector<std::shared_ptr<IrTensor>>&;
 
-  explicit OpCreator(Graph* g) : graph(g) {};
+  explicit TFLiteOpCreator(Graph* g) : graph(g) {};
 
   std::vector<INode::Ref> createConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
 
@@ -96,7 +96,7 @@ private:
 };
 
 template<typename OpType, typename... Types>
-std::vector<INode::Ref> OpCreator::createOp(
+std::vector<INode::Ref> TFLiteOpCreator::createOp(
         std::vector<INode::Ref>& inputs,
         ::tflite::ActivationFunctionType activation, Types&& ... args) {
   std::vector<INode::Ref> outputs;
index 677e67c..3151b79 100644 (file)
@@ -22,7 +22,10 @@ namespace nnc
 
 PassManager::PassManager() {}
 
-PassManager::~PassManager() {}
+PassManager::~PassManager() {
+  for (auto &pass : _passes)
+    pass->cleanup();
+}
 
 void PassManager::registerPass(std::unique_ptr<Pass> pass)
 {
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp b/contrib/nnc/passes/caffe_frontend/caffe_frontend.cpp
deleted file mode 100644 (file)
index 5a257ec..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <map>
-#include <vector>
-#include <iostream>
-
-#include "option/Options.h"
-#include "pass/PassException.h"
-#include "passes/caffe_frontend/CaffeFrontend.h"
-
-#include "caffe_importer.h"
-
-
-namespace nnc {
-
-PassData CaffeFrontend::run(PassData) {
-  CaffeImporter importer{cli::inputFile};
-
-  importer.import();
-
-  return importer.createIR();
-}
-
-} // namespace nnc
index 3e5b262..6273ace 100644 (file)
@@ -19,7 +19,7 @@
 #include <sstream>
 #include <cassert>
 
-#include "caffe_importer.h"
+#include "passes/caffe_frontend/caffe_importer.h"
 #include "proto_reader.h"
 
 #include "core/modelIR/Shape.h"
@@ -304,6 +304,15 @@ void CaffeImporter::setIrNodeNames() {
     item.second->setName(item.first);
 }
 
+PassData CaffeImporter::run(PassData) {
+  import();
+  return createIR();
+}
+
+void CaffeImporter::cleanup() {
+  delete _graph;
+}
+
 const std::map<std::string, CaffeOpType> CaffeImporter::_operatorTypes = {
         {"AbsVal",                  CaffeOpType::absVal},
         {"Accuracy",                CaffeOpType::accuracy},
index 3ec7143..3c5abf2 100644 (file)
@@ -39,7 +39,7 @@
 
 #include "passes/common_frontend/shape_helper.h"
 #include "pass/PassException.h"
-#include "caffe_op_creator.h"
+#include "passes/caffe_frontend/caffe_op_creator.h"
 
 #include <set>
 #include <cmath>
@@ -215,7 +215,7 @@ fixGroupedKernel(int groups, std::shared_ptr<IrTensor> folded_kernel) {
   return unfold_kernel;
 }
 
-void OpCreator::checkConv2D(const caffe::ConvolutionParameter& opts,
+void CaffeOpCreator::checkConv2D(const caffe::ConvolutionParameter& opts,
                             std::set<std::string>& problems_op_set) {
   assert(opts.stride_size() <= 2);
 
@@ -226,7 +226,7 @@ void OpCreator::checkConv2D(const caffe::ConvolutionParameter& opts,
     problems_op_set.insert("Conv2D: Unsupported number of pads");
 }
 
-std::vector<INode*> OpCreator::createConv2D(InputOps inputs, InputParams params,
+std::vector<INode*> CaffeOpCreator::createConv2D(InputOps inputs, InputParams params,
                                                 const caffe::ConvolutionParameter& opts) {
   ops::PaddingType pad_type = ops::PaddingType::Custom;
   Shape stride_shape = getConvStride(opts);
@@ -271,7 +271,7 @@ std::vector<INode*> OpCreator::createConv2D(InputOps inputs, InputParams params,
     return outputs;
 }
 
-void OpCreator::checkFullyConnected(const caffe::InnerProductParameter& opts,
+void CaffeOpCreator::checkFullyConnected(const caffe::InnerProductParameter& opts,
                                     std::set<std::string>& problemsOpSet) {
   if (opts.has_axis() && opts.axis() != 1)
     problemsOpSet.insert("Fully Connected: layer axis param is not supported yet");
@@ -288,7 +288,7 @@ void OpCreator::checkFullyConnected(const caffe::InnerProductParameter& opts,
  * implement it correctly.
  * @todo Support axis and transpose parameters as needed.
  */
-std::vector<INode*> OpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
+std::vector<INode*> CaffeOpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
                                                         const caffe::InnerProductParameter& opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
   // It is needed because Caffe InnerProduct layer takes NCHW input and flattens the CHW part.
@@ -306,14 +306,14 @@ std::vector<INode*> OpCreator::createFullyConnected(InputOps& inputs, InputParam
     return fc_outputs;
 }
 
-std::vector<INode*> OpCreator::createConcat(InputOps inputs, InputParams params,
+std::vector<INode*> CaffeOpCreator::createConcat(InputOps inputs, InputParams params,
                                                 const caffe::ConcatParameter& opts) {
   (void) params;
 
   return createOp<ops::ConcatOp>(inputs, inputs.size(), getAxisValue(opts));
 }
 
-void OpCreator::checkPool(const caffe::PoolingParameter& opts,
+void CaffeOpCreator::checkPool(const caffe::PoolingParameter& opts,
                           std::set<std::string>& problemsOpSet) {
   if (opts.has_global_pooling() && opts.global_pooling())
     problemsOpSet.insert("Pooling: pooling layer global_pooling param is not supported yet");
@@ -326,7 +326,7 @@ void OpCreator::checkPool(const caffe::PoolingParameter& opts,
     problemsOpSet.insert("Pooling: conflicting padding properties in pooling");
 }
 
-std::vector<INode*> OpCreator::createPool(InputOps inputs, InputParams params,
+std::vector<INode*> CaffeOpCreator::createPool(InputOps inputs, InputParams params,
                                               const caffe::PoolingParameter& opts) {
   (void) params;
 
@@ -363,14 +363,14 @@ std::vector<INode*> OpCreator::createPool(InputOps inputs, InputParams params,
   return pooling;
 }
 
-std::vector<INode*> OpCreator::createSoftmax(InputOps inputs, InputParams params,
+std::vector<INode*> CaffeOpCreator::createSoftmax(InputOps inputs, InputParams params,
                                                  const caffe::SoftmaxParameter& opts) {
   (void) params;
 
   return createOp<ops::SoftmaxOp>(inputs, getAxisValue(opts));
 }
 
-void OpCreator::checkReshape(const caffe::ReshapeParameter& opts,
+void CaffeOpCreator::checkReshape(const caffe::ReshapeParameter& opts,
                              std::set<std::string>& problemsOpSet) {
   if (opts.has_axis() || opts.has_num_axes())
     problemsOpSet.insert("Reshape layer axis and num_axes params are not supported yet");
@@ -391,7 +391,7 @@ void OpCreator::checkReshape(const caffe::ReshapeParameter& opts,
  * @todo Decide how to react to the absence of "shape" parameter.
  * @todo Support zero values in "shape" parameter.
  */
-std::vector<INode*> OpCreator::createReshape(InputOps inputs, InputParams params,
+std::vector<INode*> CaffeOpCreator::createReshape(InputOps inputs, InputParams params,
                                                  const caffe::ReshapeParameter& opts) {
   (void) params;
 
@@ -403,13 +403,13 @@ std::vector<INode*> OpCreator::createReshape(InputOps inputs, InputParams params
   return outputs;
 }
 
-void OpCreator::checkRelu(const caffe::ReLUParameter& opts,
+void CaffeOpCreator::checkRelu(const caffe::ReLUParameter& opts,
                           std::set<std::string>& problems_op_set) {
   if (opts.has_negative_slope())
     problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
 }
 
-std::vector<INode*> OpCreator::createRelu(InputOps inputs, InputParams params,
+std::vector<INode*> CaffeOpCreator::createRelu(InputOps inputs, InputParams params,
                                               const caffe::ReLUParameter& opts) {
   (void) params;
 
@@ -417,7 +417,7 @@ std::vector<INode*> OpCreator::createRelu(InputOps inputs, InputParams params,
 }
 
 std::vector<INode*>
-OpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter& opts) {
+CaffeOpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter& opts) {
   auto outputs = createOp<ops::ScaleOp>(inputs, std::move(*params[0]));
   // bias_term is optional (so might not be present) and defaults to true
   if (!opts.has_bias_term() || opts.bias_term())
@@ -426,7 +426,7 @@ OpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter
     return outputs;
 }
 
-void OpCreator::checkBatchNorm(const caffe::BatchNormParameter& opts, InputParams params,
+void CaffeOpCreator::checkBatchNorm(const caffe::BatchNormParameter& opts, InputParams params,
                                std::set<std::string>& problems_op_set) {
   // Check that last blob(with scaleFactor) containing only one number
   if (params[2]->getShape().rank() != 1 && params[2]->getShape().dim(0) != 1)
@@ -434,7 +434,7 @@ void OpCreator::checkBatchNorm(const caffe::BatchNormParameter& opts, InputParam
 }
 
 std::vector<INode*>
-OpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts) {
+CaffeOpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts) {
   const float MAFRAC_DEF = 0.999f;
   const float EPS_DEF = 1e-5f;
   // optional params may be left out, so we fill them with defalt values (lifted from caffe docs)
@@ -471,7 +471,7 @@ OpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormP
 }
 
 std::vector<INode*>
-OpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts) {
+CaffeOpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts) {
   (void) params;
   const float DROPOUT_RATIO_DEF = 0.5f;
   // optional params may be left out, so we fill them with defalt values (lifted from caffe docs)
@@ -480,7 +480,7 @@ OpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParam
 }
 
 std::vector<INode*>
-  OpCreator::createDeconvolution( std::vector<INode*> &inputs,
+  CaffeOpCreator::createDeconvolution( std::vector<INode*> &inputs,
                                   std::vector<std::shared_ptr<IrTensor>> &params,
                                   const ConvolutionParameter &opts) noexcept {
   ops::PaddingType pad_type = ops::PaddingType::Custom;
@@ -513,7 +513,7 @@ std::vector<INode*>
 }
 
 std::vector<INode*>
-OpCreator::createELU(std::vector<INode*>& inputs,
+CaffeOpCreator::createELU(std::vector<INode*>& inputs,
                      std::vector<std::shared_ptr<IrTensor>>& params,
                      const ELUParameter& opts) noexcept {
   const float ELU_ALPHA= 1.0f;
@@ -521,14 +521,14 @@ OpCreator::createELU(std::vector<INode*>& inputs,
   return createOp<ops::EluOp>(inputs, elu_alpha);
 }
 
-std::vector<INode*> OpCreator::createTanh(std::vector<INode*>& inputs,
+std::vector<INode*> CaffeOpCreator::createTanh(std::vector<INode*>& inputs,
                                           std::vector<std::shared_ptr<IrTensor>>&,
                                           const TanHParameter&) noexcept {
   return createOp<ops::TanhOp>(inputs);
 }
 
 
-std::vector<INode*> OpCreator::createEltwise(std::vector<INode*>& inputs,
+std::vector<INode*> CaffeOpCreator::createEltwise(std::vector<INode*>& inputs,
                                              std::vector<std::shared_ptr<IrTensor>>& params,
                                              const EltwiseParameter& opts) noexcept {
   (void) params;
@@ -551,7 +551,7 @@ std::vector<INode*> OpCreator::createEltwise(std::vector<INode*>& inputs,
 }
 
 
-void OpCreator::connectInputs(INode* op, InputOps inputs) {
+void CaffeOpCreator::connectInputs(INode* op, InputOps inputs) {
   // TODO: this part doesn't support the situation where an operator takes as input
   // some tensor that is not the 0th output of some other operator
   for (int i = 0; i < static_cast<int>(inputs.size()); ++i)
index 4213e10..fce09e3 100644 (file)
@@ -23,8 +23,7 @@ target_include_directories(tflite_schema PUBLIC ${FB_GEN_INCLUDE_DIRS})
 ###################
 
 set(tflite_importer_sources tflite_op_creator.cpp
-                            tflite_importer.cpp
-                            tflite_frontend.cpp)
+                            tflite_importer.cpp)
 file(GLOB tflite_importer_headers *.h)
 
 set(tflite_import tflite_import)
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp b/contrib/nnc/passes/tflite_frontend/tflite_frontend.cpp
deleted file mode 100644 (file)
index 947030f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <map>
-#include <vector>
-#include <iostream>
-
-#include "pass/PassException.h"
-#include "passes/tflite_frontend/TfliteFrontend.h"
-#include "option/Options.h"
-
-#include "tflite_importer.h"
-
-
-namespace nnc {
-
-PassData TFLiteFrontend::run(PassData) {
-  TfliteImporter importer{cli::inputFile};
-
-  importer.import();
-
-  return reinterpret_cast<mir::Graph*>(importer.createIR());
-}
-
-} // namespace nnc
index 7a5355f..148b4ee 100644 (file)
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#include "tflite_importer.h"
-#include "tflite_op_creator.h"
+#include "passes/tflite_frontend/tflite_importer.h"
+#include "passes/tflite_frontend/tflite_op_creator.h"
 
 using namespace ::tflite;
 
@@ -24,7 +24,7 @@ namespace nnc {
 TfliteImporter::TfliteImporter(std::string filename) : _filename(filename) {
   _modelRaw.reset(new ModelAllocation{std::move(filename)});
   _graph = new Graph();
-  _opCreator.reset(new OpCreator(_graph));
+  _opCreator.reset(new TFLiteOpCreator(_graph));
 }
 
 void TfliteImporter::importUnpacked() {
@@ -290,4 +290,14 @@ void TfliteImporter::setIrNodeNames() {
     item.second->setName((*_tensors)[item.first]->name()->c_str());
 }
 
+
+PassData TfliteImporter::run(PassData) {
+  import();
+  return createIR();
+}
+
+void TfliteImporter ::cleanup() {
+  delete _graph;
+}
+
 }  // namespace nnc
index 72a0def..00bb13b 100644 (file)
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "tflite_op_creator.h"
+#include "passes/tflite_frontend/tflite_op_creator.h"
 #include "schema_generated.h"
 
 #include "core/modelIR/operations/concat_op.h"
@@ -34,11 +34,11 @@ using namespace ::tflite;
 
 namespace nnc {
 
-void OpCreator::checkConv2D(const Conv2DOptions* opts, std::set<std::string>& problems_op_set) {
+void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts, std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createConv2D(InputOps inputs, InputParams params,
                                                 const Conv2DOptions* opts) {
   auto outputs = createOp<ops::Conv2DOp>(inputs, ActivationFunctionType_NONE, std::move(*params[0]),
                                          Shape{static_cast<int32_t>(opts->stride_h()),
@@ -48,12 +48,12 @@ std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams par
                                   std::move(*params[1]));
 }
 
-void OpCreator::checkDepthConv2D(const DepthwiseConv2DOptions* opts,
+void TFLiteOpCreator::checkDepthConv2D(const DepthwiseConv2DOptions* opts,
                                  std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> OpCreator::createDepthConv2D(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createDepthConv2D(InputOps inputs, InputParams params,
                                                      const DepthwiseConv2DOptions* opts) {
   auto outputs = createOp<ops::DepthwiseConv2DOp>(
           inputs, ActivationFunctionType_NONE, std::move(*params[0]),
@@ -64,23 +64,23 @@ std::vector<INode::Ref> OpCreator::createDepthConv2D(InputOps inputs, InputParam
                                   std::move(*params[1]));
 }
 
-void OpCreator::checkConcat(const ConcatenationOptions* opts,
+void TFLiteOpCreator::checkConcat(const ConcatenationOptions* opts,
                             std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> OpCreator::createConcat(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createConcat(InputOps inputs, InputParams params,
                                                 const ConcatenationOptions* opts) {
   // Decrementing axis to account for the unnecessary batch dimension
   return createOp<ops::ConcatOp>(inputs, opts->fused_activation_function(), inputs.size(),
                                  opts->axis() - 1);
 }
 
-void OpCreator::checkPool(const Pool2DOptions* opts, std::set<std::string>& problems_op_set) {
+void TFLiteOpCreator::checkPool(const Pool2DOptions* opts, std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> OpCreator::createMaxPool(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createMaxPool(InputOps inputs, InputParams params,
                                                  const Pool2DOptions* opts) {
   return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
                                Shape{static_cast<int32_t>(opts->filter_height()),
@@ -91,7 +91,7 @@ std::vector<INode::Ref> OpCreator::createMaxPool(InputOps inputs, InputParams pa
                                ops::PoolOp::BorderType::EMPTY);
 }
 
-std::vector<INode::Ref> OpCreator::createAvgPool(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createAvgPool(InputOps inputs, InputParams params,
                                                  const Pool2DOptions* opts) {
   return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
                                Shape{static_cast<int32_t>(opts->filter_height()),
@@ -102,13 +102,13 @@ std::vector<INode::Ref> OpCreator::createAvgPool(InputOps inputs, InputParams pa
                                ops::PoolOp::BorderType::EMPTY);
 }
 
-std::vector<INode::Ref> OpCreator::createSoftmax(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params,
                                                  const SoftmaxOptions* opts) {
   // -1 represents last one dimension
   return createOp<ops::SoftmaxOp>(inputs, ActivationFunctionType_NONE, -1);
 }
 
-std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams params,
+std::vector<INode::Ref> TFLiteOpCreator::createReshape(InputOps inputs, InputParams params,
                                                  const ReshapeOptions* opts) {
   auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
 
@@ -120,12 +120,12 @@ std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams pa
   return outputs;
 }
 
-void OpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
+void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
                                     std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
+std::vector<INode::Ref> TFLiteOpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
                                                         const FullyConnectedOptions* opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
   auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
@@ -138,7 +138,7 @@ std::vector<INode::Ref> OpCreator::createFullyConnected(InputOps& inputs, InputP
                                   std::move(*params[1]));
 }
 
-void OpCreator::checkActivationType(ActivationFunctionType activation_type,
+void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type,
                                     std::set<std::string>& problems_op_set) {
   if (activation_type != ActivationFunctionType_NONE
       && activation_type != ActivationFunctionType_RELU
@@ -147,7 +147,7 @@ void OpCreator::checkActivationType(ActivationFunctionType activation_type,
                            + EnumNamesActivationFunctionType()[activation_type]);
 }
 
-INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activation_type) {
+INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activation_type) {
   INode::Ref activation;
 
   if (activation_type != ActivationFunctionType_NONE) {
@@ -171,7 +171,7 @@ INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionTyp
   }
 }
 
-void OpCreator::connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs) {
+void TFLiteOpCreator::connectInputs(INode::Ref op, std::vector<INode::Ref>& inputs) {
   // TODO: this part doesn't support the situation where an operator takes as input
   // some tensor that is not the 0th output of some other operator
   assert(inputs.size() == op->getOperation()->getNumInputs());
index 9b3d338..939ce07 100644 (file)
@@ -18,7 +18,7 @@
 #include "support/CommandLine.h"
 #include "option/Options.h"
 
-#include "caffe_importer.h"
+#include "passes/caffe_frontend/caffe_importer.h"
 
 using namespace nnc;
 
index 2fe6f8b..dddc9e8 100644 (file)
@@ -18,7 +18,7 @@
 #include "support/CommandLine.h"
 #include "option/Options.h"
 
-#include "tflite_importer.h"
+#include "passes/tflite_frontend/tflite_importer.h"
 
 using namespace nnc;
 
index 7b5422c..86b0ea7 100644 (file)
@@ -1,4 +1,4 @@
-#include "caffe_importer.h"
+#include "passes/caffe_frontend/caffe_importer.h"
 #include "gtest/gtest.h"
 #include "pass/PassException.h"
 #include <string>
@@ -20,6 +20,7 @@ TEST(CAFFE_IMPORT_UNSUPPORTED, ImportAModelWithUnsupportedLayers) {
   }
   catch (nnc::PassException &e) {
     ASSERT_EQ(std::string(ErrorMsg), e.what());
+    importer.cleanup();
     return;
   }
 
index c89d158..d1f0b85 100644 (file)
@@ -1,4 +1,4 @@
-#include "tflite_importer.h"
+#include "passes/tflite_frontend/tflite_importer.h"
 #include "gtest/gtest.h"
 #include "pass/PassException.h"
 #include <string>
@@ -20,6 +20,7 @@ TEST(TFLITE_IMPORT_UNSUPPORTED, ImportModelWithUnsupportedLayers) {
   }
   catch (nnc::PassException &e) {
     ASSERT_EQ(std::string(ErrorMsg), e.what());
+    importer.cleanup();
     return;
   }