[nnc] Rename op creation methods to match framework layer names (#2051)
author Sergei Barannikov/AI Tools Lab/SRR/Engineer/Samsung Electronics <s.barannikov@samsung.com>
Tue, 30 Oct 2018 14:06:59 +0000 (17:06 +0300)
committer Roman Mikhailovich Rusyaev/AI Tools Lab/SRR/Staff Engineer/Samsung Electronics <r.rusyaev@samsung.com>
Tue, 30 Oct 2018 14:06:59 +0000 (17:06 +0300)
* Rename the CaffeOpCreator and TFLiteOpCreator methods to match the source framework layer names rather than the MIR operation names

Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
contrib/nnc/include/passes/caffe_frontend/caffe_op_creator.h
contrib/nnc/include/passes/tflite_frontend/tflite_op_creator.h
contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
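
To illustrate the convention this change adopts, here is a minimal, self-contained C++ sketch (not taken from the nnc sources; every type in it is a hypothetical stand-in for the real mir::INode and Caffe proto classes). The point: a creator method is named after the framework layer it converts, because one framework layer may lower to several MIR operations, so naming the method after a single MIR op (e.g. createFullyConnected for Caffe's InnerProduct) misrepresented what it handles.

    #include <memory>
    #include <vector>

    // Hypothetical stand-ins for the real nnc types; only the naming idea matters.
    struct MirNode {};
    using NodeRef = std::shared_ptr<MirNode>;
    struct ConvolutionParameter { bool bias_term = true; };

    class CaffeOpCreator {
    public:
      // Named after the Caffe layer ("Convolution"), not after a MIR op: the
      // layer may lower to a Conv2D plus a BiasAdd when bias_term is set, so a
      // MIR-based name like createConv2D understated what the method emits.
      std::vector<NodeRef> convertConvolution(const ConvolutionParameter& opts) {
        std::vector<NodeRef> outputs;
        outputs.push_back(std::make_shared<MirNode>());   // Conv2DOp
        if (opts.bias_term)
          outputs.push_back(std::make_shared<MirNode>()); // BiasAddOp
        return outputs;
      }
    };

    int main() {
      CaffeOpCreator creator;
      // A Convolution layer with a bias lowers to two MIR nodes.
      return creator.convertConvolution(ConvolutionParameter{}).size() == 2 ? 0 : 1;
    }

The same rule drives the TFLite renames below (createDepthConv2D -> convertDepthwiseConv2D, createAvgPool -> convertAveragePool2D, ...): each method now tracks the tflite::BuiltinOperator it converts rather than the MIR op it happens to emit.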

diff --git a/contrib/nnc/include/passes/caffe_frontend/caffe_op_creator.h b/contrib/nnc/include/passes/caffe_frontend/caffe_op_creator.h
index b9730b4..6a30b6c 100644
@@ -44,50 +44,49 @@ public:
 
   explicit CaffeOpCreator(Graph* g) : graph(g) {};
 
-  std::vector<INode::Ref> createConv2D(InputOps, InputParams,
-                                       const ::caffe::ConvolutionParameter&);
+  std::vector<INode::Ref> convertConvolution(InputOps, InputParams,
+                                             const ::caffe::ConvolutionParameter&);
 
-  std::vector<INode::Ref> createFullyConnected(InputOps, InputParams,
-                                               const ::caffe::InnerProductParameter&);
+  std::vector<INode::Ref> convertInnerProduct(InputOps, InputParams,
+                                              const ::caffe::InnerProductParameter&);
 
-  std::vector<INode::Ref> createConcat(InputOps, InputParams, const ::caffe::ConcatParameter&);
+  std::vector<INode::Ref> convertConcat(InputOps, InputParams, const ::caffe::ConcatParameter&);
 
-  std::vector<INode::Ref> createPool(InputOps, InputParams, const ::caffe::PoolingParameter&);
+  std::vector<INode::Ref> convertPooling(InputOps, InputParams, const ::caffe::PoolingParameter&);
 
-  std::vector<INode::Ref> createSoftmax(InputOps, InputParams, const ::caffe::SoftmaxParameter&);
+  std::vector<INode::Ref> convertSoftmax(InputOps, InputParams, const ::caffe::SoftmaxParameter&);
 
-  std::vector<INode::Ref> createReshape(InputOps, InputParams, const ::caffe::ReshapeParameter&);
+  std::vector<INode::Ref> convertReshape(InputOps, InputParams, const ::caffe::ReshapeParameter&);
 
-  std::vector<INode::Ref> createRelu(InputOps, InputParams, const ::caffe::ReLUParameter&);
+  std::vector<INode::Ref> convertReLU(InputOps, InputParams, const ::caffe::ReLUParameter&);
 
-  std::vector<INode::Ref> createScale(InputOps, InputParams, const ::caffe::ScaleParameter&);
+  std::vector<INode::Ref> convertScale(InputOps, InputParams, const ::caffe::ScaleParameter&);
 
-  std::vector<INode::Ref> createBatchNorm(InputOps, InputParams,
-                                          const ::caffe::BatchNormParameter&);
+  std::vector<INode::Ref> convertBatchNorm(InputOps, InputParams,
+                                           const ::caffe::BatchNormParameter&);
 
-  std::vector<INode::Ref> createDropout(InputOps, InputParams, const ::caffe::DropoutParameter&);
+  std::vector<INode::Ref> convertDropout(InputOps, InputParams, const ::caffe::DropoutParameter&);
 
-  std::vector<INode::Ref> createDeconvolution(
-    InputOps , InputParams, const ::caffe::ConvolutionParameter&) noexcept;
+  std::vector<INode::Ref> convertDeconvolution(InputOps, InputParams,
+                                               const ::caffe::ConvolutionParameter&) noexcept;
   
-  std::vector<INode::Ref> createELU(
-    InputOps, InputParams, const ::caffe::ELUParameter&) noexcept;
+  std::vector<INode::Ref> convertELU(InputOps, InputParams, const ::caffe::ELUParameter&) noexcept;
   
-  std::vector<INode::Ref> createTanh(
-    InputOps, InputParams, const ::caffe::TanHParameter&) noexcept;
+  std::vector<INode::Ref> convertTanH(InputOps, InputParams,
+                                      const ::caffe::TanHParameter&) noexcept;
   
-  std::vector<INode::Ref> createEltwise(
-    InputOps, InputParams, const ::caffe::EltwiseParameter&) noexcept;
+  std::vector<INode::Ref> convertEltwise(InputOps, InputParams,
+                                         const ::caffe::EltwiseParameter&) noexcept;
 
-  void checkConv2D(const caffe::ConvolutionParameter&, std::set<std::string>&);
+  void checkConvolution(const caffe::ConvolutionParameter&, std::set<std::string>&);
 
-  void checkFullyConnected(const caffe::InnerProductParameter&, std::set<std::string>&);
+  void checkInnerProduct(const caffe::InnerProductParameter&, std::set<std::string>&);
 
-  void checkPool(const caffe::PoolingParameter&, std::set<std::string>&);
+  void checkPooling(const caffe::PoolingParameter&, std::set<std::string>&);
 
   void checkReshape(const caffe::ReshapeParameter&, std::set<std::string>&);
 
-  void checkRelu(const caffe::ReLUParameter&, std::set<std::string>&);
+  void checkReLU(const caffe::ReLUParameter&, std::set<std::string>&);
 
   void checkBatchNorm(const caffe::BatchNormParameter&, InputParams, std::set<std::string>&);
 
diff --git a/contrib/nnc/include/passes/tflite_frontend/tflite_op_creator.h b/contrib/nnc/include/passes/tflite_frontend/tflite_op_creator.h
index 100134a..a399a17 100644
@@ -48,32 +48,33 @@ public:
 
   explicit TFLiteOpCreator(Graph* g) : graph(g) {};
 
-  std::vector<INode::Ref> createConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
+  std::vector<INode::Ref> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
 
-  std::vector<INode::Ref> createDepthConv2D(InputOps, InputParams,
-                                            const ::tflite::DepthwiseConv2DOptions*);
+  std::vector<INode::Ref> convertDepthwiseConv2D(InputOps, InputParams,
+                                                 const ::tflite::DepthwiseConv2DOptions*);
 
-  std::vector<INode::Ref> createConcat(InputOps, InputParams,
-                                       const ::tflite::ConcatenationOptions*);
+  std::vector<INode::Ref> convertConcatenation(InputOps, InputParams,
+                                               const ::tflite::ConcatenationOptions*);
 
-  std::vector<INode::Ref> createMaxPool(InputOps, InputParams, const ::tflite::Pool2DOptions*);
+  std::vector<INode::Ref> convertMaxPool2D(InputOps, InputParams, const ::tflite::Pool2DOptions*);
 
-  std::vector<INode::Ref> createAvgPool(InputOps, InputParams, const ::tflite::Pool2DOptions*);
+  std::vector<INode::Ref> convertAveragePool2D(InputOps, InputParams,
+                                               const ::tflite::Pool2DOptions*);
 
   std::vector<INode::Ref> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
 
-  std::vector<INode::Ref> createReshape(InputOps, InputParams, const ::tflite::ReshapeOptions*);
+  std::vector<INode::Ref> convertReshape(InputOps, InputParams, const ::tflite::ReshapeOptions*);
 
-  std::vector<INode::Ref> createFullyConnected(InputOps, InputParams,
-                                               const ::tflite::FullyConnectedOptions*);
+  std::vector<INode::Ref> convertFullyConnected(InputOps, InputParams,
+                                                const ::tflite::FullyConnectedOptions*);
 
-  void checkPool(const ::tflite::Pool2DOptions*, std::set<std::string>&);
+  void checkPool2D(const ::tflite::Pool2DOptions*, std::set<std::string>&);
 
-  void checkConcat(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
+  void checkConcatenation(const ::tflite::ConcatenationOptions*, std::set<std::string>&);
 
   void checkConv2D(const ::tflite::Conv2DOptions*, std::set<std::string>&);
 
-  void checkDepthConv2D(const ::tflite::DepthwiseConv2DOptions*, std::set<std::string>&);
+  void checkDepthwiseConv2D(const ::tflite::DepthwiseConv2DOptions*, std::set<std::string>&);
 
   void checkFullyConnected(const ::tflite::FullyConnectedOptions*, std::set<std::string>&);
 
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp b/contrib/nnc/passes/caffe_frontend/caffe_importer.cpp
index 6273ace..9a81424 100644
@@ -84,46 +84,46 @@ void CaffeImporter::createMIRNodesFromLayer(const LayerParameter& lp) {
       processInputLayer(lp);
       break;
     case CaffeOpType::convolution:
-      outputs = _opCreator.createConv2D(inputs, params, lp.convolution_param());
+      outputs = _opCreator.convertConvolution(inputs, params, lp.convolution_param());
       break;
     case CaffeOpType::innerProduct:
-      outputs = _opCreator.createFullyConnected(inputs, params, lp.inner_product_param());
+      outputs = _opCreator.convertInnerProduct(inputs, params, lp.inner_product_param());
       break;
     case CaffeOpType::pooling:
-      outputs = _opCreator.createPool(inputs, params, lp.pooling_param());
+      outputs = _opCreator.convertPooling(inputs, params, lp.pooling_param());
       break;
     case CaffeOpType::concat:
-      outputs = _opCreator.createConcat(inputs, params, lp.concat_param());
+      outputs = _opCreator.convertConcat(inputs, params, lp.concat_param());
       break;
     case CaffeOpType::reshape:
-      outputs = _opCreator.createReshape(inputs, params, lp.reshape_param());
+      outputs = _opCreator.convertReshape(inputs, params, lp.reshape_param());
       break;
     case CaffeOpType::ReLU:
-      outputs = _opCreator.createRelu(inputs, params, lp.relu_param());
+      outputs = _opCreator.convertReLU(inputs, params, lp.relu_param());
       break;
     case CaffeOpType::softmax:
-      outputs = _opCreator.createSoftmax(inputs, params, lp.softmax_param());
+      outputs = _opCreator.convertSoftmax(inputs, params, lp.softmax_param());
       break;
     case CaffeOpType::scale:
-      outputs = _opCreator.createScale(inputs, params, lp.scale_param());
+      outputs = _opCreator.convertScale(inputs, params, lp.scale_param());
       break;
     case CaffeOpType::batchNorm:
-      outputs = _opCreator.createBatchNorm(inputs, params, lp.batch_norm_param());
+      outputs = _opCreator.convertBatchNorm(inputs, params, lp.batch_norm_param());
       break;
     case CaffeOpType::dropout:
-      outputs = _opCreator.createDropout(inputs, params, lp.dropout_param());
+      outputs = _opCreator.convertDropout(inputs, params, lp.dropout_param());
       break;
     case CaffeOpType ::tanh:
-      outputs = _opCreator.createTanh(inputs, params, lp.tanh_param());
+      outputs = _opCreator.convertTanH(inputs, params, lp.tanh_param());
       break;
     case CaffeOpType ::ELU:
-      outputs = _opCreator.createELU(inputs, params, lp.elu_param());
+      outputs = _opCreator.convertELU(inputs, params, lp.elu_param());
       break;
     case CaffeOpType ::eltwise:
-      outputs = _opCreator.createEltwise(inputs, params, lp.eltwise_param());
+      outputs = _opCreator.convertEltwise(inputs, params, lp.eltwise_param());
       break;
     case CaffeOpType ::deconvolution:
-      outputs = _opCreator.createDeconvolution(inputs, params, lp.convolution_param());
+      outputs = _opCreator.convertDeconvolution(inputs, params, lp.convolution_param());
       break;
     case CaffeOpType::split:
       prev = _opsForBlobsTheyOutput[lp.bottom(0)];
@@ -164,19 +164,19 @@ void CaffeImporter::collectUnsupportedOp(const LayerParameter& lp) {
       break;
     case CaffeOpType::deconvolution:
     case CaffeOpType::convolution:
-      _opCreator.checkConv2D(lp.convolution_param(), _problemsOpSet);
+      _opCreator.checkConvolution(lp.convolution_param(), _problemsOpSet);
       break;
     case CaffeOpType::innerProduct:
-      _opCreator.checkFullyConnected(lp.inner_product_param(), _problemsOpSet);
+      _opCreator.checkInnerProduct(lp.inner_product_param(), _problemsOpSet);
       break;
     case CaffeOpType::pooling:
-      _opCreator.checkPool(lp.pooling_param(), _problemsOpSet);
+      _opCreator.checkPooling(lp.pooling_param(), _problemsOpSet);
       break;
     case CaffeOpType::reshape:
       _opCreator.checkReshape(lp.reshape_param(), _problemsOpSet);
       break;
     case CaffeOpType::ReLU:
-      _opCreator.checkRelu(lp.relu_param(), _problemsOpSet);
+      _opCreator.checkReLU(lp.relu_param(), _problemsOpSet);
       break;
     case CaffeOpType::batchNorm:
       params = createOpParams(lp);
diff --git a/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp b/contrib/nnc/passes/caffe_frontend/caffe_op_creator.cpp
index 3c5abf2..746899a 100644
@@ -215,8 +215,8 @@ fixGroupedKernel(int groups, std::shared_ptr<IrTensor> folded_kernel) {
   return unfold_kernel;
 }
 
-void CaffeOpCreator::checkConv2D(const caffe::ConvolutionParameter& opts,
-                            std::set<std::string>& problems_op_set) {
+void CaffeOpCreator::checkConvolution(const caffe::ConvolutionParameter& opts,
+                                      std::set<std::string>& problems_op_set) {
   assert(opts.stride_size() <= 2);
 
   if (opts.pad_size() != 0 && (opts.has_pad_h() || opts.has_pad_w()))
@@ -226,8 +226,8 @@ void CaffeOpCreator::checkConv2D(const caffe::ConvolutionParameter& opts,
     problems_op_set.insert("Conv2D: Unsupported number of pads");
 }
 
-std::vector<INode*> CaffeOpCreator::createConv2D(InputOps inputs, InputParams params,
-                                                const caffe::ConvolutionParameter& opts) {
+std::vector<INode*> CaffeOpCreator::convertConvolution(InputOps inputs, InputParams params,
+                                                       const caffe::ConvolutionParameter& opts) {
   ops::PaddingType pad_type = ops::PaddingType::Custom;
   Shape stride_shape = getConvStride(opts);
 
@@ -271,8 +271,8 @@ std::vector<INode*> CaffeOpCreator::createConv2D(InputOps inputs, InputParams pa
     return outputs;
 }
 
-void CaffeOpCreator::checkFullyConnected(const caffe::InnerProductParameter& opts,
-                                    std::set<std::string>& problemsOpSet) {
+void CaffeOpCreator::checkInnerProduct(const caffe::InnerProductParameter& opts,
+                                       std::set<std::string>& problemsOpSet) {
   if (opts.has_axis() && opts.axis() != 1)
     problemsOpSet.insert("Fully Connected: layer axis param is not supported yet");
 
@@ -288,7 +288,7 @@ void CaffeOpCreator::checkFullyConnected(const caffe::InnerProductParameter& opt
  * implement it correctly.
  * @todo Support axis and transpose parameters as needed.
  */
-std::vector<INode*> CaffeOpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
+std::vector<INode*> CaffeOpCreator::convertInnerProduct(InputOps& inputs, InputParams& params,
                                                         const caffe::InnerProductParameter& opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
   // It is needed because Caffe InnerProduct layer takes NCHW input and flattens the CHW part.
@@ -306,15 +306,15 @@ std::vector<INode*> CaffeOpCreator::createFullyConnected(InputOps& inputs, Input
     return fc_outputs;
 }
 
-std::vector<INode*> CaffeOpCreator::createConcat(InputOps inputs, InputParams params,
-                                                const caffe::ConcatParameter& opts) {
+std::vector<INode*> CaffeOpCreator::convertConcat(InputOps inputs, InputParams params,
+                                                  const caffe::ConcatParameter& opts) {
   (void) params;
 
   return createOp<ops::ConcatOp>(inputs, inputs.size(), getAxisValue(opts));
 }
 
-void CaffeOpCreator::checkPool(const caffe::PoolingParameter& opts,
-                          std::set<std::string>& problemsOpSet) {
+void CaffeOpCreator::checkPooling(const caffe::PoolingParameter& opts,
+                                  std::set<std::string>& problemsOpSet) {
   if (opts.has_global_pooling() && opts.global_pooling())
     problemsOpSet.insert("Pooling: pooling layer global_pooling param is not supported yet");
 
@@ -326,8 +326,8 @@ void CaffeOpCreator::checkPool(const caffe::PoolingParameter& opts,
     problemsOpSet.insert("Pooling: conflicting padding properties in pooling");
 }
 
-std::vector<INode*> CaffeOpCreator::createPool(InputOps inputs, InputParams params,
-                                              const caffe::PoolingParameter& opts) {
+std::vector<INode*> CaffeOpCreator::convertPooling(InputOps inputs, InputParams params,
+                                                   const caffe::PoolingParameter& opts) {
   (void) params;
 
   Shape window_shape = getPoolWindowShape(opts);
@@ -343,7 +343,7 @@ std::vector<INode*> CaffeOpCreator::createPool(InputOps inputs, InputParams para
       border_type = ops::PoolOp::BorderType::EMPTY;
       break;
     default:
-      // This check performed in checkPool()
+      // This check performed in checkPooling()
       assert(false);
   }
 
@@ -363,8 +363,8 @@ std::vector<INode*> CaffeOpCreator::createPool(InputOps inputs, InputParams para
   return pooling;
 }
 
-std::vector<INode*> CaffeOpCreator::createSoftmax(InputOps inputs, InputParams params,
-                                                 const caffe::SoftmaxParameter& opts) {
+std::vector<INode*> CaffeOpCreator::convertSoftmax(InputOps inputs, InputParams params,
+                                                   const caffe::SoftmaxParameter& opts) {
   (void) params;
 
   return createOp<ops::SoftmaxOp>(inputs, getAxisValue(opts));
@@ -391,8 +391,8 @@ void CaffeOpCreator::checkReshape(const caffe::ReshapeParameter& opts,
  * @todo Decide how to react to the absence of "shape" parameter.
  * @todo Support zero values in "shape" parameter.
  */
-std::vector<INode*> CaffeOpCreator::createReshape(InputOps inputs, InputParams params,
-                                                 const caffe::ReshapeParameter& opts) {
+std::vector<INode*> CaffeOpCreator::convertReshape(InputOps inputs, InputParams params,
+                                                   const caffe::ReshapeParameter& opts) {
   (void) params;
 
   auto outputs = createOp<ops::ReshapeOp>(inputs);
@@ -403,21 +403,21 @@ std::vector<INode*> CaffeOpCreator::createReshape(InputOps inputs, InputParams p
   return outputs;
 }
 
-void CaffeOpCreator::checkRelu(const caffe::ReLUParameter& opts,
-                          std::set<std::string>& problems_op_set) {
+void CaffeOpCreator::checkReLU(const caffe::ReLUParameter& opts,
+                               std::set<std::string>& problems_op_set) {
   if (opts.has_negative_slope())
     problems_op_set.insert("ReLU layer negative_slope param is not supported yet.");
 }
 
-std::vector<INode*> CaffeOpCreator::createRelu(InputOps inputs, InputParams params,
-                                              const caffe::ReLUParameter& opts) {
+std::vector<INode*> CaffeOpCreator::convertReLU(InputOps inputs, InputParams params,
+                                                const caffe::ReLUParameter& opts) {
   (void) params;
 
   return createOp<ops::ReluOp>(inputs);
 }
 
 std::vector<INode*>
-CaffeOpCreator::createScale(InputOps inputs, InputParams params, const ScaleParameter& opts) {
+CaffeOpCreator::convertScale(InputOps inputs, InputParams params, const ScaleParameter& opts) {
   auto outputs = createOp<ops::ScaleOp>(inputs, std::move(*params[0]));
   // bias_term is optional (so might not be present) and defaults to true
   if (!opts.has_bias_term() || opts.bias_term())
@@ -434,7 +434,8 @@ void CaffeOpCreator::checkBatchNorm(const caffe::BatchNormParameter& opts, Input
 }
 
 std::vector<INode*>
-CaffeOpCreator::createBatchNorm(InputOps inputs, InputParams params, const BatchNormParameter& opts) {
+CaffeOpCreator::convertBatchNorm(InputOps inputs, InputParams params,
+                                 const BatchNormParameter& opts) {
   const float MAFRAC_DEF = 0.999f;
   const float EPS_DEF = 1e-5f;
   // optional params may be left out, so we fill them with defalt values (lifted from caffe docs)
@@ -471,7 +472,7 @@ CaffeOpCreator::createBatchNorm(InputOps inputs, InputParams params, const Batch
 }
 
 std::vector<INode*>
-CaffeOpCreator::createDropout(InputOps inputs, InputParams params, const DropoutParameter& opts) {
+CaffeOpCreator::convertDropout(InputOps inputs, InputParams params, const DropoutParameter& opts) {
   (void) params;
   const float DROPOUT_RATIO_DEF = 0.5f;
   // optional params may be left out, so we fill them with defalt values (lifted from caffe docs)
@@ -480,9 +481,8 @@ CaffeOpCreator::createDropout(InputOps inputs, InputParams params, const Dropout
 }
 
 std::vector<INode*>
-  CaffeOpCreator::createDeconvolution( std::vector<INode*> &inputs,
-                                  std::vector<std::shared_ptr<IrTensor>> &params,
-                                  const ConvolutionParameter &opts) noexcept {
+  CaffeOpCreator::convertDeconvolution(InputOps& inputs, InputParams& params,
+                                       const ConvolutionParameter& opts) noexcept {
   ops::PaddingType pad_type = ops::PaddingType::Custom;
   Shape stride_shape = getStride(opts);
 
@@ -513,24 +513,21 @@ std::vector<INode*>
 }
 
 std::vector<INode*>
-CaffeOpCreator::createELU(std::vector<INode*>& inputs,
-                     std::vector<std::shared_ptr<IrTensor>>& params,
-                     const ELUParameter& opts) noexcept {
+CaffeOpCreator::convertELU(InputOps& inputs, InputParams& params,
+                           const ELUParameter& opts) noexcept {
   const float ELU_ALPHA= 1.0f;
   float elu_alpha = opts.has_alpha() ? opts.alpha() : ELU_ALPHA;
   return createOp<ops::EluOp>(inputs, elu_alpha);
 }
 
-std::vector<INode*> CaffeOpCreator::createTanh(std::vector<INode*>& inputs,
-                                          std::vector<std::shared_ptr<IrTensor>>&,
-                                          const TanHParameter&) noexcept {
+std::vector<INode*> CaffeOpCreator::convertTanH(InputOps& inputs, InputParams&,
+                                                const TanHParameter&) noexcept {
   return createOp<ops::TanhOp>(inputs);
 }
 
 
-std::vector<INode*> CaffeOpCreator::createEltwise(std::vector<INode*>& inputs,
-                                             std::vector<std::shared_ptr<IrTensor>>& params,
-                                             const EltwiseParameter& opts) noexcept {
+std::vector<INode*> CaffeOpCreator::convertEltwise(InputOps inputs, InputParams& params,
+                                                   const EltwiseParameter& opts) noexcept {
   (void) params;
   ops::ElementwiseOp::OpType optype;
   switch (opts.operation()){
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp b/contrib/nnc/passes/tflite_frontend/tflite_importer.cpp
index 148b4ee..0518c7b 100644
@@ -70,17 +70,18 @@ void TfliteImporter::processUnsupportedOp(const Operator* op) {
   switch (opcode) {
     case BuiltinOperator_MAX_POOL_2D:
     case BuiltinOperator_AVERAGE_POOL_2D:
-      _opCreator->checkPool(op->builtin_options_as<Pool2DOptions>(), _problemsOpSet);
+      _opCreator->checkPool2D(op->builtin_options_as<Pool2DOptions>(), _problemsOpSet);
       break;
     case BuiltinOperator_CONCATENATION:
-      _opCreator->checkConcat(op->builtin_options_as<ConcatenationOptions>(), _problemsOpSet);
+      _opCreator->checkConcatenation(op->builtin_options_as<ConcatenationOptions>(),
+                                     _problemsOpSet);
       break;
     case BuiltinOperator_CONV_2D:
       _opCreator->checkConv2D(op->builtin_options_as<Conv2DOptions>(), _problemsOpSet);
       break;
     case BuiltinOperator_DEPTHWISE_CONV_2D:
-      _opCreator->checkDepthConv2D(op->builtin_options_as<DepthwiseConv2DOptions>(),
-                                   _problemsOpSet);
+      _opCreator->checkDepthwiseConv2D(op->builtin_options_as<DepthwiseConv2DOptions>(),
+                                       _problemsOpSet);
       break;
     case BuiltinOperator_FULLY_CONNECTED:
       _opCreator->checkFullyConnected(op->builtin_options_as<FullyConnectedOptions>(),
@@ -149,28 +150,30 @@ void TfliteImporter::walkOperator(const Operator* op) {
   unsigned int opcode = (*_opcodes)[op->opcode_index()]->builtin_code();
   switch (opcode) {
     case BuiltinOperator_CONV_2D:
-      outputs = _opCreator->createConv2D(inputs, params, op->builtin_options_as<Conv2DOptions>());
+      outputs = _opCreator->convertConv2D(inputs, params, op->builtin_options_as<Conv2DOptions>());
       break;
     case BuiltinOperator_DEPTHWISE_CONV_2D:
-      outputs = _opCreator->createDepthConv2D(inputs, params,
-                                              op->builtin_options_as<DepthwiseConv2DOptions>());
+      outputs = _opCreator->convertDepthwiseConv2D(inputs, params,
+                                                   op->builtin_options_as<DepthwiseConv2DOptions>());
       break;
     case BuiltinOperator_MAX_POOL_2D:
-      outputs = _opCreator->createMaxPool(inputs, params, op->builtin_options_as<Pool2DOptions>());
+      outputs = _opCreator->convertMaxPool2D(inputs, params,
+                                             op->builtin_options_as<Pool2DOptions>());
       break;
     case BuiltinOperator_AVERAGE_POOL_2D:
-      outputs = _opCreator->createAvgPool(inputs, params, op->builtin_options_as<Pool2DOptions>());
+      outputs = _opCreator->convertAveragePool2D(inputs, params,
+                                                 op->builtin_options_as<Pool2DOptions>());
       break;
     case BuiltinOperator_CONCATENATION:
-      outputs = _opCreator->createConcat(inputs, params,
-                                         op->builtin_options_as<ConcatenationOptions>());
+      outputs = _opCreator->convertConcatenation(inputs, params,
+                                                 op->builtin_options_as<ConcatenationOptions>());
       break;
     case BuiltinOperator_RESHAPE:
-      outputs = _opCreator->createReshape(inputs, params, op->builtin_options_as<ReshapeOptions>());
+      outputs = _opCreator->convertReshape(inputs, params, op->builtin_options_as<ReshapeOptions>());
       break;
     case BuiltinOperator_FULLY_CONNECTED:
-      outputs = _opCreator->createFullyConnected(inputs, params,
-                                                 op->builtin_options_as<FullyConnectedOptions>());
+      outputs = _opCreator->convertFullyConnected(inputs, params,
+                                                  op->builtin_options_as<FullyConnectedOptions>());
       break;
     case BuiltinOperator_SOFTMAX:
       outputs = _opCreator->createSoftmax(inputs, params, op->builtin_options_as<SoftmaxOptions>());
diff --git a/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp b/contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
index 00bb13b..d46cdb0 100644
@@ -34,12 +34,13 @@ using namespace ::tflite;
 
 namespace nnc {
 
-void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts, std::set<std::string>& problems_op_set) {
+void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
+                                  std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createConv2D(InputOps inputs, InputParams params,
-                                                const Conv2DOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params,
+                                                       const Conv2DOptions* opts) {
   auto outputs = createOp<ops::Conv2DOp>(inputs, ActivationFunctionType_NONE, std::move(*params[0]),
                                          Shape{static_cast<int32_t>(opts->stride_h()),
                                                static_cast<int32_t>(opts->stride_w()), 1},
@@ -48,13 +49,13 @@ std::vector<INode::Ref> TFLiteOpCreator::createConv2D(InputOps inputs, InputPara
                                   std::move(*params[1]));
 }
 
-void TFLiteOpCreator::checkDepthConv2D(const DepthwiseConv2DOptions* opts,
-                                 std::set<std::string>& problems_op_set) {
+void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
+                                           std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createDepthConv2D(InputOps inputs, InputParams params,
-                                                     const DepthwiseConv2DOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params,
+                                                                const DepthwiseConv2DOptions* opts) {
   auto outputs = createOp<ops::DepthwiseConv2DOp>(
           inputs, ActivationFunctionType_NONE, std::move(*params[0]),
           Shape{static_cast<int32_t>(opts->stride_h()),
@@ -64,24 +65,25 @@ std::vector<INode::Ref> TFLiteOpCreator::createDepthConv2D(InputOps inputs, Inpu
                                   std::move(*params[1]));
 }
 
-void TFLiteOpCreator::checkConcat(const ConcatenationOptions* opts,
-                            std::set<std::string>& problems_op_set) {
+void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
+                                         std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createConcat(InputOps inputs, InputParams params,
-                                                const ConcatenationOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertConcatenation(InputOps inputs, InputParams params,
+                                                              const ConcatenationOptions* opts) {
   // Decrementing axis to account for the unnecessary batch dimension
   return createOp<ops::ConcatOp>(inputs, opts->fused_activation_function(), inputs.size(),
                                  opts->axis() - 1);
 }
 
-void TFLiteOpCreator::checkPool(const Pool2DOptions* opts, std::set<std::string>& problems_op_set) {
+void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
+                                  std::set<std::string>& problems_op_set) {
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createMaxPool(InputOps inputs, InputParams params,
-                                                 const Pool2DOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params,
+                                                          const Pool2DOptions* opts) {
   return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
                                Shape{static_cast<int32_t>(opts->filter_height()),
                                      static_cast<int32_t>(opts->filter_width()), 1},
@@ -91,8 +93,8 @@ std::vector<INode::Ref> TFLiteOpCreator::createMaxPool(InputOps inputs, InputPar
                                ops::PoolOp::BorderType::EMPTY);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createAvgPool(InputOps inputs, InputParams params,
-                                                 const Pool2DOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertAveragePool2D(InputOps inputs, InputParams params,
+                                                              const Pool2DOptions* opts) {
   return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
                                Shape{static_cast<int32_t>(opts->filter_height()),
                                      static_cast<int32_t>(opts->filter_width()), 1},
@@ -108,8 +110,8 @@ std::vector<INode::Ref> TFLiteOpCreator::createSoftmax(InputOps inputs, InputPar
   return createOp<ops::SoftmaxOp>(inputs, ActivationFunctionType_NONE, -1);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createReshape(InputOps inputs, InputParams params,
-                                                 const ReshapeOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params,
+                                                        const ReshapeOptions* opts) {
   auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
 
   // TODO: we should also support "-1" values in new_shape, which means that correct
@@ -125,8 +127,9 @@ void TFLiteOpCreator::checkFullyConnected(const FullyConnectedOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<INode::Ref> TFLiteOpCreator::createFullyConnected(InputOps& inputs, InputParams& params,
-                                                        const FullyConnectedOptions* opts) {
+std::vector<INode::Ref> TFLiteOpCreator::convertFullyConnected(InputOps& inputs,
+                                                               InputParams& params,
+                                                               const FullyConnectedOptions* opts) {
   // Add Reshape operation to make sure the input for FC operation has shape [1, fcInputSize]
   auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
   int32_t fcInputSize = params[0]->getShape().dim(0);
@@ -147,7 +150,8 @@ void TFLiteOpCreator::checkActivationType(ActivationFunctionType activation_type
                            + EnumNamesActivationFunctionType()[activation_type]);
 }
 
-INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activation_type) {
+INode::Ref TFLiteOpCreator::addFusedActivation(INode::Ref input,
+                                               ActivationFunctionType activation_type) {
   INode::Ref activation;
 
   if (activation_type != ActivationFunctionType_NONE) {