Add TFLite-to-Model IR operator creator implementation (#407)
author Dmitry Mozolev/AI Tools Lab /SRR/Engineer/삼성전자 <d.mozolev@samsung.com>
Fri, 29 Jun 2018 11:54:15 +0000 (14:54 +0300)
committer Sergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Fri, 29 Jun 2018 11:54:15 +0000 (20:54 +0900)
Add TFLite-to-Model IR operator creator implementation

Implementation of the operator creator API functions, each of which
builds the corresponding Model IR operator from a TFLite operator.

Signed-off-by: Dmitry Mozolev <d.mozolev@samsung.com>
contrib/nnc/libs/frontend/tflite/include/tflite_op_creator.h
contrib/nnc/libs/frontend/tflite/src/tflite_op_creator.cpp

index 4fece1d..20dd975 100644 (file)
@@ -12,6 +12,8 @@
 #include "nnc/core/linalg/TensorVariant.h"
 #include "nncc/core/ADT/tensor/Shape.h"
 
+#include "nnc/core/IR/model/operations/common.h"
+
 #include "schema_v3.h"
 #include "shape_helper.h"
 
@@ -58,6 +60,9 @@ public:
 private:
   Graph *graph = nullptr;
 
+  std::map<Padding, ops::PaddingType> paddingMap = {{Padding_SAME, ops::PaddingType::Same},
+                                                    {Padding_VALID, ops::PaddingType::Valid}};
+
   INode::Ref addFusedActivation(INode::Ref input, ActivationFunctionType activationType);
   void connectInputs(INode::Ref op, std::vector<INode::Ref> &inputs);
 
index fce484e..9e87d20 100644 (file)
@@ -1,7 +1,14 @@
 #include "tflite_op_creator.h"
 
+#include "nnc/core/IR/model/operations/concat_op.h"
+#include "nnc/core/IR/model/operations/conv_2d_op.h"
+#include "nnc/core/IR/model/operations/depthwise_conv2d_op.h"
 #include "nnc/core/IR/model/operations/relu_op.h"
 #include "nnc/core/IR/model/operations/capped_relu_op.h"
+#include "nnc/core/IR/model/operations/softmax_op.h"
+#include "nnc/core/IR/model/operations/pool_op.h"
+#include "nnc/core/IR/model/operations/bias_add_op.h"
+#include "nnc/core/IR/model/operations/reshape_op.h"
 
 namespace nncc
 {
@@ -15,43 +22,74 @@ namespace tflite
 std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
                                                 const Conv2DOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // params[0] is presumably the convolution kernel tensor and params[1] the bias
+  // tensor (TFLite passes weights as constant inputs) -- TODO confirm the order
+  // the importer supplies. Moving out of *params assumes the caller does not
+  // reuse these tensors afterwards.
+  // Stride shape is {stride_h, stride_w, 1}; the trailing 1 covers the channel axis.
+  auto outputs = createOp<ops::Conv2DOp>(inputs, ActivationFunctionType_NONE, std::move(*params[0]),
+                                         Shape{static_cast<uint32_t>(opts->stride_h()),
+                                               static_cast<uint32_t>(opts->stride_w()), 1},
+                                         paddingMap[opts->padding()]);
+  // Bias is modeled as a separate op; the TFLite fused activation is applied
+  // after the bias addition, so the conv itself gets ActivationFunctionType_NONE.
+  return createOp<ops::BiasAddOp>(outputs, opts->fused_activation_function(),
+                                  std::move(*params[1]));
 }
 
 std::vector<INode::Ref> OpCreator::createDepthConv2D(InputOps inputs, InputParams params,
                                                      const DepthwiseConv2DOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // params[0] is presumably the depthwise kernel and params[1] the bias -- TODO
+  // confirm the parameter order against the importer.
+  // Stride shape is {stride_h, stride_w, 1}; the trailing 1 covers the channel axis.
+  // NOTE(review): opts->depth_multiplier() is not read here -- verify it is
+  // handled elsewhere or always 1.
+  auto outputs = createOp<ops::DepthwiseConv2DOp>(
+          inputs, ActivationFunctionType_NONE, std::move(*params[0]),
+          Shape{static_cast<uint32_t>(opts->stride_h()),
+                static_cast<uint32_t>(opts->stride_w()), 1},
+          paddingMap[opts->padding()]);
+  // Bias is a separate op; the fused activation is attached after it.
+  return createOp<ops::BiasAddOp>(outputs, opts->fused_activation_function(),
+                                  std::move(*params[1]));
 }
 
 std::vector<INode::Ref> OpCreator::createConcat(InputOps inputs, InputParams params,
                                                 const ConcatenationOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // Decrementing axis to account for the unnecessary batch dimension
+  // (Model IR drops TFLite's leading batch axis, so TFLite axis N maps to N-1).
+  // NOTE(review): this yields -1 for axis == 0 (concat along batch) -- verify
+  // the importer guarantees axis > 0.
+  return createOp<ops::ConcatOp>(inputs, opts->fused_activation_function(), inputs.size(),
+                                 opts->axis() - 1);
 }
 
 std::vector<INode::Ref> OpCreator::createMaxPool(InputOps inputs, InputParams params,
                                                  const Pool2DOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // Pooling window {filter_height, filter_width, 1} and stride
+  // {stride_h, stride_w, 1}: the trailing 1 leaves the channel axis untouched.
+  // The TFLite fused activation is forwarded directly to the pool op.
+  return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
+                               Shape{static_cast<uint32_t>(opts->filter_height()),
+                                     static_cast<uint32_t>(opts->filter_width()), 1},
+                               Shape{static_cast<uint32_t>(opts->stride_h()),
+                                     static_cast<uint32_t>(opts->stride_w()), 1},
+                               ops::PoolOp::PoolingType::MAX, paddingMap[opts->padding()]);
 }
 
 std::vector<INode::Ref> OpCreator::createAvgPool(InputOps inputs, InputParams params,
                                                  const Pool2DOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // Identical to createMaxPool except for PoolingType::AVG: window
+  // {filter_height, filter_width, 1}, stride {stride_h, stride_w, 1}, with the
+  // trailing 1 leaving the channel axis untouched.
+  return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(),
+                               Shape{static_cast<uint32_t>(opts->filter_height()),
+                                     static_cast<uint32_t>(opts->filter_width()), 1},
+                               Shape{static_cast<uint32_t>(opts->stride_h()),
+                                     static_cast<uint32_t>(opts->stride_w()), 1},
+                               ops::PoolOp::PoolingType::AVG, paddingMap[opts->padding()]);
 }
 
 std::vector<INode::Ref> OpCreator::createSoftmax(InputOps inputs, InputParams params,
                                                  const SoftmaxOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // TODO: here assuming that softmax is applied to a 1-d tensor
+  // Axis 0 is hard-coded under that assumption. NOTE(review): opts->beta()
+  // (TFLite's softmax scaling factor) is ignored -- verify it is always 1.0
+  // for the models this importer targets.
+  return createOp<ops::SoftmaxOp>(inputs, ActivationFunctionType_NONE, 0);
 }
 
 std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams params,
                                                  const ReshapeOptions *opts)
 {
-  throw std::runtime_error{"Not yet implemented"};
+  // Reshape performs no computation; the target shape from the TFLite options
+  // is recorded directly on the op's output so later passes can use it.
+  auto outputs = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);
+
+  // TODO: we should also support "-1" values in new_shape, which means that correct
+  // shape values must be calculated. Better do it in the shape inference module.
+  Shape newShape = common::ShapeHelper::createShape(*opts->new_shape(), opts->new_shape()->size());
+
+  // A single output is assumed here (outputs[0]) -- TODO confirm ReshapeOp
+  // always produces exactly one output node.
+  outputs[0]->getOperation()->setOutputShape(0, newShape);
+  return outputs;
 }
 
 INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activationType)