[nnc] Support Elementwise Add and Mul operations with weights array (#2628)
authorПавел Ильютченко/AI Tools Lab /SRR/Engineer/삼성전자 <p.iliutchenk@samsung.com>
Mon, 17 Dec 2018 19:07:17 +0000 (22:07 +0300)
committerEfimov Alexander/AI Tools Lab/./Samsung Electronics <a.efimov@samsung.com>
Mon, 17 Dec 2018 19:07:17 +0000 (22:07 +0300)
* Add support for Elementwise Add and Mul operations with weights array
* Support constant operation in soft cpu backend
* Remove ref type from the InputParams and InputOps aliases and add references to the function parameters instead

Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
* Add refs for all creates

Signed-off-by: Pavel Iliutchenko <p.iliutchenk@samsung.com>
contrib/nnc/core/modelIR/Graph.cpp
contrib/nnc/include/core/modelIR/Graph.h
contrib/nnc/passes/soft_backend/ModelAnalyzer.cpp
contrib/nnc/passes/soft_backend/ModelAnalyzer.h
contrib/nnc/passes/soft_backend/code_snippets/cpp_operations.def
contrib/nnc/passes/tflite_frontend/tflite_op_creator.cpp
contrib/nnc/passes/tflite_frontend/tflite_op_creator.h

index fd9eb87..462a712 100644 (file)
@@ -128,6 +128,14 @@ std::vector<Operation*> Graph::collectInputs() const {
   return res;
 }
 
+std::vector<Operation*> Graph::collectConstants() const {
+  std::vector<Operation*> res;
+  for (auto& e : _constants) {
+    res.emplace_back(e);
+  }
+  return res;
+}
+
 std::vector<Operation*> Graph::collectOutputs() const {
   std::vector<Operation*> res;
   for (auto& e : _outputs) {
index edd227d..3456c19 100644 (file)
@@ -62,6 +62,12 @@ class Graph {
   std::vector<Operation*> collectInputs() const;
 
   /**
+   * @brief Returns all constants from graph
+   * @returns vector containing all graph constant nodes
+   */
+  std::vector<Operation*> collectConstants() const;
+
+  /**
    * @brief Returns all outputs from graph
    * @returns vector containing all graph outputs nodes
    */
index a2ef5a4..7f278a0 100644 (file)
@@ -67,11 +67,16 @@ void ModelAnalyzer::addOpDescr(Operation* op, const string& opName) {
   size_t nodeTid = INVALID_TENSOR_ID;
   if (op->getPrevNodes().empty()) {
     if (auto* p2const = dynamic_cast<ops::ConstantOp*>(op)) {
-      type = OpDescr::Type::CONSTANT;
+      type = OpDescr::Type::ORDINARY;
+
       auto* shape = const_cast<Shape*> (&p2const->getOutputShape(0));
-      nodeTid = allocateTensor(name, TensorDescription::Type::CONSTANT, shape);
+      /*
+       * FIXME allocateTensor get const Shape
+       */
+      nodeTid = allocateTensor(name, TensorDescription::Type::ORDINARY, shape);
     } else {
       // process input op
+      assert(op->getType() == Operation::Type::variable);
       Shape inputShape = op->getOutputShape(0);
       nodeTid = allocateTensor(name, TensorDescription::Type::IN, &inputShape);
       type = OpDescr::Type::IN;
@@ -90,10 +95,8 @@ void ModelAnalyzer::addOpDescr(Operation* op, const string& opName) {
   nodeOutputs.push_back(nodeTid);
   // process op outputs
   // consider op as output if it has no consumers
-  if (op->getNextNodes().empty() && (type != OpDescr::Type::CONSTANT)) {
-    assert(type == OpDescr::Type::OUT);
+  if (op->getNextNodes().empty() && (type == OpDescr::Type::OUT))
     _outputs.push_back(nodeTid);
-  }
   // process op inputs
   vector<size_t> nodeInputs;
   for (const IODescriptor &d: op->getPrevNodes()) {
@@ -135,8 +138,13 @@ void ModelAnalyzer::analyze(const mir::Graph* g) {
   // Set contains pointer to node if it is visited by DFS
   set<Operation*> visited;
 
+  // Collect all inputs and constants
+  vector<Operation*> init_ops(g->collectInputs());
+  vector<Operation*> constant_ops(g->collectConstants());
+  init_ops.insert(init_ops.end(), constant_ops.begin(), constant_ops.end());
+
   // Walk all network inputs
-  for (Operation* in : g->collectInputs()) {
+  for (Operation* in : init_ops) {
     assert(dynamic_cast<ops::VariableOp*>(in) || dynamic_cast<ops::ConstantOp*>(in));
     if (!visited.count(in)) {
       visited.insert(in);
index e01806e..04a56cb 100644 (file)
@@ -45,8 +45,7 @@ struct TensorDescription {
   enum class Type {
     IN,
     OUT,
-    ORDINARY,
-    CONSTANT
+    ORDINARY
   };
   size_t _id;
   Type _type;
@@ -62,8 +61,7 @@ struct OpDescr {
   enum class Type {
     IN,
     OUT,
-    ORDINARY,
-    CONSTANT
+    ORDINARY
   };
 
   Type _type;
index 1547fe1..d837a9b 100644 (file)
@@ -169,6 +169,20 @@ static bool isAddrAligned(const void *data, int alignment)
   return (reinterpret_cast<uintptr_t>(data) % alignment) == 0;
 }
 
+static inline Tensor deserializeTensor(const char*& buf)
+{
+  int32_t d_type = deserializeT<int32_t>(buf);
+  assert(d_type == 1 && "Unknown data type");
+  int32_t element_size = deserializeT<int32_t>(buf);
+  assert(element_size == 4 && "Unsupported element size");
+  Shape shape = deserializeShape(buf);
+  const float* data = reinterpret_cast<const float*>(buf);
+
+  Tensor tensor(shape, const_cast<float*>(data));
+  buf += element_size * shape.getNumElems();
+  return tensor;
+}
+
 static inline Kernel deserializeKernel(const char *&buf)
 {
   int32_t dType = deserializeT<int32_t>(buf);
@@ -655,3 +669,7 @@ void gather(Tensor &out, const char *params, const Tensor &data, const Tensor &i
          shapeToRuntimeShape(indices.getShape()), indices.getData(),
          shapeToRuntimeShape(out.getShape()), out.getData());
 }
+
+void constant(Tensor& out, const char* params) {
+  out = deserializeTensor(params);
+}
index e7bf27b..488a47b 100644 (file)
@@ -31,6 +31,7 @@
 #include "core/modelIR/operations/ReluOp.h"
 #include "core/modelIR/operations/ReshapeOp.h"
 #include "core/modelIR/operations/ResizeOp.h"
+#include "core/modelIR/operations/ScaleOp.h"
 #include "core/modelIR/operations/SigmoidOp.h"
 #include "core/modelIR/operations/SliceOp.h"
 #include "core/modelIR/operations/SoftmaxOp.h"
 #include "core/modelIR/operations/SqueezeOp.h"
 #include "core/modelIR/operations/TanhOp.h"
 
-#include "core/modelIR/Tensor.h"
-#include "core/modelIR/ShapeRange.h"
 #include "pass/PassException.h"
 
-#include "core/modelIR/Tensor.h"
 #include "core/modelIR/Shape.h"
 #include "core/modelIR/ShapeRange.h"
+#include "core/modelIR/Tensor.h"
 
 using namespace nnc::mir;
 using namespace ::tflite;
@@ -85,7 +84,7 @@ void TFLiteOpCreator::checkConv2D(const Conv2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertConv2D(InputOps& inputs, InputParams& params,
                                                             const Conv2DOptions* opts) {
   const auto& input_shape = inputs[0]->getOutputShape(0);
   const auto& kernel_shape = params[0].getShape();
@@ -108,7 +107,7 @@ void TFLiteOpCreator::checkDepthwiseConv2D(const DepthwiseConv2DOptions* opts,
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::convertDepthwiseConv2D(InputOps inputs, InputParams params,
+TFLiteOpCreator::convertDepthwiseConv2D(InputOps& inputs, InputParams& params,
                                         const DepthwiseConv2DOptions* opts) {
   const auto& input_shape = inputs[0]->getOutputShape(0);
   const auto& kernel_shape = params[0].getShape();
@@ -131,8 +130,8 @@ void TFLiteOpCreator::checkConcatenation(const ConcatenationOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertConcatenation(InputOps inputs,
-                                                                   InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertConcatenation(InputOps& inputs,
+                                                                   InputParams& params,
                                                                    const ConcatenationOptions* opts) {
   std::vector<IODescriptor> descriptors;
   for (auto i : inputs)
@@ -146,7 +145,7 @@ void TFLiteOpCreator::checkPool2D(const Pool2DOptions* opts,
   checkActivationType(opts->fused_activation_function(), problems_op_set);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps& inputs, InputParams& params,
                                                                const Pool2DOptions* opts) {
   auto& input_shape = inputs[0]->getOutputShape(0);
   Shape window_shape{opts->filter_height(), opts->filter_width()};
@@ -163,8 +162,8 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertMaxPool2D(InputOps inputs,
                                ops::PoolOp::RoundMode::floor);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps inputs,
-                                                                   InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps& inputs,
+                                                                   InputParams& params,
                                                                    const Pool2DOptions* opts) {
   auto& input_shape = inputs[0]->getOutputShape(0);
   Shape window_shape{opts->filter_height(), opts->filter_width()};
@@ -181,7 +180,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::convertAveragePool2D(InputOps inpu
                                ops::PoolOp::RoundMode::floor);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createSoftmax(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::createSoftmax(InputOps& inputs, InputParams& params,
                                                             const SoftmaxOptions* opts) {
   // Softmax in TFLite is always 2-D.
   assert(inputs[0]->getOutputShape(0).rank() == 2);
@@ -198,7 +197,7 @@ Shape shapeFromTensor(mir::Tensor<int32_t>&& t) {
   return temporary_shape;
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps& inputs, InputParams& params,
                                                           const ::tflite::SliceOptions*) {
   auto starts = shapeFromTensor(mir::Tensor<int32_t>(params[0]));
   auto sizes = shapeFromTensor(mir::Tensor<int32_t>(params[1]));
@@ -208,7 +207,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::createSlice(InputOps inputs, Input
                                 starts, sizes);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertReshape(InputOps& inputs, InputParams& params,
                                                              const ReshapeOptions* opts) {
   // TODO: we should also support "-1" values in new_shape, which means that correct
   // shape values must be calculated. Better do it in the shape inference module.
@@ -245,19 +244,35 @@ TFLiteOpCreator::convertResizeNN(InputOps& inputs, InputParams& params,
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createAdd(InputOps& inputs, InputParams&, const ::tflite::AddOptions* opts) {
+TFLiteOpCreator::createAdd(InputOps& inputs, const InputParams& params,
+                           const ::tflite::AddOptions* opts) {
   std::vector<IODescriptor> descriptors;
+
   for (auto i : inputs)
     descriptors.push_back(i->getOutput(0));
+
+  for (const auto& param : params) {
+    auto weights_tensor = createOp<ops::ConstantOp>(ActivationFunctionType_NONE, param);
+    descriptors.push_back(weights_tensor[0]->getOutput(0));
+  }
+
   return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
                                       ops::ElementwiseOp::OpType::add);
 }
 
 std::vector<mir::Operation*>
-TFLiteOpCreator::createMul(InputOps& inputs, InputParams&, const ::tflite::MulOptions* opts) {
+TFLiteOpCreator::createMul(InputOps& inputs, const InputParams& params,
+                           const ::tflite::MulOptions* opts) {
   std::vector<IODescriptor> descriptors;
+
   for (auto i : inputs)
     descriptors.push_back(i->getOutput(0));
+
+  for (const auto& param : params) {
+    auto weights_tensor = createOp<ops::ConstantOp>(ActivationFunctionType_NONE, param);
+    descriptors.push_back(weights_tensor[0]->getOutput(0));
+  }
+
   return createOp<ops::ElementwiseOp>(opts->fused_activation_function(), descriptors,
                                       ops::ElementwiseOp::OpType::mul);
 }
@@ -281,7 +296,7 @@ TFLiteOpCreator::createMax(InputOps& inputs, InputParams&,
                                       ops::ElementwiseOp::OpType::max);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::convertReducer(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::convertReducer(InputOps& inputs, InputParams& params,
                                                              ops::ReduceFOp::FuncType ft,
                                                              const ::tflite::ReducerOptions* opts) {
   assert(params.at(0).getShape().rank() <= 1 && "Must be 1-dim or 0-dim tensor");
@@ -356,7 +371,7 @@ mir::Operation* TFLiteOpCreator::addFusedActivation(mir::Operation* input,
 }
 
 std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(
-  InputOps inputs, InputParams params, const ::tflite::SqueezeOptions* opts) {
+  InputOps& inputs, InputParams& params, const ::tflite::SqueezeOptions* opts) {
 
   std::vector<int32_t> squeeze_dims{opts->squeeze_dims()->begin(), opts->squeeze_dims()->end()};
 
@@ -364,7 +379,7 @@ std::vector<mir::Operation*> TFLiteOpCreator::createSqueeze(
                                   squeeze_dims);
 }
 
-std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps inputs, InputParams params,
+std::vector<mir::Operation*> TFLiteOpCreator::createPad(InputOps& inputs, InputParams& params,
                                                const ::tflite::PadOptions *opts) {
   assert(params.size() == 1); // support pad with one param
   std::vector<std::pair<int32_t, int32_t>> paddings;
index 0d802c2..3ae321a 100644 (file)
@@ -42,39 +42,39 @@ using mir::Shape;
 
 class TFLiteOpCreator {
 public:
-  using InputOps = std::vector<mir::Operation*>&;
-  using InputParams = std::vector<mir::TensorVariant>&;
+  using InputOps = std::vector<mir::Operation*>;
+  using InputParams = std::vector<mir::TensorVariant>;
 
   explicit TFLiteOpCreator(Graph* g) : _graph(g) {}
 
-  std::vector<mir::Operation*> convertConv2D(InputOps, InputParams, const ::tflite::Conv2DOptions*);
+  std::vector<mir::Operation*> convertConv2D(InputOps&, InputParams&, const ::tflite::Conv2DOptions*);
 
-  std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps, InputParams,
+  std::vector<mir::Operation*> convertDepthwiseConv2D(InputOps&, InputParams&,
                                                       const ::tflite::DepthwiseConv2DOptions*);
 
-  std::vector<mir::Operation*> convertConcatenation(InputOps, InputParams,
+  std::vector<mir::Operation*> convertConcatenation(InputOps&, InputParams&,
                                                     const ::tflite::ConcatenationOptions*);
 
-  std::vector<mir::Operation*> convertMaxPool2D(InputOps, InputParams,
+  std::vector<mir::Operation*> convertMaxPool2D(InputOps&, InputParams&,
                                                 const ::tflite::Pool2DOptions*);
 
-  std::vector<mir::Operation*> convertAveragePool2D(InputOps, InputParams,
+  std::vector<mir::Operation*> convertAveragePool2D(InputOps&, InputParams&,
                                                     const ::tflite::Pool2DOptions*);
 
-  std::vector<mir::Operation*> convertReducer(InputOps, InputParams, ops::ReduceFOp::FuncType,
+  std::vector<mir::Operation*> convertReducer(InputOps&, InputParams&, ops::ReduceFOp::FuncType,
                                         const ::tflite::ReducerOptions*);
 
-  std::vector<mir::Operation*> createSoftmax(InputOps, InputParams, const ::tflite::SoftmaxOptions*);
+  std::vector<mir::Operation*> createSoftmax(InputOps&, InputParams&, const ::tflite::SoftmaxOptions*);
 
-  std::vector<mir::Operation*> createSlice(InputOps, InputParams, const ::tflite::SliceOptions*);
-
-  std::vector<mir::Operation*> convertReshape(InputOps, InputParams,
+  std::vector<mir::Operation*> createSlice(InputOps&, InputParams&, const ::tflite::SliceOptions*);
+  
+  std::vector<mir::Operation*> convertReshape(InputOps&, InputParams&,
                                               const ::tflite::ReshapeOptions*);
 
-  std::vector<mir::Operation*> convertFullyConnected(InputOps, InputParams,
+  std::vector<mir::Operation*> convertFullyConnected(InputOps&, InputParams&,
                                                      const ::tflite::FullyConnectedOptions*);
 
-  std::vector<mir::Operation*> convertResizeNN(InputOps, InputParams,
+  std::vector<mir::Operation*> convertResizeNN(InputOps&, InputParams&,
                                                const ::tflite::ResizeNearestNeighborOptions*);
 
   std::vector<mir::Operation*> createLogistic(InputOps& inputs, InputParams& params);
@@ -85,9 +85,9 @@ public:
                                              const ::tflite::SqueezeOptions* opts);
 
   /** @brief Elementwise Add  */
-  std::vector<mir::Operation*> createAdd(InputOps&, InputParams&, const ::tflite::AddOptions*);
+  std::vector<mir::Operation*> createAdd(InputOps&, const InputParams&, const ::tflite::AddOptions*);
   /** @brief Elementwise product */
-  std::vector<mir::Operation*> createMul(InputOps&, InputParams&, const ::tflite::MulOptions*);
+  std::vector<mir::Operation*> createMul(InputOps&, const InputParams&, const ::tflite::MulOptions*);
   /** @brief Elementwise maximum  */
   std::vector<mir::Operation*> createMax(InputOps&, InputParams&, const ::tflite::MaximumMinimumOptions*);
   /** @brief Elementwise division  */