Add functions preparing data for the TFLite-to-IR operator creator (#376)
author Dmitry Mozolev/AI Tools Lab /SRR/Engineer/삼성전자 <d.mozolev@samsung.com>
Fri, 29 Jun 2018 07:28:40 +0000 (10:28 +0300)
committer Sergey Vostokov/AI Tools Lab /SRR/Staff Engineer/삼성전자 <s.vostokov@samsung.com>
Fri, 29 Jun 2018 07:28:40 +0000 (16:28 +0900)
Add functions preparing data for the TFLite-to-IR operator creator

Operator creator interface functions take collections of operator
inputs and operator parameters as arguments. The two new methods
prepare this data: createOpInputs collects the IR nodes that produce
an operator's runtime inputs, and createOpParams extracts its
constant tensors (e.g. convolution weights) as parameters.
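
Below is a minimal standalone sketch of the split these methods
perform; FakeOp, NodeRef and the two maps are simplified stand-ins
for the FlatBuffers-generated API and the visitor's state, not the
real types. Tensors whose buffer carries data become parameters,
while the rest are wired to the IR nodes that produce them.

    #include <map>
    #include <memory>
    #include <vector>

    struct IrTensor {};        // stand-in for the real IR tensor class
    using NodeRef = int;       // stand-in for INode::Ref

    struct FakeOp {            // stand-in for the FlatBuffers Operator
      std::vector<int> inputs; // tensor indices, as in op->inputs()
    };

    int main() {
      // Tensor index -> constant data; only tensors backed by a
      // non-empty buffer appear here, runtime tensors have no entry.
      std::map<int, std::shared_ptr<IrTensor>> constData = {
          {1, std::make_shared<IrTensor>()}}; // e.g. conv weights
      // Tensor index -> IR node outputting it (runtime tensors only),
      // mirroring opsForTensorsTheyOutput.
      std::map<int, NodeRef> producers = {{0, 42}};

      FakeOp op{{0, 1}};

      std::vector<NodeRef> inputsForOp;
      std::vector<std::shared_ptr<IrTensor>> paramsForOp;
      for (int i : op.inputs) {
        auto c = constData.find(i);
        if (c != constData.end())
          paramsForOp.push_back(c->second);   // has data -> parameter
        else
          inputsForOp.push_back(producers.at(i)); // throws if missing
      }
      // Result: inputsForOp == {42}; paramsForOp holds the weights.
      return 0;
    }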

Signed-off-by: Dmitry Mozolev <d.mozolev@samsung.com>
contrib/nnc/libs/frontend/tflite/include/tflite_ir_visitor.h
contrib/nnc/libs/frontend/tflite/src/tflite_ir_visitor.cpp

index 78999c4..31178c9 100644 (file)
@@ -4,6 +4,7 @@
 #include "flatbuffers/flatbuffers.h"
 
 #include <map>
+#include <vector>
 #include <memory>
 
 #include "nnc/core/IR/model/graph/graph.h"
@@ -57,6 +58,10 @@ private:
   // that correspond to operations having these tensors as output.
   std::map<int, INode::Ref> opsForTensorsTheyOutput;
 
+  // These two methods prepare data for creating an IR node/operation.
+  std::vector<std::shared_ptr<IrTensor>> createOpParams(const Operator *op);
+  std::vector<INode::Ref> createOpInputs(const Operator *op);
+
   std::shared_ptr<IrTensor> createTensor(const Tensor *t, const Buffer *b);
   std::shared_ptr<IrTensor> convertTensorForConv(std::shared_ptr<IrTensor>);
 };
index 80071ec..754d1b6 100644 (file)
@@ -69,6 +69,63 @@ void IrVisitor::visit(const Tensor *t) { throw std::runtime_error{"Not yet imple
 void IrVisitor::visit(const OperatorCode *) { throw std::runtime_error{"Not yet implemented"}; }
 void IrVisitor::visit(const Buffer *) { throw std::runtime_error{"Not yet implemented"}; }
 
+std::vector<INode::Ref> IrVisitor::createOpInputs(const Operator *op)
+{
+  std::vector<INode::Ref> inputsForOp;
+
+  try
+  {
+    for (auto i : *(op->inputs()))
+    {
+      int bufferIdx = (*tensors)[i]->buffer();
+      if ((*buffers)[bufferIdx]->data() == nullptr)
+      {
+        // By this point, every runtime input of operation "op" should already
+        // have a corresponding Model IR operation that outputs it; this holds
+        // because the TFLite format lists operations in execution order.
+        inputsForOp.push_back(opsForTensorsTheyOutput.at(i));
+      }
+    }
+  }
+  catch (const std::out_of_range &e)
+  {
+    throw PluginException("Found a TFLite operator with an input tensor "
+                          "for which no producing Model IR node was created.");
+  }
+
+  return inputsForOp;
+}
+
+std::vector<std::shared_ptr<IrTensor>> IrVisitor::createOpParams(const Operator *op)
+{
+  std::vector<std::shared_ptr<IrTensor>> paramsForOp;
+
+  for (auto i : *(op->inputs()))
+  {
+    const Tensor *t = (*tensors)[i];
+    const Buffer *b = (*buffers)[t->buffer()];
+    if (b->data() != nullptr)
+    {
+      std::shared_ptr<IrTensor> tensor = createTensor(t, b);
+
+      unsigned int opcode = (*opcodes)[op->opcode_index()]->builtin_code();
+
+      // Convert the convolution weights tensor to HWCN format.
+      // TODO: Only the interpreter currently needs this conversion;
+      // remove it when/if it is no longer required.
+      if (opcode == BuiltinOperator_CONV_2D && t->shape()->size() == 4)
+      {
+        paramsForOp.push_back(convertTensorForConv(std::move(tensor)));
+      }
+      else
+      {
+        paramsForOp.push_back(tensor);
+      }
+    }
+  }
+
+  return paramsForOp;
+}
 
 std::shared_ptr<IrTensor> IrVisitor::createTensor(const Tensor *t, const Buffer *b)
 {