[loco exporter] Parse and export graph I/O name (#3803)
author Cheongyo Bahk/On-Device Lab(SR)/Engineer/Samsung Electronics <ch.bahk@samsung.com>
Mon, 17 Jun 2019 02:02:30 +0000 (11:02 +0900)
committer Jonghyun Park/On-Device Lab(SR)/Staff Engineer/Samsung Electronics <jh1302.park@samsung.com>
Mon, 17 Jun 2019 02:02:30 +0000 (11:02 +0900)
* [loco exporter] Parse and export graph I/O name

Graph input and output names are parsed from the loco graph's information
and exported as the names of the corresponding input and output tensors.
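
For reference, a minimal sketch of the loco naming API this change relies on (illustrative only; it assumes loco::Graph is default-constructible, as the test fixture below suggests, and mirrors the name() accessors used in the diffs):

    #include "loco.h"
    #include <cassert>

    // Minimal sketch: assigning and reading back graph-level I/O names in loco
    void name_graph_io_sketch()
    {
      loco::Graph g;

      auto input = g.inputs()->create();
      input->name("graph_input"); // exported as the input tensor name

      auto output = g.outputs()->create();
      output->name("graph_output"); // exported as the output tensor name

      assert(g.inputs()->at(0)->name() == "graph_input");
      assert(g.outputs()->at(0)->name() == "graph_output");
    }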

Signed-off-by: Cheongyo Bahk <ch.bahk@samsung.com>
* Fix typo

contrib/loco-exporter/src/Exporter.test.cpp
contrib/loco-exporter/src/LocoExporterImpl.cpp
contrib/loco-exporter/src/LocoExporterUtils.cpp
contrib/loco-exporter/src/LocoExporterUtils.h
contrib/loco-exporter/src/TensorExporter.cpp

index bc0d083..5dd96c1 100644 (file)
@@ -42,7 +42,11 @@ public:
   loco::Pull *pullLayer()
   {
     loco::Pull *pull = _graph.nodes()->create<loco::Pull>();
-    _graph.inputs()->create()->node(pull);
+
+    auto graph_input = _graph.inputs()->create();
+    graph_input->name("graph_input");
+    graph_input->node(pull);
+
     pull->dtype(loco::DataType::FLOAT32);
     setSampleShape(pull);
     return pull;
@@ -64,7 +68,11 @@ public:
   loco::Push *pushLayer(loco::Node *input)
   {
     loco::Push *push = _graph.nodes()->create<loco::Push>();
-    _graph.outputs()->create()->node(push);
+
+    auto graph_output = _graph.outputs()->create();
+    graph_output->name("graph_output");
+    graph_output->node(push);
+
     push->from(input);
     return push;
   }
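
For context, a rough sketch of how these fixture helpers could be exercised in a test body (hypothetical code; the actual test assertions are not part of this diff):

    // Build pull -> push in _graph via the fixture helpers above (sketch)
    loco::Pull *pull = pullLayer();
    loco::Push *push = pushLayer(pull);
    // After exporting _graph, the input tensor is expected to carry the name
    // "graph_input" and the output tensor the name "graph_output".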
index 6daf6f3..9719ad7 100644 (file)
@@ -61,6 +61,8 @@ void ExporterImpl::exportGraph(loco::Graph *graph)
   // This version is taken from comment in fbs
   constexpr uint32_t version = 3;
 
+  registerGraphIOName(graph, gd);
+
   // parse graph into SerializedModelData structure
   exportOpDefinedTensors(graph->nodes(), _builder, gd);
 
index 0a07a84..c933bf8 100644 (file)
@@ -41,4 +41,16 @@ tflite::Padding getOpPadding(loco::MaxPool2D *node)
   return tflite::Padding_SAME;
 }
 
+void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd)
+{
+  for (uint32_t in = 0; in < graph->inputs()->size(); ++in)
+  {
+    gd._input_names.emplace_back(graph->inputs()->at(in)->name());
+  }
+  for (uint32_t out = 0; out < graph->outputs()->size(); ++out)
+  {
+    gd._output_names.emplace_back(graph->outputs()->at(out)->name());
+  }
+}
+
 } // namespace loco_exporter
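
A minimal call-site sketch for the new helper (the `graph` and `gd` names follow the diffs; the surrounding setup, including named graph inputs and outputs as in the fixture above, is assumed):

    // Sketch: collect graph-level I/O names into SerializedModelData
    SerializedModelData gd;
    registerGraphIOName(graph, gd);
    assert(gd._input_names.size() == graph->inputs()->size());
    assert(gd._output_names.size() == graph->outputs()->size());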
index 91b0031..4cbc99c 100644 (file)
@@ -18,7 +18,7 @@
 #define __LOCO_EXPORTER_UTILS_H__
 
 #include "schema_generated.h"
-#include "loco/IR/Nodes.h"
+#include "loco.h"
 
 #include "loco/IR/PermutingCodec.h"
 
@@ -73,6 +73,10 @@ struct SerializedModelData final
   std::unordered_map<loco::Node *, tflite::TensorType> _node_to_type;
   std::unordered_map<loco::Node *, ShapeDescription> _node_to_shape;
 
+  // Graph input and output names
+  std::vector<std::string> _input_names;
+  std::vector<std::string> _output_names;
+
   /**
    * @brief if opcode is not registered in table of opcodes add it
    * @param builtin_code
@@ -90,6 +94,9 @@ template <typename PermDescr> inline bool isIdentity(PermDescr *descr)
 
 tflite::Padding getOpPadding(loco::MaxPool2D *node);
 
+/// @brief Register graph input and output names to SerializedModelData
+void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd);
+
 } // namespace loco_exporter
 
 #endif // __LOCO_EXPORTER_UTILS_H__
index 4f4e261..bd55ae6 100644 (file)
@@ -77,7 +77,23 @@ void exportOpDefinedTensor(NodeT *node, FlatBufferBuilder &builder, SerializedMo
 
   // encode and register tensor itself using attributes from previous steps
   auto tensor_id = static_cast<uint32_t>(gd._tensors.size());
-  std::string name = "t_" + std::to_string(tensor_id);
+  std::string name;
+  if (dynamic_cast<loco::Pull *>(node)) // current node is input
+  {
+    // TODO Now only supports a single input. Let's support multiple inputs
+    assert(gd._input_names.size() == 1);
+    name = gd._input_names[0];
+  }
+  else if (dynamic_cast<loco::Push *>(*loco::succs(node).begin())) // next node is output
+  {
+    // TODO Now only supports a single output. Let's support multiple outputs
+    assert(gd._output_names.size() == 1);
+    name = gd._output_names[0];
+  }
+  else
+  {
+    name = "t_" + std::to_string(tensor_id);
+  }
   auto name_offset = builder.CreateString(name);
   auto tensor_offset = CreateTensor(builder, shape_offset, tensor_type, buffer_id, name_offset,
                                     /*quantization*/ 0, /*is_variable*/ false);
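
Note that the `else if` branch above dereferences `loco::succs(node).begin()` without checking that the node has any successors. A sketch of the same naming decision with an explicit guard (assuming `loco::succs` returns a standard container of successor nodes) might look like this; it is an illustration, not part of the commit:

    // Same tensor-naming decision, with an empty-successor guard (sketch only)
    std::string name;
    auto successors = loco::succs(node);
    if (dynamic_cast<loco::Pull *>(node)) // current node is a graph input
    {
      assert(gd._input_names.size() == 1); // single input only, for now
      name = gd._input_names[0];
    }
    else if (!successors.empty() && dynamic_cast<loco::Push *>(*successors.begin()))
    {
      assert(gd._output_names.size() == 1); // single output only, for now
      name = gd._output_names[0];
    }
    else
    {
      name = "t_" + std::to_string(tensor_id);
    }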