From 776f7ceea4c2123f6da7e9e1571243db16342d28 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EB=B0=95=EC=A2=85=ED=98=84/On-Device=20Lab=28SR=29/Staff?=
 =?utf8?q?=20Engineer/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Fri, 12 Jul 2019 14:13:09 +0900
Subject: [PATCH] [exo/tflite] Export Tensors after Allocation (#4199)

This commit rewrites exportOpDefinedTensors as a 2-stage function.

exportOpDefinedTensors now first allocates a T/F Lite tensor for each
node, and then exports the allocated T/F Lite tensors.

The first phase iterates over loco nodes, while the second phase
iterates over the allocated T/F Lite tensors.

Signed-off-by: Jonghyun Park
---
 contrib/exo-tflite/src/TensorExporter.cpp | 169 ++++++++++++++----------------
 1 file changed, 76 insertions(+), 93 deletions(-)

diff --git a/contrib/exo-tflite/src/TensorExporter.cpp b/contrib/exo-tflite/src/TensorExporter.cpp
index 40f086a..42173f7 100644
--- a/contrib/exo-tflite/src/TensorExporter.cpp
+++ b/contrib/exo-tflite/src/TensorExporter.cpp
@@ -25,6 +25,61 @@ using namespace flatbuffers;
 namespace
 {
 
+class TFLTensorInfo
+{
+public:
+  TFLTensorInfo() = default;
+
+public:
+  void name(const std::string &name) { _name = name; }
+  const std::string &name(void) const { return _name; }
+
+public:
+  const tflite::TensorType &dtype(void) const { return _dtype; }
+  void dtype(const tflite::TensorType &dtype) { _dtype = dtype; }
+
+  const ShapeDescription &shape(void) const { return _shape; }
+  void shape(const ShapeDescription &shape) { _shape = shape; }
+
+public:
+  loco::ConstGen *content(void) const { return _content; }
+  void content(loco::ConstGen *c) { _content = c; }
+
+private:
+  std::string _name;
+
+  tflite::TensorType _dtype;
+  ShapeDescription _shape;
+
+  // TODO Find a better design
+  loco::ConstGen *_content = nullptr;
+};
+
+using TFLTensorContext = std::vector<TFLTensorInfo>;
+
+void allocateTFLiteTensor(loco::Node *node, TFLTensorContext &ctx)
+{
+  auto tensor_index = static_cast<uint32_t>(ctx.size());
+  // TODO Use Graph-level metadata for Input & Output
+  auto tensor_name = "t_" + std::to_string(tensor_index);
+
+  TFLTensorInfo tensor_info;
+
+  tensor_info.name(tensor_name);
+  tensor_info.dtype(TypeInference::get(node));
+  tensor_info.shape(ShapeInference::get(node));
+  tensor_info.content(dynamic_cast<loco::ConstGen *>(node));
+
+  set_tensor_index(node, tensor_index);
+
+  ctx.emplace_back(tensor_info);
+}
+
+} // namespace
+
+namespace
+{
+
 flatbuffers::Offset<Vector<int32_t>> encodeShape(FlatBufferBuilder &builder,
                                                  const ShapeDescription &shape)
 {
@@ -32,6 +87,11 @@ flatbuffers::Offset<Vector<int32_t>> encodeShape(FlatBufferBuilder &builder,
   return builder.CreateVector(shape._dims);
 }
 
+flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder)
+{
+  return CreateBuffer(builder);
+}
+
 template <typename NodeT>
 flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, NodeT *)
 {
@@ -56,112 +116,35 @@ flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, l
 
 } // namespace
 
-template <typename NodeT>
-void exportOpDefinedTensor(NodeT *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+void exportOpDefinedTensor(const TFLTensorInfo &info, FlatBufferBuilder &builder,
+                           SerializedModelData &gd)
 {
   // Create and register output tensor shape
-  ShapeDescription shape_description = ShapeInference::get(node);
-  auto shape_offset = encodeShape(builder, shape_description);
-
-  // encode and register output tensor type
-  auto tensor_type = TypeInference::get(node);
-  // gd._node_to_type[node] = tensor_type;
+  auto shape_offset = encodeShape(builder, info.shape());
 
   // encode and register output tensor buffer
-  auto buffer = encodeOpBuffer(builder, node);
+  auto buffer = (info.content() != nullptr) ? encodeOpBuffer(builder, info.content())
+                                             : encodeOpBuffer(builder);
   auto buffer_id = static_cast<uint32_t>(gd._buffers.size());
   gd._buffers.push_back(buffer);
 
-  // encode and register tensor itself using attributes from previous steps
-  auto tensor_id = static_cast<uint32_t>(gd._tensors.size());
-
-  std::string name;
-  // if current node is input
-  if (auto pull = dynamic_cast<loco::Pull *>(node))
-  {
-    name = gd._pull_to_name[pull];
-  }
-  // if next node is output
-  else if (auto push = dynamic_cast<loco::Push *>(*loco::succs(node).begin()))
-  {
-    name = gd._push_to_name[push];
-  }
-  else
-  {
-    name = "t_" + std::to_string(tensor_id);
-  }
-  auto name_offset = builder.CreateString(name);
-  auto tensor_offset = CreateTensor(builder, shape_offset, tensor_type, buffer_id, name_offset,
+  auto name_offset = builder.CreateString(info.name());
+  auto tensor_offset = CreateTensor(builder, shape_offset, info.dtype(), buffer_id, name_offset,
                                     /*quantization*/ 0, /*is_variable*/ false);
-  set_tensor_index(node, tensor_id);
   gd._tensors.push_back(tensor_offset);
 }
 
 void exportOpDefinedTensors(loco::Graph *g, FlatBufferBuilder &builder, SerializedModelData &gd)
 {
-  // Operations should be traversed in RPO because during processing of current operation
-  // we need to know all attributes of previous operations,
-  // like shape, type,tensor id related with previous operation
-  auto sequence = loco::postorder_traversal(loco::output_nodes(g));
-  for (loco::Node *node : sequence)
+  TFLTensorContext tensor_ctx;
+
+  for (auto node : loco::postorder_traversal(loco::output_nodes(g)))
+  {
+    allocateTFLiteTensor(node, tensor_ctx);
+  }
+
+  for (const auto &tensor_info : tensor_ctx)
   {
-    if (auto *pull = dynamic_cast<loco::Pull *>(node))
-    {
-      // Create tensor for input node
-      exportOpDefinedTensor(pull, builder, gd);
-    }
-    else if (dynamic_cast<loco::Push *>(node))
-    {
-      // Do nothing for exit node
-    }
-    else if (auto *cst = dynamic_cast<loco::ConstGen *>(node))
-    {
-      // Create tensor filled with constant data
-      exportOpDefinedTensor(cst, builder, gd);
-    }
-    else if (auto *encode = dynamic_cast<loco::FeatureEncode *>(node))
-    {
-      exportOpDefinedTensor(encode, builder, gd);
-    }
-    else if (auto *decode = dynamic_cast<loco::FeatureDecode *>(node))
-    {
-      exportOpDefinedTensor(decode, builder, gd);
-    }
-    else if (auto *encode = dynamic_cast<loco::FilterEncode *>(node))
-    {
-      exportOpDefinedTensor(encode, builder, gd);
-    }
-    else if (auto *max_pool = dynamic_cast<loco::MaxPool2D *>(node))
-    {
-      exportOpDefinedTensor(max_pool, builder, gd);
-    }
-    else if (auto *avg_pool = dynamic_cast<loco::AvgPool2D *>(node))
-    {
-      exportOpDefinedTensor(avg_pool, builder, gd);
-    }
-    else if (auto *conv2d = dynamic_cast<loco::Conv2D *>(node))
-    {
-      exportOpDefinedTensor(conv2d, builder, gd);
-    }
-    else if (auto *relu = dynamic_cast<loco::ReLU *>(node))
-    {
-      exportOpDefinedTensor(relu, builder, gd);
-    }
-    else if (auto *tconcat = dynamic_cast<loco::TensorConcat *>(node))
-    {
-      exportOpDefinedTensor(tconcat, builder, gd);
-    }
-    else if (auto *encode = dynamic_cast<loco::BiasEncode *>(node))
-    {
-      exportOpDefinedTensor(encode, builder, gd);
-    }
-    else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
-    {
-      exportOpDefinedTensor(biasadd, builder, gd);
-    }
-    else
-    {
-      assert(false && "unsupported node type");
-    }
+    exportOpDefinedTensor(tensor_info, builder, gd);
   }
 }
-- 
2.7.4
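
For readers who want the shape of the change without the loco/FlatBuffers machinery, here is a minimal, standalone sketch of the allocate-then-export flow this patch introduces. It is an illustration only: Node, TensorInfo, TensorContext, SerializedModel, allocateTensor, and exportTensor below are simplified stand-ins invented for this sketch; the real names in the patch are TFLTensorInfo, TFLTensorContext, allocateTFLiteTensor, and exportOpDefinedTensor(s).

// Standalone sketch of the two-phase export; stand-in types, not the real exo-tflite code.
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for a loco::Node; we only track whether it carries constant data.
struct Node
{
  bool has_constant_content;
};

// Mirrors the role of TFLTensorInfo: everything phase 2 needs, with no node access.
struct TensorInfo
{
  std::string name;
  bool has_content = false;
};

using TensorContext = std::vector<TensorInfo>;

// Stand-in for SerializedModelData.
struct SerializedModel
{
  std::vector<std::string> tensors;
};

// Phase 1: allocate one tensor slot per node (the index is its position in the context).
void allocateTensor(const Node &node, TensorContext &ctx)
{
  const auto tensor_index = ctx.size();
  TensorInfo info;
  info.name = "t_" + std::to_string(tensor_index);
  info.has_content = node.has_constant_content;
  ctx.emplace_back(info);
}

// Phase 2: export an already-allocated tensor; only TensorInfo is consulted.
void exportTensor(const TensorInfo &info, SerializedModel &model)
{
  model.tensors.push_back(info.name + (info.has_content ? " (const)" : ""));
}

int main()
{
  std::vector<Node> graph{{false}, {true}, {false}};

  // First pass iterates over nodes ...
  TensorContext ctx;
  for (const auto &node : graph)
    allocateTensor(node, ctx);

  // ... second pass iterates over the allocated tensors.
  SerializedModel model;
  for (const auto &info : ctx)
    exportTensor(info, model);

  for (const auto &t : model.tensors)
    std::cout << t << std::endl; // t_0, t_1 (const), t_2

  assert(model.tensors.size() == graph.size());
  return 0;
}

As in the patch, the point of the split is that tensor names and indices are fixed entirely in the first pass, so the export pass no longer needs per-node-type dispatch.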