namespace
{
+/**
+ * @brief Record of the attributes needed to serialize one TFLite tensor.
+ *
+ * Filled in by allocateTFLiteTensor (name, dtype, shape, optional constant
+ * content) and later consumed by exportOpDefinedTensor when emitting the
+ * flatbuffer tensor table.
+ */
+class TFLTensorInfo
+{
+public:
+ TFLTensorInfo() = default;
+
+public:
+ // Tensor name as it will appear in the serialized model
+ void name(const std::string &name) { _name = name; }
+ const std::string &name(void) const { return _name; }
+
+public:
+ const tflite::TensorType &dtype(void) const { return _dtype; }
+ void dtype(const tflite::TensorType &dtype) { _dtype = dtype; }
+
+ const ShapeDescription &shape(void) const { return _shape; }
+ void shape(const ShapeDescription &shape) { _shape = shape; }
+
+public:
+ // Non-owning; nullptr unless the source node is a loco::ConstGen
+ loco::ConstGen *content(void) const { return _content; }
+ void content(loco::ConstGen *c) { _content = c; }
+
+private:
+ std::string _name;
+
+ tflite::TensorType _dtype;
+ ShapeDescription _shape;
+
+ // TODO Find a better design
+ loco::ConstGen *_content = nullptr;
+};
+
+// Index within this vector doubles as the TFLite tensor id
+using TFLTensorContext = std::vector<TFLTensorInfo>;
+
+/**
+ * @brief Build a TFLTensorInfo for "node", append it to "ctx", and tag the
+ *        node with its tensor index via set_tensor_index.
+ *
+ * The tensor index is the entry's position in "ctx"; the name is synthesized
+ * from that index ("t_<index>") until graph-level I/O metadata exists (see
+ * TODO below).
+ */
+void allocateTFLiteTensor(loco::Node *node, TFLTensorContext &ctx)
+{
+ auto tensor_index = static_cast<TFLTensorIndex>(ctx.size());
+ // TODO Use Graph-level metadata for Input & Output
+ auto tensor_name = "t_" + std::to_string(tensor_index);
+
+ TFLTensorInfo tensor_info;
+
+ tensor_info.name(tensor_name);
+ tensor_info.dtype(TypeInference::get(node));
+ tensor_info.shape(ShapeInference::get(node));
+ // nullptr for any node that is not a ConstGen; exportOpDefinedTensor uses
+ // this to decide between a content-backed and an empty buffer
+ tensor_info.content(dynamic_cast<loco::ConstGen *>(node));
+
+ set_tensor_index(node, tensor_index);
+
+ ctx.emplace_back(tensor_info);
+}
+
+} // namespace
+
+namespace
+{
+
+// Encode "shape" as a flatbuffers int32 vector of its dimensions
flatbuffers::Offset<Vector<int32_t>> encodeShape(FlatBufferBuilder &builder,
const ShapeDescription &shape)
{
return builder.CreateVector(shape._dims);
}
+// Encode an empty tflite::Buffer — used for tensors without constant content
+flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder)
+{
+ return CreateBuffer(builder);
+}
+
template <typename NodeT>
flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, NodeT *)
{
} // namespace
-template <typename NodeT>
-void exportOpDefinedTensor(NodeT *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+/**
+ * @brief Serialize the tensor described by "info" into the flatbuffer.
+ *
+ * Encodes the shape, allocates a buffer (content-backed when info.content()
+ * is set, otherwise empty), then creates the tflite::Tensor and appends it to
+ * gd._tensors. The tensor id is implicitly its position in gd._tensors;
+ * set_tensor_index was already applied during allocation.
+ */
+void exportOpDefinedTensor(const TFLTensorInfo &info, FlatBufferBuilder &builder,
+ SerializedModelData &gd)
{
 // Create and register output tensor shape
- ShapeDescription shape_description = ShapeInference::get(node);
- auto shape_offset = encodeShape(builder, shape_description);
-
- // encode and register output tensor type
- auto tensor_type = TypeInference::get(node);
- // gd._node_to_type[node] = tensor_type;
+ auto shape_offset = encodeShape(builder, info.shape());
 // encode and register output tensor buffer
+ // ConstGen content goes to the templated encodeOpBuffer overload; every
+ // other tensor gets the empty-buffer overload
+ auto buffer = (info.content() != nullptr) ? encodeOpBuffer(builder, info.content())
+ : encodeOpBuffer(builder);
 auto buffer_id = static_cast<uint32_t>(gd._buffers.size());
 gd._buffers.push_back(buffer);
- // encode and register tensor itself using attributes from previous steps
- auto tensor_id = static_cast<uint32_t>(gd._tensors.size());
-
- std::string name;
- // if current node is input
- if (auto pull = dynamic_cast<loco::Pull *>(node))
- {
- name = gd._pull_to_name[pull];
- }
- // if next node is output
- else if (auto push = dynamic_cast<loco::Push *>(*loco::succs(node).begin()))
- {
- name = gd._push_to_name[push];
- }
- else
- {
- name = "t_" + std::to_string(tensor_id);
- }
- auto name_offset = builder.CreateString(name);
- auto tensor_offset = CreateTensor(builder, shape_offset, tensor_type, buffer_id, name_offset,
+ // NOTE(review): graph inputs/outputs previously took their names from
+ // gd._pull_to_name / gd._push_to_name; all tensors now use the synthesized
+ // "t_<index>" name from TFLTensorInfo — confirm no consumer relies on the
+ // old I/O names.
+ auto name_offset = builder.CreateString(info.name())
+ auto tensor_offset = CreateTensor(builder, shape_offset, info.dtype(), buffer_id, name_offset,
 /*quantization*/ 0, /*is_variable*/ false);
- set_tensor_index(node, tensor_id);
 gd._tensors.push_back(tensor_offset);
}
+// Build the tensor table for "g" in two passes: first allocate a
+// TFLTensorInfo (and tensor index) for every node in post-order, then
+// serialize each entry into "gd".
void exportOpDefinedTensors(loco::Graph *g, FlatBufferBuilder &builder, SerializedModelData &gd)
{
- // Operations should be traversed in RPO because during processing of current operation
- // we need to know all attributes of previous operations,
- // like shape, type,tensor id related with previous operation
- auto sequence = loco::postorder_traversal(loco::output_nodes(g));
- for (loco::Node *node : sequence)
+ TFLTensorContext tensor_ctx;
+
+ // NOTE(review): unlike the dynamic_cast chain this replaces, Push (output)
+ // nodes are no longer skipped and unsupported node types no longer assert —
+ // every node now gets its own tensor; confirm this is intended.
+ for (auto node : loco::postorder_traversal(loco::output_nodes(g)))
+ {
+ allocateTFLiteTensor(node, tensor_ctx);
+ }
+
+ for (const auto &tensor_info : tensor_ctx)
 {
- if (auto *pull = dynamic_cast<loco::Pull *>(node))
- {
- // Create tensor for input node
- exportOpDefinedTensor(pull, builder, gd);
- }
- else if (dynamic_cast<loco::Push *>(node))
- {
- // Do nothing for exit node
- }
- else if (auto *cst = dynamic_cast<loco::ConstGen *>(node))
- {
- // Create tensor filled with constant data
- exportOpDefinedTensor(cst, builder, gd);
- }
- else if (auto *encode = dynamic_cast<loco::FeatureEncode *>(node))
- {
- exportOpDefinedTensor(encode, builder, gd);
- }
- else if (auto *decode = dynamic_cast<loco::FeatureDecode *>(node))
- {
- exportOpDefinedTensor(decode, builder, gd);
- }
- else if (auto *encode = dynamic_cast<loco::FilterEncode *>(node))
- {
- exportOpDefinedTensor(encode, builder, gd);
- }
- else if (auto *max_pool = dynamic_cast<loco::MaxPool2D *>(node))
- {
- exportOpDefinedTensor(max_pool, builder, gd);
- }
- else if (auto *avg_pool = dynamic_cast<loco::AvgPool2D *>(node))
- {
- exportOpDefinedTensor(avg_pool, builder, gd);
- }
- else if (auto *conv2d = dynamic_cast<loco::Conv2D *>(node))
- {
- exportOpDefinedTensor(conv2d, builder, gd);
- }
- else if (auto *relu = dynamic_cast<loco::ReLU *>(node))
- {
- exportOpDefinedTensor(relu, builder, gd);
- }
- else if (auto *tconcat = dynamic_cast<loco::TensorConcat *>(node))
- {
- exportOpDefinedTensor(tconcat, builder, gd);
- }
- else if (auto *encode = dynamic_cast<loco::BiasEncode *>(node))
- {
- exportOpDefinedTensor(encode, builder, gd);
- }
- else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
- {
- exportOpDefinedTensor(biasadd, builder, gd);
- }
- else
- {
- assert(false && "unsupported node type");
- }
+ exportOpDefinedTensor(tensor_info, builder, gd);
 }
}