--- /dev/null
+# 'exo_tflite' builds a loco-to-T/F Lite exporter library.
+#
+# Prerequisites: FlatBuffers (to compile the schema) and the TensorFlow 1.14
+# source tree (which provides the T/F Lite schema). Skip the build quietly
+# when either is missing.
+nncc_find_package(FlatBuffers QUIET)
+
+if(NOT FlatBuffers_FOUND)
+  message(STATUS "Build exo-tflite: FALSE (missing FlatBuffers)")
+  return()
+endif()
+
+nncc_find_package(TensorFlowSource EXACT 1.14 QUIET)
+
+if(NOT TensorFlowSource_FOUND)
+  message(STATUS "Build exo-tflite: FALSE (missing TensorFlowSource)")
+  return()
+endif()
+
+message(STATUS "Build exo-tflite: TRUE")
+
+set(TFLITE_SCHEMA_DIR "${TensorFlowSource_DIR}/tensorflow/lite/schema")
+
+# Generate C++ bindings (schema_generated.h) from the T/F Lite schema
+FlatBuffers_Target(exo_tflite_fbs
+  OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/gen"
+  SCHEMA_DIR "${TFLITE_SCHEMA_DIR}"
+  SCHEMA_FILES schema.fbs
+)
+
+# NOTE file(GLOB ...) follows the convention used across nncc packages.
+#      Newly added sources require re-running cmake to be picked up.
+file(GLOB_RECURSE SOURCES "src/*.cpp")
+file(GLOB_RECURSE TESTS "src/*.test.cpp")
+list(REMOVE_ITEM SOURCES ${TESTS})
+
+add_library(exo_tflite SHARED ${SOURCES})
+target_include_directories(exo_tflite PUBLIC include)
+target_include_directories(exo_tflite PRIVATE src)
+target_link_libraries(exo_tflite PUBLIC exo_tflite_fbs)
+target_link_libraries(exo_tflite PUBLIC loco)
+target_link_libraries(exo_tflite PRIVATE stdex)
+# Let's apply nncc common compile options
+#
+# NOTE This will enable strict compilation (warnings as error).
+#      Please refer to the top-level CMakeLists.txt for details
+target_link_libraries(exo_tflite PRIVATE nncc_common)
+
+if(NOT ENABLE_TEST)
+  return()
+endif()
+
+# Google Test is mandatory for internal testing
+nncc_find_package(GTest REQUIRED)
+
+GTest_AddTest(exo_tflite_test ${TESTS})
+target_include_directories(exo_tflite_test PRIVATE src)
+target_link_libraries(exo_tflite_test stdex)
+target_link_libraries(exo_tflite_test exo_tflite)
--- /dev/null
+# exo-tflite
+
+_exo-tflite_ includes a _loco_-to-_T/F Lite_ exporter (as a library).
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EXO_TFL_EXPORTER_H__
+#define __EXO_TFL_EXPORTER_H__
+
+#include <loco.h>
+
+#include <memory>
+
+namespace exo
+{
+
+/**
+ * HOW TO USE:
+ *
+ * loco::Graph *g = ...;
+ *
+ * TFLExporter e(g);
+ * e.dumpToFile("model.tflite");
+ *
+ * HOW TO USE (simplified):
+ *
+ * TFLExporter(g).dumpToFile("model.tflite");
+ *
+ */
+/// @brief Exports a loco graph as a T/F Lite flatbuffer model (pimpl facade)
+class TFLExporter
+{
+public:
+ class Impl; // hidden implementation (defined in src/TFLExporterImpl.h)
+
+public:
+ // Serializes 'graph' immediately; the result is held inside _impl
+ explicit TFLExporter(loco::Graph *graph);
+ // Out-of-line (defaulted in the .cpp) so unique_ptr<Impl> can be destroyed
+ // even though Impl is incomplete here
+ ~TFLExporter();
+
+ /**
+ * @brief write to a file
+ * @param path path to file where to write data
+ * @throws any file related exceptions
+ */
+ void dumpToFile(const char *path) const;
+
+private:
+ std::unique_ptr<Impl> _impl;
+};
+
+} // namespace exo
+
+#endif // __EXO_TFL_EXPORTER_H__
--- /dev/null
+require("stdex")
+require("loco")
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ExporterUtils.h"
+
+// Returns the opcode-table index assigned to 'builtin_code'.
+// An opcode seen for the first time is appended at the next free index.
+uint32_t SerializedModelData::registerBuiltinOpcode(tflite::BuiltinOperator builtin_code)
+{
+  const OpCode key{builtin_code};
+
+  const auto found = _operator_codes.find(key);
+  if (found != _operator_codes.end())
+  {
+    return found->second;
+  }
+
+  const auto next_idx = static_cast<uint32_t>(_operator_codes.size());
+  _operator_codes.emplace(key, next_idx);
+  return next_idx;
+}
+
+// Maps a loco 2D padding onto the tflite::Padding enum.
+//
+// VALID : all four margins are zero.
+// SAME  : bottom/right margin equals the top/left one or exceeds it by
+//         exactly one (the asymmetry "same" padding can produce).
+// Any other padding has no tflite counterpart yet.
+tflite::Padding getOpPadding(const loco::Pad<2> *pad)
+{
+  const bool no_pad =
+      pad->top() == 0 && pad->bottom() == 0 && pad->left() == 0 && pad->right() == 0;
+
+  if (no_pad)
+  {
+    return tflite::Padding_VALID;
+  }
+
+  const bool same_vertical = (pad->top() <= pad->bottom()) && (pad->bottom() <= pad->top() + 1);
+  const bool same_horizontal = (pad->left() <= pad->right()) && (pad->right() <= pad->left() + 1);
+
+  if (same_vertical && same_horizontal)
+  {
+    return tflite::Padding_SAME;
+  }
+
+  throw std::runtime_error("NYI for custom PAD");
+}
+
+// Records the user-given names of the graph-level inputs/outputs into 'gd',
+// keyed by the corresponding Pull/Push nodes.
+void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd)
+{
+  auto inputs = graph->inputs();
+  for (uint32_t n = 0; n < inputs->size(); ++n)
+  {
+    auto input = inputs->at(n);
+    gd._pull_to_name[input->node()] = input->name();
+  }
+
+  auto outputs = graph->outputs();
+  for (uint32_t n = 0; n < outputs->size(); ++n)
+  {
+    auto output = outputs->at(n);
+    gd._push_to_name[output->node()] = output->name();
+  }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __EXPORTER_UTILS_H__
+#define __EXPORTER_UTILS_H__
+
+#include "schema_generated.h"
+#include "loco.h"
+
+#include "loco/IR/PermutingCodec.h"
+
+#include <unordered_map>
+
+/// @brief Key type for the operator-code table: wraps a tflite builtin opcode
+struct OpCode
+{
+ tflite::BuiltinOperator opcode;
+
+ bool operator==(const OpCode &rhs) const { return opcode == rhs.opcode; }
+};
+
+namespace std
+{
+
+/// @brief Hash support so OpCode can be used as an unordered_map key
+template <> struct hash<OpCode>
+{
+ size_t operator()(const OpCode &x) const { return hash<int>()(x.opcode); }
+};
+
+} // namespace std
+
+/// @brief Tensor shape as signed 32-bit dimensions (tflite's dimension type)
+struct ShapeDescription
+{
+ std::vector<int32_t> _dims;
+ bool _rank_known; // NOTE(review): presumably _dims is meaningful only when true — confirm with shape inference
+};
+
+// Prerequisites for tflite::Model object creation
+struct SerializedModelData final
+{
+ SerializedModelData() = default;
+ SerializedModelData(const SerializedModelData &) = delete;
+
+ std::unordered_map<OpCode, uint32_t> _operator_codes;
+ std::vector<flatbuffers::Offset<tflite::Operator>> _operators;
+ std::vector<flatbuffers::Offset<tflite::Tensor>> _tensors;
+ std::vector<flatbuffers::Offset<tflite::Buffer>> _buffers;
+ std::vector<int32_t> _inputs;
+ std::vector<int32_t> _outputs;
+ std::unordered_map<loco::Node *, int32_t> _node_to_tensor_id;
+
+ // Data for type and shape inference
+ std::unordered_map<loco::Node *, tflite::TensorType> _node_to_type;
+ std::unordered_map<loco::Node *, ShapeDescription> _node_to_shape;
+
+ // Graph input and output names
+ std::unordered_map<loco::Pull *, std::string> _pull_to_name;
+ std::unordered_map<loco::Push *, std::string> _push_to_name;
+
+ /**
+ * @brief if opcode is not registered in table of opcodes add it
+ * @param builtin_code
+ * @return idx of opcode in table of opcodes (see schema)
+ */
+ uint32_t registerBuiltinOpcode(tflite::BuiltinOperator builtin_code);
+};
+
+/// @brief Returns true iff the permutation maps onto tflite's canonical NHWC
+/// layout (Count->0, Height->1, Width->2, Depth->3).
+///
+/// The primary template is declared but left undefined: only the Feature and
+/// Filter specializations below exist, so use with any other domain fails at
+/// link time.
+template <typename Permutation> inline bool isNHWC(Permutation *perm);
+
+template <> inline bool isNHWC(loco::Permutation<loco::Domain::Feature> *perm)
+{
+ return perm->axis(loco::FeatureAxis::Count) == 0 && perm->axis(loco::FeatureAxis::Height) == 1 &&
+ perm->axis(loco::FeatureAxis::Width) == 2 && perm->axis(loco::FeatureAxis::Depth) == 3;
+}
+
+template <> inline bool isNHWC(loco::Permutation<loco::Domain::Filter> *perm)
+{
+ return perm->axis(loco::FilterAxis::Count) == 0 && perm->axis(loco::FilterAxis::Height) == 1 &&
+ perm->axis(loco::FilterAxis::Width) == 2 && perm->axis(loco::FilterAxis::Depth) == 3;
+}
+
+tflite::Padding getOpPadding(const loco::Pad<2> *pad);
+
+/// @brief Register graph input and output names to SerializedModelData
+void registerGraphIOName(loco::Graph *graph, SerializedModelData &gd);
+
+#endif // __EXPORTER_UTILS_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "OperationExporter.h"
+#include "ExporterUtils.h"
+
+using namespace flatbuffers;
+using namespace tflite;
+
+namespace
+{
+
+// Serializes a loco::ReLU node as a tflite RELU operator (no options table)
+void exportRelu(loco::ReLU *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+  const auto op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_RELU);
+
+  std::vector<int32_t> input_ids{gd._node_to_tensor_id[node->input()]};
+  std::vector<int32_t> output_ids{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+
+  auto inputs = builder.CreateVector(input_ids);
+  auto outputs = builder.CreateVector(output_ids);
+
+  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs);
+  gd._operators.push_back(op_offset);
+}
+
+// Serializes loco::MaxPool2D as a tflite MAX_POOL_2D operator.
+// The node's own tensor id is the operator output; the ifm's id is the input.
+void exportMaxPool2D(loco::MaxPool2D *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_MAX_POOL_2D);
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->ifm()]};
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ tflite::Padding padding = getOpPadding(node->pad());
+ // NOTE(review): assumes Pool2DOptions takes stride_w, stride_h, filter_w,
+ // filter_h in that order (horizontal before vertical) — confirm vs. schema
+ auto options = CreatePool2DOptions(builder, padding, node->stride()->horizontal(),
+ node->stride()->vertical(), node->window()->horizontal(),
+ node->window()->vertical());
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ tflite::BuiltinOptions_Pool2DOptions, options.Union());
+ gd._operators.push_back(op_offset);
+}
+
+// Serializes loco::AvgPool2D as a tflite AVERAGE_POOL_2D operator.
+// Mirrors exportMaxPool2D except for the opcode and the convention check.
+void exportAvgPool2D(loco::AvgPool2D *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+ // TFlite only support Valid convention of average pooling
+ assert(node->convention() == loco::AvgPool2D::Convention::Valid);
+
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_AVERAGE_POOL_2D);
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->ifm()]};
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ tflite::Padding padding = getOpPadding(node->pad());
+ auto options = CreatePool2DOptions(builder, padding, node->stride()->horizontal(),
+ node->stride()->vertical(), node->window()->horizontal(),
+ node->window()->vertical());
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ tflite::BuiltinOptions_Pool2DOptions, options.Union());
+ gd._operators.push_back(op_offset);
+}
+
+// Serializes loco::Conv2D as a tflite CONV_2D operator.
+//
+// tflite CONV_2D requires a bias input, which loco's Conv2D does not have, so
+// a constant all-zero bias buffer/tensor is synthesized and registered in 'gd'
+// before the operator itself is emitted.
+void exportConv2D(loco::Conv2D *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_CONV_2D);
+
+ // Third input of CONV_2D of tflite should be bias. We will make (and register to gd) dummy zero
+ // bias. Bias would be rank 1, have size of output kernel count, and have all zero values, i.e.
+ // zero bias.
+ auto *ker = dynamic_cast<loco::FilterEncode *>(node->ker());
+ assert(ker);
+ int32_t bias_vec_size = gd._node_to_shape[ker]._dims[0]; // output kernel count
+
+ auto bias_vec_shape_offset = builder.CreateVector(std::vector<int32_t>{bias_vec_size});
+ // sizeof(int32_t) == sizeof(float) here; the buffer stores raw bytes
+ size_t raw_bias_vec_size = bias_vec_size * sizeof(int32_t);
+
+ std::vector<float> bias_vec_data(bias_vec_size); // initialized as zero vector
+
+ auto bias_vec_offset =
+ builder.CreateVector(reinterpret_cast<uint8_t *>(bias_vec_data.data()), raw_bias_vec_size);
+
+ auto bias_buffer_offset = CreateBuffer(builder, bias_vec_offset);
+
+ const auto bias_buffer_id = static_cast<uint32_t>(gd._buffers.size());
+
+ gd._buffers.push_back(bias_buffer_offset);
+
+ auto bias_tensor_offset =
+ CreateTensor(builder, bias_vec_shape_offset, TensorType_FLOAT32, bias_buffer_id);
+
+ auto bias_tensor_id = static_cast<int32_t>(gd._tensors.size());
+ gd._tensors.push_back(bias_tensor_offset);
+
+ // Make input, output and options for operator
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->ifm()],
+ gd._node_to_tensor_id[node->ker()], bias_tensor_id};
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ tflite::Padding padding = getOpPadding(node->pad());
+ auto options = CreateConv2DOptions(builder, padding, node->stride()->horizontal(),
+ node->stride()->vertical());
+
+ // Make CONV_2D operator
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ tflite::BuiltinOptions_Conv2DOptions, options.Union());
+ gd._operators.push_back(op_offset);
+}
+
+/// @brief Export given node into identity, i.e. CONCATENATION with one input
+template <typename NodeT>
+void exportIdentity(NodeT *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_CONCATENATION);
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->arg(0)]};
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ auto options = CreateConcatenationOptions(builder); // use dummy 0 axis and NONE activation
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ tflite::BuiltinOptions_ConcatenationOptions, options.Union());
+
+ gd._operators.push_back(op_offset);
+}
+
+/// @brief Export loco nodes as TRANSPOSE
+void exportAsTranspose(loco::Node *node, FlatBufferBuilder &builder,
+ std::vector<int32_t> &perm_vec_data, SerializedModelData &gd)
+{
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_TRANSPOSE);
+
+ auto options = CreateTransposeOptions(builder);
+
+ // Create constant tensor with perm vector
+ constexpr int perm_vec_size = 4;
+ assert(perm_vec_data.size() == perm_vec_size);
+ auto perm_vec_shape_offset = builder.CreateVector(std::vector<int32_t>{perm_vec_size});
+ constexpr size_t raw_perm_vec_size = perm_vec_size * sizeof(int32_t);
+
+ auto perm_vec_offset =
+ builder.CreateVector(reinterpret_cast<uint8_t *>(perm_vec_data.data()), raw_perm_vec_size);
+
+ auto perm_buffer_offset = CreateBuffer(builder, perm_vec_offset);
+
+ const auto perm_buffer_id = static_cast<uint32_t>(gd._buffers.size());
+
+ gd._buffers.push_back(perm_buffer_offset);
+
+ auto perm_tensor_offset =
+ CreateTensor(builder, perm_vec_shape_offset, TensorType_INT32, perm_buffer_id);
+
+ auto perm_tensor_id = static_cast<int32_t>(gd._tensors.size());
+ gd._tensors.push_back(perm_tensor_offset);
+
+ // Create permutation node
+
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->arg(0)], perm_tensor_id};
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[node]};
+
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+
+ constexpr auto options_type = tflite::BuiltinOptions::BuiltinOptions_TransposeOptions;
+
+ auto transpose_offset =
+ CreateOperator(builder, op_idx, inputs, outputs, options_type, options.Union());
+ gd._operators.push_back(transpose_offset);
+}
+
+// Serializes loco::FeatureEncode: an identity when the encoder is already
+// NHWC (tflite's feature layout), otherwise a TRANSPOSE into NHWC.
+//
+// NOTE Only permutation-based encoders are supported; the dynamic_cast is
+//      asserted (it was dereferenced unchecked before, crashing in NDEBUG
+//      builds on any other encoder kind).
+void exportFeatureEncode(loco::FeatureEncode *node, FlatBufferBuilder &builder,
+                         SerializedModelData &gd)
+{
+  auto encoder = dynamic_cast<loco::PermutingEncoder<loco::Domain::Feature> *>(node->encoder());
+  assert(encoder != nullptr);
+  auto perm = encoder->perm();
+
+  if (isNHWC(perm))
+  {
+    // Note that tflite represents feature as NHWC
+    exportIdentity(node, builder, gd);
+  }
+  else
+  {
+    // Entry i of the perm vector is the input axis that becomes output axis i;
+    // the output must be NHWC, hence Count/Height/Width/Depth at 0/1/2/3.
+    std::vector<int32_t> perm_vec_data(4);
+    perm_vec_data[0] = perm->axis(loco::FeatureAxis::Count);
+    perm_vec_data[1] = perm->axis(loco::FeatureAxis::Height);
+    perm_vec_data[2] = perm->axis(loco::FeatureAxis::Width);
+    perm_vec_data[3] = perm->axis(loco::FeatureAxis::Depth);
+
+    exportAsTranspose(node, builder, perm_vec_data, gd);
+  }
+}
+
+// Serializes loco::FeatureDecode: an identity when the decoder is already
+// NHWC, otherwise a TRANSPOSE out of NHWC into the decoder's layout.
+//
+// NOTE Only permuting decoders are supported; the dynamic_cast is asserted
+//      (it was dereferenced unchecked before, crashing in NDEBUG builds).
+void exportFeatureDecode(loco::FeatureDecode *node, FlatBufferBuilder &builder,
+                         SerializedModelData &gd)
+{
+  auto decoder = dynamic_cast<loco::PermutingDecoder<loco::Domain::Feature> *>(node->decoder());
+  assert(decoder != nullptr);
+  auto perm = decoder->perm();
+
+  if (isNHWC(perm))
+  {
+    // Note that tflite represents feature as NHWC
+    exportIdentity(node, builder, gd);
+  }
+  else
+  {
+    // Inverse mapping of the encode case: output axis perm->axis(A) receives
+    // the NHWC input axis of A, so the vector is filled by destination index.
+    std::vector<int32_t> perm_vec_data(4);
+    perm_vec_data[perm->axis(loco::FeatureAxis::Count)] = 0;
+    perm_vec_data[perm->axis(loco::FeatureAxis::Height)] = 1;
+    perm_vec_data[perm->axis(loco::FeatureAxis::Width)] = 2;
+    perm_vec_data[perm->axis(loco::FeatureAxis::Depth)] = 3;
+
+    exportAsTranspose(node, builder, perm_vec_data, gd);
+  }
+}
+
+// Serializes loco::FilterEncode: an identity when the encoder is already
+// NHWC (tflite's filter layout), otherwise a TRANSPOSE into NHWC.
+//
+// NOTE Only permutation-based encoders are supported; the dynamic_cast is
+//      asserted (it was dereferenced unchecked before, crashing in NDEBUG
+//      builds on any other encoder kind).
+void exportFilterEncode(loco::FilterEncode *node, FlatBufferBuilder &builder,
+                        SerializedModelData &gd)
+{
+  auto encoder = dynamic_cast<loco::PermutingEncoder<loco::Domain::Filter> *>(node->encoder());
+  assert(encoder != nullptr);
+  auto perm = encoder->perm();
+
+  if (isNHWC(perm))
+  {
+    // Note that tflite represents filter as NHWC
+    exportIdentity(node, builder, gd);
+  }
+  else
+  {
+    std::vector<int32_t> perm_vec_data(4);
+    // NOTE In tflite, all tensors means NHWC, so 0 = N, 1 = H, 2 = W, 3 = C
+    perm_vec_data[0] = perm->axis(loco::FilterAxis::Count);
+    perm_vec_data[1] = perm->axis(loco::FilterAxis::Height);
+    perm_vec_data[2] = perm->axis(loco::FilterAxis::Width);
+    perm_vec_data[3] = perm->axis(loco::FilterAxis::Depth);
+
+    exportAsTranspose(node, builder, perm_vec_data, gd);
+  }
+}
+
+// Serializes BiasAdd<Tensor> as a tflite ADD of the value and bias tensors
+void exportBiasAdd(loco::BiasAdd<loco::Domain::Tensor> *node, FlatBufferBuilder &builder,
+                   SerializedModelData &gd)
+{
+  const auto op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_ADD);
+
+  std::vector<int32_t> input_ids;
+  input_ids.push_back(gd._node_to_tensor_id[node->value()]);
+  input_ids.push_back(gd._node_to_tensor_id[node->bias()]);
+
+  std::vector<int32_t> output_ids{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+
+  auto inputs = builder.CreateVector(input_ids);
+  auto outputs = builder.CreateVector(output_ids);
+
+  auto options = CreateAddOptions(builder); // dummy option
+  auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+                                  tflite::BuiltinOptions_AddOptions, options.Union());
+  gd._operators.push_back(op_offset);
+}
+
+/// @brief Export CONCATENATION of **TWO** tensors only
+void exportConcat(loco::TensorConcat *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+ uint32_t op_idx = gd.registerBuiltinOpcode(tflite::BuiltinOperator_CONCATENATION);
+ std::vector<int32_t> inputs_vec{gd._node_to_tensor_id[node->lhs()],
+ gd._node_to_tensor_id[node->rhs()]};
+ std::vector<int32_t> outputs_vec{gd._node_to_tensor_id[static_cast<loco::Node *>(node)]};
+ auto inputs = builder.CreateVector(inputs_vec);
+ auto outputs = builder.CreateVector(outputs_vec);
+ auto options = CreateConcatenationOptions(builder, node->axis());
+ auto op_offset = CreateOperator(builder, op_idx, inputs, outputs,
+ tflite::BuiltinOptions_ConcatenationOptions, options.Union());
+
+ gd._operators.push_back(op_offset);
+}
+
+// Dispatches a single loco node to its exporter based on its concrete type.
+//
+// Pull/Push do not become operators: they only record the node's tensor id
+// as a graph-level input/output. ConstGen is skipped because its tensor and
+// buffer were already emitted by exportOpDefinedTensors.
+//
+// NOTE(review): the trailing assert(false) compiles away under NDEBUG, so an
+// unsupported node would be silently dropped in release builds — consider
+// throwing instead, as getOpPadding does.
+void exportNode(loco::Node *node, flatbuffers::FlatBufferBuilder &builder,
+ SerializedModelData &data)
+{
+ if (auto *relu = dynamic_cast<loco::ReLU *>(node))
+ {
+ exportRelu(relu, builder, data);
+ }
+ else if (dynamic_cast<loco::Pull *>(node))
+ {
+ data._inputs.push_back(data._node_to_tensor_id[node]);
+ }
+ else if (dynamic_cast<loco::Push *>(node))
+ {
+ // A Push has no tensor of its own; the graph output is its source tensor
+ data._outputs.push_back(data._node_to_tensor_id[node->arg(0)]);
+ }
+ else if (auto *encode = dynamic_cast<loco::FeatureEncode *>(node))
+ {
+ exportFeatureEncode(encode, builder, data);
+ }
+ else if (auto *decode = dynamic_cast<loco::FeatureDecode *>(node))
+ {
+ exportFeatureDecode(decode, builder, data);
+ }
+ else if (auto *encode = dynamic_cast<loco::FilterEncode *>(node))
+ {
+ exportFilterEncode(encode, builder, data);
+ }
+ else if (dynamic_cast<loco::ConstGen *>(node))
+ {
+ // skip, everything is done in exportOpDefinedTensors
+ }
+ else if (auto *max_pool = dynamic_cast<loco::MaxPool2D *>(node))
+ {
+ exportMaxPool2D(max_pool, builder, data);
+ }
+ else if (auto *avg_pool = dynamic_cast<loco::AvgPool2D *>(node))
+ {
+ exportAvgPool2D(avg_pool, builder, data);
+ }
+ else if (auto *conv2d = dynamic_cast<loco::Conv2D *>(node))
+ {
+ exportConv2D(conv2d, builder, data);
+ }
+ else if (auto *tconcat = dynamic_cast<loco::TensorConcat *>(node))
+ {
+ exportConcat(tconcat, builder, data);
+ }
+ else if (auto *encode = dynamic_cast<loco::BiasEncode *>(node))
+ {
+ exportIdentity(encode, builder, data);
+ }
+ else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
+ {
+ exportBiasAdd(biasadd, builder, data);
+ }
+ else
+ {
+ assert(false && "unsupported node found");
+ }
+}
+
+} // namespace
+
+// Serializes every node in the context, visiting them in id order
+void exportNodes(loco::Graph::NodeContext *nodes, FlatBufferBuilder &builder,
+                 SerializedModelData &gd)
+{
+  for (uint32_t id = 0; id < nodes->size(); ++id)
+  {
+    exportNode(nodes->at(id), builder, gd);
+  }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __OPERATION_EXPORTER_H__
+#define __OPERATION_EXPORTER_H__
+
+#include "ExporterUtils.h"
+
+#include <loco/IR/Graph.h>
+
+/**
+ * @brief create Operators corresponding to model nodes
+ * @param nodes container with nodes
+ * @param gd information about serializer parts of model
+ */
+void exportNodes(loco::Graph::NodeContext *nodes, flatbuffers::FlatBufferBuilder &builder,
+ SerializedModelData &gd);
+
+#endif // __OPERATION_EXPORTER_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exo/TFLExporter.h"
+
+#include "TFLExporterImpl.h"
+
+#include <stdex/Memory.h>
+
+#include <fstream>
+
+namespace exo
+{
+
+// Impl's constructor serializes 'graph' right away (see TFLExporterImpl.cpp)
+TFLExporter::TFLExporter(loco::Graph *graph) : _impl(stdex::make_unique<Impl>(graph))
+{
+ // NOTHING TO DO
+}
+
+// Defaulted here, where Impl is a complete type, so unique_ptr can delete it
+TFLExporter::~TFLExporter() = default;
+
+/**
+ * Writes the serialized model to 'path'.
+ *
+ * The model is binary data, so the stream is opened in binary mode —
+ * otherwise newline translation corrupts the file on some platforms.
+ * Stream exceptions are enabled to honor the "@throws" contract declared in
+ * exo/TFLExporter.h (the previous code failed silently on any I/O error).
+ */
+void TFLExporter::dumpToFile(const char *path) const
+{
+  const char *ptr = _impl->getBufferPointer();
+  const size_t size = _impl->getBufferSize();
+
+  assert(ptr && "graph is not serialized for some reason");
+
+  std::ofstream file(path, std::ofstream::binary);
+  file.exceptions(std::ofstream::failbit | std::ofstream::badbit);
+  file.write(ptr, size);
+}
+
+} // namespace exo
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exo/TFLExporter.h"
+
+#include <loco/IR/PermutingCodec.h>
+#include <stdex/Memory.h>
+
+#include <gtest/gtest.h>
+
+using stdex::make_unique;
+
+// Test fixture: builds small loco graphs layer-by-layer and exports them.
+// Each *Layer helper creates one node in the fixture-owned graph and wires
+// it to the given input node.
+class TestLocoExporterModels : public ::testing::Test
+{
+public:
+ // Assigns a fixed 1x3x100x100 shape in N,C,H,W dim order (matching the
+ // permutation used by featureEncodeLayer below); returns the element count
+ template <typename T> uint32_t setSampleShape(T *op)
+ {
+ const uint32_t n = 1;
+ const uint32_t h = 100;
+ const uint32_t w = 100;
+ const uint32_t c = 3;
+ op->rank(4);
+ op->dim(0).set(n);
+ op->dim(1).set(c);
+ op->dim(2).set(h);
+ op->dim(3).set(w);
+ return n * h * w * c;
+ }
+
+ // FLOAT32 graph input (Pull) registered as "graph_input"
+ loco::Pull *pullLayer()
+ {
+ loco::Pull *pull = _graph.nodes()->create<loco::Pull>();
+
+ auto graph_input = _graph.inputs()->create();
+ graph_input->name("graph_input");
+ graph_input->node(pull);
+
+ pull->dtype(loco::DataType::FLOAT32);
+ setSampleShape(pull);
+ return pull;
+ }
+
+ // FLOAT32 constant filled with 0,1,2,... over the sample shape
+ loco::ConstGen *constLayer()
+ {
+ loco::ConstGen *cst = _graph.nodes()->create<loco::ConstGen>();
+ cst->dtype(loco::DataType::FLOAT32);
+
+ const auto size = setSampleShape(cst);
+ cst->size<loco::DataType::FLOAT32>(size);
+ // fill cst layer with some data
+ for (uint32_t i = 0; i < size; ++i)
+ cst->at<loco::DataType::FLOAT32>(i) = i;
+ return cst;
+ }
+
+ // Graph output (Push) registered as "graph_output", fed by 'input'
+ loco::Push *pushLayer(loco::Node *input)
+ {
+ loco::Push *push = _graph.nodes()->create<loco::Push>();
+
+ auto graph_output = _graph.outputs()->create();
+ graph_output->name("graph_output");
+ graph_output->node(push);
+
+ push->from(input);
+ return push;
+ }
+
+ loco::ReLU *reluLayer(loco::Node *input)
+ {
+ loco::ReLU *relu = _graph.nodes()->create<loco::ReLU>();
+ relu->input(input);
+ return relu;
+ }
+
+ // MaxPool2D with a 3x2 window and 5x4 strides; padding left at defaults
+ // (all zero), which the exporter maps to VALID
+ loco::MaxPool2D *validPoolLayer(loco::Node *input_fm)
+ {
+ loco::MaxPool2D *max_pool = _graph.nodes()->create<loco::MaxPool2D>();
+ auto &window = *max_pool->window();
+ window.vertical(2);
+ window.horizontal(3);
+ auto &strides = *max_pool->stride();
+ strides.vertical(4);
+ strides.horizontal(5);
+ max_pool->ifm(input_fm);
+ return max_pool;
+ }
+
+ // FeatureEncode with an NCHW permutation (Count=0, Depth=1, Height=2,
+ // Width=3) — deliberately NOT NHWC, so export produces a TRANSPOSE
+ loco::FeatureEncode *featureEncodeLayer(loco::Node *input)
+ {
+ loco::FeatureEncode *encode_layer = _graph.nodes()->create<loco::FeatureEncode>();
+ auto encoder = make_unique<loco::PermutingEncoder<loco::Domain::Feature>>();
+ (*encoder->perm())[loco::FeatureAxis::Count] = 0;
+ (*encoder->perm())[loco::FeatureAxis::Depth] = 1;
+ (*encoder->perm())[loco::FeatureAxis::Height] = 2;
+ (*encoder->perm())[loco::FeatureAxis::Width] = 3;
+ encode_layer->encoder(std::move(encoder));
+ encode_layer->input(input);
+ return encode_layer;
+ }
+
+ // FeatureDecode with the matching NCHW permutation (inverse of the above)
+ loco::FeatureDecode *featureDecodeLayer(loco::Node *input)
+ {
+ loco::FeatureDecode *decode_layer = _graph.nodes()->create<loco::FeatureDecode>();
+ auto decoder = make_unique<loco::PermutingDecoder<loco::Domain::Feature>>();
+ (*decoder->perm())[loco::FeatureAxis::Count] = 0;
+ (*decoder->perm())[loco::FeatureAxis::Depth] = 1;
+ (*decoder->perm())[loco::FeatureAxis::Height] = 2;
+ (*decoder->perm())[loco::FeatureAxis::Width] = 3;
+ decode_layer->decoder(std::move(decoder));
+ decode_layer->input(input);
+ return decode_layer;
+ }
+
+ loco::Graph &getGraph() { return _graph; }
+
+private:
+ loco::Graph _graph;
+};
+
+TEST_F(TestLocoExporterModels, MaxPool2D)
+{
+  // Wire Pull -> FeatureEncode -> MaxPool2D -> FeatureDecode -> Push
+  loco::Push *push =
+      pushLayer(featureDecodeLayer(validPoolLayer(featureEncodeLayer(pullLayer()))));
+  (void)push;
+
+  exo::TFLExporter e(&getGraph());
+  // TODO Use buffer instead
+  e.dumpToFile("maxpool2d.tflite");
+
+  ASSERT_TRUE(true);
+}
+
+TEST_F(TestLocoExporterModels, Const)
+{
+  // A single ConstGen feeding the graph output directly
+  loco::Push *push = pushLayer(constLayer());
+  (void)push;
+
+  exo::TFLExporter e(&getGraph());
+  // TODO Use buffer instead
+  e.dumpToFile("const.tflite");
+
+  ASSERT_TRUE(true);
+}
+
+TEST_F(TestLocoExporterModels, PoolWithActivation)
+{
+  // Same pipeline as the MaxPool2D case, with a ReLU before the graph output
+  loco::Node *features = featureDecodeLayer(validPoolLayer(featureEncodeLayer(pullLayer())));
+  loco::Push *push = pushLayer(reluLayer(features));
+  (void)push;
+
+  exo::TFLExporter e(&getGraph());
+  // TODO Use buffer instead
+  e.dumpToFile("maxpool_activation.tflite");
+
+  ASSERT_TRUE(true);
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TFLExporterImpl.h"
+
+#include "TensorExporter.h"
+#include "OperationExporter.h"
+#include "ExporterUtils.h"
+
+#include <unordered_map>
+#include <string>
+
+namespace exo
+{
+using namespace tflite;
+using namespace flatbuffers;
+
+TFLExporter::Impl::Impl(loco::Graph *graph) { exportGraph(graph); }
+
+/**
+ * @brief Serializes the operator-code table.
+ *
+ * registerBuiltinOpcode assigned each opcode a dense index, so the resulting
+ * vector is simply ordered by that index.
+ *
+ * NOTE Iterates by const reference; the previous 'for (auto it : opcodes)'
+ *      copied every map entry on each iteration.
+ */
+Offset<Vector<Offset<OperatorCode>>>
+encodeOperatorCodes(FlatBufferBuilder &builder, std::unordered_map<OpCode, uint32_t> &opcodes)
+{
+  std::vector<Offset<OperatorCode>> operator_codes_vec(opcodes.size());
+  for (const auto &it : opcodes)
+  {
+    uint32_t idx = it.second;
+    operator_codes_vec[idx] = CreateOperatorCode(builder, it.first.opcode);
+  }
+  return builder.CreateVector(operator_codes_vec);
+}
+
+// Packs the tensors/IO ids/operators accumulated in 'gd' into one SubGraph.
+// Each CreateVector call completes before the next starts, as flatbuffers
+// forbids nested serialization.
+flatbuffers::Offset<tflite::SubGraph> TFLExporter::Impl::exportSubgraph(SerializedModelData &gd)
+{
+ auto tensors = _builder.CreateVector(gd._tensors);
+ auto inputs = _builder.CreateVector(gd._inputs);
+ auto outputs = _builder.CreateVector(gd._outputs);
+ auto operators = _builder.CreateVector(gd._operators);
+ auto subgraph = CreateSubGraph(_builder, tensors, inputs, outputs, operators);
+ return subgraph;
+}
+
+// Top-level serialization: walks the graph, fills a SerializedModelData, and
+// finishes the flatbuffer in _builder (tensors first, then operators, then
+// the model-level tables).
+void TFLExporter::Impl::exportGraph(loco::Graph *graph)
+{
+ _builder.Clear();
+
+ SerializedModelData gd;
+
+ // This version is taken from comment in fbs
+ constexpr uint32_t version = 3;
+
+ registerGraphIOName(graph, gd);
+
+ // parse graph into SerializedModelData structure
+ exportOpDefinedTensors(graph->nodes(), _builder, gd);
+
+ exportNodes(graph->nodes(), _builder, gd);
+
+ // encode operator codes
+ auto operator_codes = encodeOperatorCodes(_builder, gd._operator_codes);
+
+ // Subgraphs
+ Offset<SubGraph> subgraph = exportSubgraph(gd);
+ auto subgraphs = _builder.CreateVector(std::vector<Offset<SubGraph>>{subgraph});
+
+ // Description
+ std::string description_str = "nnpackage";
+ auto description = _builder.CreateString(description_str);
+
+ // create array of buffers
+ auto buffers = _builder.CreateVector(gd._buffers);
+
+ // empty metadata
+ std::vector<int> metadata_buffer_vec;
+ auto metadata_buffer = _builder.CreateVector(metadata_buffer_vec);
+
+ // Model
+ auto model_offset = CreateModel(_builder, version, operator_codes, subgraphs, description,
+ buffers, metadata_buffer);
+ FinishModelBuffer(_builder, model_offset);
+}
+
+// Raw pointer to the serialized model; remains valid while this Impl
+// (and therefore _builder) is alive.
+const char *TFLExporter::Impl::getBufferPointer() const
+{
+  return reinterpret_cast<const char *>(_builder.GetBufferPointer());
+}
+
+size_t TFLExporter::Impl::getBufferSize() const { return _builder.GetSize(); }
+
+} // namespace exo
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TFL_EXPORTER_IMPL_H__
+#define __TFL_EXPORTER_IMPL_H__
+
+#include "exo/TFLExporter.h"
+#include "schema_generated.h"
+
+#include <loco.h>
+
+struct SerializedModelData;
+
+namespace exo
+{
+
+/**
+ * @brief Internal implementation of the TFLExporter interface class
+ *
+ * The serialized T/F Lite model is built once, in the constructor, and
+ * kept inside '_builder'; the buffer accessors stay valid for the
+ * lifetime of this object.
+ */
+class TFLExporter::Impl
+{
+public:
+  Impl() = delete;
+  ~Impl() = default;
+
+  /**
+   * @brief serialize 'graph' into the internal flatbuffer builder
+   */
+  explicit Impl(loco::Graph *graph);
+
+  /**
+   * @return pointer to buffer with serialized graph
+   */
+  const char *getBufferPointer() const;
+
+  /**
+   * @return size of buffer with serialized graph
+   */
+  size_t getBufferSize() const;
+
+private:
+  /**
+   * @brief create Subgraph using data stored in SerializedModelData
+   * @param gd information about serializer parts of model
+   * @return offset in buffer corresponding to serialized subgraph
+   */
+  flatbuffers::Offset<tflite::SubGraph> exportSubgraph(SerializedModelData &gd);
+
+  /**
+   * @brief root function that writes graph into internal buffer
+   * @param graph graph to serialize
+   */
+  void exportGraph(loco::Graph *graph);
+
+private:
+  // Owns the bytes exposed via getBufferPointer()/getBufferSize()
+  flatbuffers::FlatBufferBuilder _builder;
+};
+
+} // namespace exo
+
+#endif // __TFL_EXPORTER_IMPL_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TensorExporter.h"
+#include "TypeInference.h"
+
+#include "loco/IR/Algorithm.h"
+
+using namespace tflite;
+using namespace flatbuffers;
+
+namespace
+{
+
+// Serialize a rank-known shape as a flatbuffer vector of int32 dims.
+// Individual unknown dimensions are already encoded as -1 by the caller.
+flatbuffers::Offset<Vector<int32_t>> encodeShape(FlatBufferBuilder &builder,
+                                                 const ShapeDescription &shape)
+{
+  assert(shape._rank_known && "unknown number of dimensions is not supported");
+  return builder.CreateVector(shape._dims);
+}
+
+// Fallback: nodes other than ConstGen carry no constant payload, so they
+// get an empty Buffer entry.
+template <typename NodeT>
+flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, NodeT *)
+{
+  return CreateBuffer(builder);
+}
+
+// ConstGen specialization: copy the constant's elements into a Buffer.
+template <>
+flatbuffers::Offset<tflite::Buffer> encodeOpBuffer(FlatBufferBuilder &builder, loco::ConstGen *c)
+{
+  // Only FLOAT32 constants are supported for now
+  assert(c->dtype() == loco::DataType::FLOAT32);
+
+  const uint32_t element_count = c->size<loco::DataType::FLOAT32>();
+  std::vector<float> payload;
+  payload.reserve(element_count);
+  for (uint32_t idx = 0; idx < element_count; ++idx)
+  {
+    payload.push_back(c->at<loco::DataType::FLOAT32>(idx));
+  }
+  // The schema stores buffer contents as raw bytes
+  auto bytes = builder.CreateVector(reinterpret_cast<uint8_t *>(payload.data()),
+                                    element_count * sizeof(float));
+  return CreateBuffer(builder, bytes);
+}
+
+} // namespace
+
+/**
+ * @brief Create and register a Tensor (shape/type/buffer/name) for 'node's result
+ * @param node    operation whose output tensor is being emitted
+ * @param builder flatbuffer builder collecting the serialized model
+ * @param gd      serialized-model bookkeeping, updated as a side effect
+ */
+template <typename NodeT>
+void exportOpDefinedTensor(NodeT *node, FlatBufferBuilder &builder, SerializedModelData &gd)
+{
+  // Create and register output tensor shape
+  ShapeDescription shape_description = getOpResultShape(node, gd);
+  gd._node_to_shape[node] = shape_description;
+  auto shape_offset = encodeShape(builder, shape_description);
+
+  // encode and register output tensor type
+  auto tensor_type = getOpResultType(node, gd);
+  gd._node_to_type[node] = tensor_type;
+
+  // encode and register output tensor buffer
+  auto buffer = encodeOpBuffer(builder, node);
+  auto buffer_id = static_cast<uint32_t>(gd._buffers.size());
+  gd._buffers.push_back(buffer);
+
+  // encode and register tensor itself using attributes from previous steps
+  auto tensor_id = static_cast<uint32_t>(gd._tensors.size());
+
+  std::string name;
+  if (auto pull = dynamic_cast<loco::Pull *>(node))
+  {
+    // Graph inputs reuse their registered input name
+    name = gd._pull_to_name[pull];
+  }
+  else
+  {
+    // FIX: the original dereferenced succs(node).begin() unconditionally,
+    // which is undefined behavior when the node has no successor.
+    auto successors = loco::succs(node);
+    loco::Push *push =
+        successors.empty() ? nullptr : dynamic_cast<loco::Push *>(*successors.begin());
+    if (push)
+    {
+      // Tensors feeding a graph output reuse the registered output name
+      name = gd._push_to_name[push];
+    }
+    else
+    {
+      // Otherwise synthesize a unique name from the tensor id
+      name = "t_" + std::to_string(tensor_id);
+    }
+  }
+  auto name_offset = builder.CreateString(name);
+  auto tensor_offset = CreateTensor(builder, shape_offset, tensor_type, buffer_id, name_offset,
+                                    /*quantization*/ 0, /*is_variable*/ false);
+  gd._node_to_tensor_id[node] = tensor_id;
+  gd._tensors.push_back(tensor_offset);
+}
+
+// Emit one Tensor per node result, visiting nodes in reverse post-order so
+// each node's predecessors already have shape/type/id recorded in 'gd'.
+// The dynamic_cast chain exists so exportOpDefinedTensor is instantiated
+// with the node's concrete type (selecting the right shape/type overloads).
+void exportOpDefinedTensors(loco::Graph::NodeContext *nodes, FlatBufferBuilder &builder,
+                            SerializedModelData &gd)
+{
+  // find entrances of graph
+  std::vector<loco::Node *> roots;
+  for (uint32_t node_id = 0; node_id < nodes->size(); ++node_id)
+  {
+    loco::Node *node = nodes->at(node_id);
+    if (dynamic_cast<loco::Push *>(node))
+    {
+      roots.push_back(node);
+    }
+  }
+
+  // Operations should be traversed in RPO because during processing of current operation
+  // we need to know all attributes of previous operations,
+  // like shape, type,tensor id related with previous operation
+  auto sequence = loco::postorder_traversal(roots);
+  for (loco::Node *node : sequence)
+  {
+    if (auto *pull = dynamic_cast<loco::Pull *>(node))
+    {
+      // Create tensor for input node
+      exportOpDefinedTensor(pull, builder, gd);
+    }
+    else if (dynamic_cast<loco::Push *>(node))
+    {
+      // Do nothing for exit node
+    }
+    else if (auto *cst = dynamic_cast<loco::ConstGen *>(node))
+    {
+      // Create tensor filled with constant data
+      exportOpDefinedTensor(cst, builder, gd);
+    }
+    else if (auto *encode = dynamic_cast<loco::FeatureEncode *>(node))
+    {
+      exportOpDefinedTensor(encode, builder, gd);
+    }
+    else if (auto *decode = dynamic_cast<loco::FeatureDecode *>(node))
+    {
+      exportOpDefinedTensor(decode, builder, gd);
+    }
+    else if (auto *encode = dynamic_cast<loco::FilterEncode *>(node))
+    {
+      exportOpDefinedTensor(encode, builder, gd);
+    }
+    else if (auto *max_pool = dynamic_cast<loco::MaxPool2D *>(node))
+    {
+      exportOpDefinedTensor(max_pool, builder, gd);
+    }
+    else if (auto *avg_pool = dynamic_cast<loco::AvgPool2D *>(node))
+    {
+      exportOpDefinedTensor(avg_pool, builder, gd);
+    }
+    else if (auto *conv2d = dynamic_cast<loco::Conv2D *>(node))
+    {
+      exportOpDefinedTensor(conv2d, builder, gd);
+    }
+    else if (auto *relu = dynamic_cast<loco::ReLU *>(node))
+    {
+      exportOpDefinedTensor(relu, builder, gd);
+    }
+    else if (auto *tconcat = dynamic_cast<loco::TensorConcat *>(node))
+    {
+      exportOpDefinedTensor(tconcat, builder, gd);
+    }
+    else if (auto *encode = dynamic_cast<loco::BiasEncode *>(node))
+    {
+      exportOpDefinedTensor(encode, builder, gd);
+    }
+    else if (auto *biasadd = dynamic_cast<loco::BiasAdd<loco::Domain::Tensor> *>(node))
+    {
+      exportOpDefinedTensor(biasadd, builder, gd);
+    }
+    else
+    {
+      // Fail fast on node kinds this exporter cannot serialize yet
+      assert(false && "unsupported node type");
+    }
+  }
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TENSOR_EXPORTER_H__
+#define __TENSOR_EXPORTER_H__
+
+#include "ExporterUtils.h"
+
+#include <loco/IR/Graph.h>
+
+#include <flatbuffers/flatbuffers.h>
+
+/**
+ * @brief create Tensors corresponding to results of all nodes in graph
+ * @param nodes list of nodes in computational graph
+ * @param builder flatbuffer builder collecting the serialized model
+ * @param gd information about serialized parts of model
+ */
+void exportOpDefinedTensors(loco::Graph::NodeContext *nodes,
+ flatbuffers::FlatBufferBuilder &builder, SerializedModelData &gd);
+
+#endif // __TENSOR_EXPORTER_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TypeInference.h"
+
+#include "schema_generated.h"
+
+#include <type_traits>
+
+namespace
+{
+
+/**
+ * @brief Map a loco element type onto the corresponding T/F Lite TensorType.
+ *
+ * Unsigned 16/32/64-bit and FLOAT64 types have no supported mapping.
+ */
+tflite::TensorType translateLocoTypeToTFLite(loco::DataType dtype)
+{
+  switch (dtype)
+  {
+    case loco::DataType::U8:
+      return tflite::TensorType_UINT8;
+    // case loco::DataType::U16: unsupported
+    // case loco::DataType::U32: unsupported
+    // case loco::DataType::U64: unsupported
+    case loco::DataType::S8:
+      return tflite::TensorType_INT8;
+    case loco::DataType::S16:
+      return tflite::TensorType_INT16;
+    case loco::DataType::S32:
+      return tflite::TensorType_INT32;
+    case loco::DataType::S64:
+      return tflite::TensorType_INT64;
+    case loco::DataType::FLOAT16:
+      return tflite::TensorType_FLOAT16;
+    case loco::DataType::FLOAT32:
+      return tflite::TensorType_FLOAT32;
+    // case loco::DataType::FLOAT64: unsupported
+    default:
+      // FIX: falling off the end of a value-returning function is undefined
+      // behavior; in NDEBUG builds the assert vanishes, so return a
+      // deterministic (arbitrary) fallback after it.
+      assert(false && "unsupported data type");
+      return tflite::TensorType_FLOAT32;
+  }
+}
+
+/**
+ * @brief Integer ceiling of dividend / divisor, e.g. ceil_div(5, 2) == 3.
+ *
+ * FIX: computed via div/mod instead of (dividend + divisor - 1) / divisor,
+ * whose addition can wrap for dividends near the maximum of T (reachable
+ * here, since an unknown dim of -1 static_cast to uint32_t is huge).
+ */
+template <typename T, typename If = typename std::enable_if<std::is_integral<T>::value, int>::type>
+T ceil_div(T dividend, T divisor)
+{
+  assert(dividend > 0 && divisor > 0 && "this implementation is for positive numbers only");
+  return dividend / divisor + (dividend % divisor != 0 ? 1 : 0);
+}
+
+} // namespace
+
+// A constant carries its element type directly on the node.
+tflite::TensorType getOpResultType(loco::ConstGen *node, SerializedModelData &)
+{
+  return translateLocoTypeToTFLite(node->dtype());
+}
+
+// A graph input's element type is declared on the Pull node itself.
+tflite::TensorType getOpResultType(loco::Pull *node, SerializedModelData &)
+{
+  return translateLocoTypeToTFLite(node->dtype());
+}
+
+// ReLU is type-preserving: propagate the recorded input type.
+tflite::TensorType getOpResultType(loco::ReLU *node, SerializedModelData &gd)
+{
+  loco::Node *input = node->input();
+  return gd._node_to_type[input];
+}
+
+// Pooling is type-preserving: propagate the feature map's recorded type.
+tflite::TensorType getOpResultType(loco::MaxPool2D *node, SerializedModelData &gd)
+{
+  loco::Node *ifm = node->ifm();
+  return gd._node_to_type[ifm];
+}
+
+// Pooling is type-preserving: propagate the feature map's recorded type.
+tflite::TensorType getOpResultType(loco::AvgPool2D *node, SerializedModelData &gd)
+{
+  loco::Node *ifm = node->ifm();
+  return gd._node_to_type[ifm];
+}
+
+// Convolution output takes the element type of its input feature map.
+tflite::TensorType getOpResultType(loco::Conv2D *node, SerializedModelData &gd)
+{
+  loco::Node *ifm = node->ifm();
+  return gd._node_to_type[ifm];
+}
+
+// Encoding only reinterprets layout, so the element type is unchanged.
+tflite::TensorType getOpResultType(loco::FeatureEncode *node, SerializedModelData &gd)
+{
+  loco::Node *input = node->input();
+  return gd._node_to_type[input];
+}
+
+// Decoding only reinterprets layout, so the element type is unchanged.
+tflite::TensorType getOpResultType(loco::FeatureDecode *node, SerializedModelData &gd)
+{
+  loco::Node *input = node->input();
+  return gd._node_to_type[input];
+}
+
+// Encoding only reinterprets layout, so the element type is unchanged.
+tflite::TensorType getOpResultType(loco::FilterEncode *node, SerializedModelData &gd)
+{
+  loco::Node *input = node->input();
+  return gd._node_to_type[input];
+}
+
+// Concatenation requires both operands to share one element type, which
+// then becomes the result type.
+tflite::TensorType getOpResultType(loco::TensorConcat *node, SerializedModelData &gd)
+{
+  tflite::TensorType lhs_type = gd._node_to_type[node->lhs()];
+  tflite::TensorType rhs_type = gd._node_to_type[node->rhs()];
+
+  // TODO support heterogenous type combination
+  assert(lhs_type == rhs_type);
+  return lhs_type;
+}
+
+// Encoding only reinterprets layout, so the element type is unchanged.
+tflite::TensorType getOpResultType(loco::BiasEncode *node, SerializedModelData &gd)
+{
+  loco::Node *input = node->input();
+  return gd._node_to_type[input];
+}
+
+// Bias addition requires value and bias to share one element type, which
+// then becomes the result type.
+tflite::TensorType getOpResultType(loco::BiasAdd<loco::Domain::Tensor> *node,
+                                   SerializedModelData &gd)
+{
+  tflite::TensorType value_type = gd._node_to_type[node->value()];
+  tflite::TensorType bias_type = gd._node_to_type[node->bias()];
+
+  // TODO support heterogenous type combination
+  assert(value_type == bias_type);
+  return value_type;
+}
+
+// Unknown dimensions are encoded as -1 in the T/F Lite shape convention.
+int32_t decodeShapeDimension(const loco::Dimension &dim)
+{
+  return dim.known() ? static_cast<int32_t>(dim.value()) : -1;
+}
+
+// Inverse of decodeShapeDimension: -1 round-trips to an unknown dimension.
+loco::Dimension encodeShapeDimension(const int32_t &value)
+{
+  return (value == -1) ? loco::Dimension{} : loco::Dimension{static_cast<uint32_t>(value)};
+}
+
+// Convert the Pull node's declared rank/dims into a ShapeDescription,
+// mapping each unknown dimension to -1.
+ShapeDescription getOpResultShape(loco::Pull *node, SerializedModelData &)
+{
+  ShapeDescription shape;
+  shape._rank_known = true;
+
+  const uint32_t rank = node->rank();
+  shape._dims.reserve(rank);
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    shape._dims.push_back(decodeShapeDimension(node->dim(axis)));
+  }
+  return shape;
+}
+
+// Convert the constant's declared rank/dims into a ShapeDescription,
+// mapping each unknown dimension to -1.
+ShapeDescription getOpResultShape(loco::ConstGen *node, SerializedModelData &)
+{
+  ShapeDescription shape;
+  shape._rank_known = true;
+
+  const uint32_t rank = node->rank();
+  shape._dims.reserve(rank);
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    shape._dims.push_back(decodeShapeDimension(node->dim(axis)));
+  }
+  return shape;
+}
+
+// Infer the pooled output shape; dims are indexed 0..3 as batch, height,
+// width, channels (matching the count/height/width/depth mapping used by
+// the FeatureEncode shape inference below).
+ShapeDescription getOpResultShape(loco::MaxPool2D *node, SerializedModelData &gd)
+{
+  loco::Node *pred = node->ifm();
+  const ShapeDescription &pred_shape = gd._node_to_shape[pred];
+  if (!pred_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+  ShapeDescription shape;
+  shape._rank_known = true;
+  shape._dims.resize(4);
+  shape._dims[0] = pred_shape._dims[0];
+  shape._dims[3] = pred_shape._dims[3];
+  tflite::Padding padding = getOpPadding(node->pad());
+  switch (padding)
+  {
+    case tflite::Padding_SAME:
+    {
+      // NOTE If a spatial dim is -1 (unknown), these casts produce a huge
+      //      unsigned value; the result is discarded by the ternaries below.
+      auto height = static_cast<uint32_t>(pred_shape._dims[1]);
+      auto width = static_cast<uint32_t>(pred_shape._dims[2]);
+
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+
+      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      // VALID: shrink each spatial dim by (window - 1) before dividing
+      auto padded_h = static_cast<uint32_t>(pred_shape._dims[1] - (node->window()->vertical() - 1));
+      auto padded_w = static_cast<uint32_t>(pred_shape._dims[2] - (node->window()->horizontal() - 1));
+
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+
+      shape._dims[1] = pred_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = pred_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
+  }
+  return shape;
+}
+
+// Infer the pooled output shape (same arithmetic as the MaxPool2D overload).
+// NOTE(review): unlike MaxPool2D above, an unknown input rank here trips an
+// assert instead of returning an unknown shape — confirm this is intended.
+ShapeDescription getOpResultShape(loco::AvgPool2D *node, SerializedModelData &gd)
+{
+  const ShapeDescription &ifm_shape = gd._node_to_shape[node->ifm()];
+  assert(ifm_shape._rank_known);
+
+  ShapeDescription shape;
+  shape._rank_known = true;
+  shape._dims.resize(4);
+  shape._dims[0] = ifm_shape._dims[0]; // copy batch
+  shape._dims[3] = ifm_shape._dims[3]; // copy channel
+
+  tflite::Padding padding = getOpPadding(node->pad());
+  switch (padding)
+  {
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      // VALID: shrink each spatial dim by (window - 1) before dividing
+      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (node->window()->vertical() - 1));
+      auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (node->window()->horizontal() - 1));
+
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
+  }
+  return shape;
+}
+
+// Infer the convolution output shape. The kernel input must currently be a
+// FilterEncode node (asserted below); output channel count comes from the
+// kernel's count dimension (ker_shape._dims[0]).
+ShapeDescription getOpResultShape(loco::Conv2D *node, SerializedModelData &gd)
+{
+  loco::Node *ifm = node->ifm();
+  const ShapeDescription &ifm_shape = gd._node_to_shape[ifm];
+  if (!ifm_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+
+  auto *ker = dynamic_cast<loco::FilterEncode *>(node->ker());
+  assert(ker);
+  const ShapeDescription &ker_shape = gd._node_to_shape[ker];
+  if (!ker_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+
+  ShapeDescription shape;
+  shape._rank_known = true;
+  shape._dims.resize(4);
+  shape._dims[0] = ifm_shape._dims[0];
+  shape._dims[3] = ker_shape._dims[0];
+  tflite::Padding padding = getOpPadding(node->pad());
+  switch (padding)
+  {
+    case tflite::Padding_SAME:
+    {
+      auto height = static_cast<uint32_t>(ifm_shape._dims[1]);
+      auto width = static_cast<uint32_t>(ifm_shape._dims[2]);
+
+      int32_t proposed_res_height = ceil_div(height, node->stride()->vertical());
+      int32_t proposed_res_width = ceil_div(width, node->stride()->horizontal());
+
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_res_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_res_width;
+      break;
+    }
+    case tflite::Padding_VALID:
+    {
+      // VALID: shrink each spatial dim by (kernel extent - 1) before dividing
+      auto padded_h = static_cast<uint32_t>(ifm_shape._dims[1] - (ker_shape._dims[1] - 1));
+      auto padded_w = static_cast<uint32_t>(ifm_shape._dims[2] - (ker_shape._dims[2] - 1));
+
+      int32_t proposed_height = ceil_div(padded_h, node->stride()->vertical());
+      int32_t proposed_width = ceil_div(padded_w, node->stride()->horizontal());
+
+      shape._dims[1] = ifm_shape._dims[1] == -1 ? -1 : proposed_height;
+      shape._dims[2] = ifm_shape._dims[2] == -1 ? -1 : proposed_width;
+      break;
+    }
+    default:
+      assert(false && "unknown padding type");
+  }
+  return shape;
+}
+
+// ReLU is element-wise, so the output shape equals the input shape.
+ShapeDescription getOpResultShape(loco::ReLU *node, SerializedModelData &gd)
+{
+  return gd._node_to_shape[node->input()];
+}
+
+// Ask the node's encoder how the input tensor maps onto a feature map and
+// lay the result out as count/height/width/depth in dims 0..3.
+ShapeDescription getOpResultShape(loco::FeatureEncode *node, SerializedModelData &gd)
+{
+  const ShapeDescription &pred_shape = gd._node_to_shape[node->input()];
+  if (!pred_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+  ShapeDescription shape;
+  shape._rank_known = true;
+  // Rebuild a loco::TensorShape so the encoder can do the layout mapping
+  loco::TensorShape tensor_shape;
+  uint32_t num_dims = pred_shape._dims.size();
+  tensor_shape.rank(num_dims);
+  for (uint32_t i = 0; i < num_dims; ++i)
+  {
+    tensor_shape.dim(i) = encodeShapeDimension(pred_shape._dims[i]);
+  }
+  loco::FeatureShape feature_shape = node->encoder()->shape(tensor_shape);
+  shape._dims.resize(4);
+  shape._dims[0] = decodeShapeDimension(feature_shape.count());
+  shape._dims[1] = decodeShapeDimension(feature_shape.height());
+  shape._dims[2] = decodeShapeDimension(feature_shape.width());
+  shape._dims[3] = decodeShapeDimension(feature_shape.depth());
+  return shape;
+}
+
+// Inverse of FeatureEncode: rebuild a FeatureShape from dims 0..3
+// (count/height/width/depth) and ask the decoder for the tensor shape.
+// NOTE(review): reads dim(0)..dim(3) of the decoded shape, i.e. assumes the
+// decoder always yields a rank-4 tensor — confirm against the decoder impls.
+ShapeDescription getOpResultShape(loco::FeatureDecode *node, SerializedModelData &gd)
+{
+  const ShapeDescription &pred_shape = gd._node_to_shape[node->input()];
+  if (!pred_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+  ShapeDescription shape;
+  shape._rank_known = true;
+  loco::FeatureShape feature_shape;
+  feature_shape.count() = encodeShapeDimension(pred_shape._dims[0]);
+  feature_shape.height() = encodeShapeDimension(pred_shape._dims[1]);
+  feature_shape.width() = encodeShapeDimension(pred_shape._dims[2]);
+  feature_shape.depth() = encodeShapeDimension(pred_shape._dims[3]);
+  loco::TensorShape tensor_shape = node->decoder()->shape(feature_shape);
+  shape._dims.resize(4);
+  for (uint32_t i = 0; i < 4; ++i)
+  {
+    shape._dims[i] = decodeShapeDimension(tensor_shape.dim(i));
+  }
+  return shape;
+}
+
+// Ask the node's encoder how the input tensor maps onto a filter and lay
+// the result out as count/height/width/depth in dims 0..3.
+ShapeDescription getOpResultShape(loco::FilterEncode *node, SerializedModelData &gd)
+{
+  const ShapeDescription &input_shape = gd._node_to_shape[node->input()];
+  if (!input_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+  ShapeDescription shape;
+  shape._rank_known = true;
+  // Rebuild a loco::TensorShape so the encoder can do the layout mapping
+  loco::TensorShape tensor_shape;
+  uint32_t num_dims = input_shape._dims.size();
+  tensor_shape.rank(num_dims);
+  for (uint32_t i = 0; i < num_dims; ++i)
+  {
+    tensor_shape.dim(i) = encodeShapeDimension(input_shape._dims[i]);
+  }
+  loco::FilterShape filter_shape = node->encoder()->shape(tensor_shape);
+  shape._dims.resize(4);
+  shape._dims[0] = decodeShapeDimension(filter_shape.count());
+  shape._dims[1] = decodeShapeDimension(filter_shape.height());
+  shape._dims[2] = decodeShapeDimension(filter_shape.width());
+  shape._dims[3] = decodeShapeDimension(filter_shape.depth());
+  return shape;
+}
+
+// Concatenation: sum the sizes along the concat axis; all other dims must
+// match between the two operands.
+// NOTE(review): if either operand's axis dim is unknown (-1) the sum below
+// is meaningless — looks like fully-known axis dims are assumed; confirm.
+ShapeDescription getOpResultShape(loco::TensorConcat *node, SerializedModelData &gd)
+{
+  const ShapeDescription &lhs_shape = gd._node_to_shape[node->lhs()];
+  if (!lhs_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+
+  const ShapeDescription &rhs_shape = gd._node_to_shape[node->rhs()];
+  if (!rhs_shape._rank_known)
+  {
+    // return unknown shape
+    return {};
+  }
+
+  ShapeDescription ret;
+
+  assert(lhs_shape._dims.size() == rhs_shape._dims.size());
+  ret._dims.resize(lhs_shape._dims.size());
+
+  uint32_t axis = node->axis();
+
+  for (uint32_t i = 0; i < lhs_shape._dims.size(); ++i)
+  {
+    if (i == axis)
+    {
+      ret._dims[i] = lhs_shape._dims[i] + rhs_shape._dims[i];
+    }
+    else
+    {
+      assert(lhs_shape._dims[i] == rhs_shape._dims[i]);
+      ret._dims[i] = lhs_shape._dims[i];
+    }
+  }
+  ret._rank_known = true;
+
+  return ret;
+}
+
+// BiasEncode is shape-preserving; a bias is expected to be rank-1.
+ShapeDescription getOpResultShape(loco::BiasEncode *node, SerializedModelData &gd)
+{
+  const ShapeDescription &input_shape = gd._node_to_shape[node->input()];
+
+  // Bias should be rank 1
+  assert(input_shape._dims.size() == 1);
+  return input_shape;
+}
+
+// Bias addition is shape-preserving on 'value'; the rank-1 bias must match
+// the size of the (last) axis it is broadcast along.
+ShapeDescription getOpResultShape(loco::BiasAdd<loco::Domain::Tensor> *node,
+                                  SerializedModelData &gd)
+{
+  const ShapeDescription &value_shape = gd._node_to_shape[node->value()];
+  const ShapeDescription &bias_shape = gd._node_to_shape[node->bias()];
+
+  // For TFlite, only supports last bias add axis. Unless, broadcasting is not performed as
+  // expected.
+  assert(node->axis() == value_shape._dims.size() - 1);
+
+  // Bias should be rank 1
+  assert(bias_shape._dims.size() == 1);
+
+  // Channel count coherency for proper broadcast
+  assert(bias_shape._dims[0] == value_shape._dims[node->axis()]);
+
+  return value_shape;
+}
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TYPE_INFERENCE_H__
+#define __TYPE_INFERENCE_H__
+
+#include "ExporterUtils.h"
+
+#include <loco/IR/Nodes.h>
+
+// Tensor type inference functions
+
+tflite::TensorType getOpResultType(loco::ConstGen *node, SerializedModelData &);
+
+tflite::TensorType getOpResultType(loco::Pull *node, SerializedModelData &);
+
+tflite::TensorType getOpResultType(loco::ReLU *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::MaxPool2D *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::AvgPool2D *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::Conv2D *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::FeatureEncode *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::FeatureDecode *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::FilterEncode *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::TensorConcat *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::BiasEncode *node, SerializedModelData &gd);
+
+tflite::TensorType getOpResultType(loco::BiasAdd<loco::Domain::Tensor> *node,
+ SerializedModelData &gd);
+
+// Shape inference functions
+
+ShapeDescription getOpResultShape(loco::Pull *node, SerializedModelData &);
+
+ShapeDescription getOpResultShape(loco::ConstGen *node, SerializedModelData &);
+
+ShapeDescription getOpResultShape(loco::MaxPool2D *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::AvgPool2D *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::Conv2D *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::ReLU *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::FeatureEncode *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::FeatureDecode *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::FilterEncode *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::TensorConcat *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::BiasEncode *node, SerializedModelData &gd);
+
+ShapeDescription getOpResultShape(loco::BiasAdd<loco::Domain::Tensor> *node,
+ SerializedModelData &gd);
+
+#endif // __TYPE_INFERENCE_H__