enum class ExportMethods {
METHOD_STRINGVECTOR = 0, /**< export to a string vector */
METHOD_TFLITE = 1, /**< export to tflite */
+ METHOD_FLATBUFFER = 2, /**< export to flatbuffer */
METHOD_UNDEFINED = 999, /**< undefined */
};
MODEL_FORMAT_INI_WITH_BIN =
ML_TRAIN_MODEL_FORMAT_INI_WITH_BIN, /**< ini file with save_path defined
where the binary will be saved */
+ MODEL_FORMAT_FLATBUFFER =
+ ML_TRAIN_MODEL_FORMAT_FLATBUFFER, /**< flatbuffer file */
};
/**
virtual int addLayer(std::shared_ptr<Layer> layer) = 0;
/**
- * @brief add refering to reference layers.
+ * @brief add referring to reference layers.
- * @note This method does add the provided layers itself but adds a deep copy
+ * @note This method does not add the provided layers itself but adds a deep copy
* of the passed layers to the model. The layers passed to this function can
* be reused later.
ML_TRAIN_MODEL_FORMAT_INI =
1, /**< Ini format file saves model configurations. */
ML_TRAIN_MODEL_FORMAT_INI_WITH_BIN =
- 2 /**< Ini with bin format file saves configurations with parameters
+ 2, /**< Ini with bin format file saves configurations with parameters
required for inference and training. */
+ ML_TRAIN_MODEL_FORMAT_FLATBUFFER =
+ 3 /**< Flatbuffer format file saves model configurations and weights. */
} ml_train_model_format_e;
/**
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2023 DongHak Park <donghak.park@samsung.com>
+ *
+ * @file flatbuffer_interpreter.cpp
+ * @date 09 February 2023
+ * @brief NNTrainer *.flatbuffer Interpreter
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Donghak Park <donghak.park@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2023 DongHak Park <donghak.park@samsung.com>
+ *
+ * @file flatbuffer_interpreter.h
+ * @date 09 February 2023
+ * @brief NNTrainer flatbuffer Interpreter
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Donghak Park <donghak.park@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#ifndef __FLATBUFFER_INTERPRETER_H__
+#define __FLATBUFFER_INTERPRETER_H__
+
+#include <app_context.h>
+#include <interpreter.h>
+
+namespace nntrainer {
+
+/**
+ * @brief flatbuffer graph interpreter class
+ *
+ * Serializes a GraphRepresentation to, and deserializes one from, the
+ * flatbuffer model format (see ExportMethods::METHOD_FLATBUFFER /
+ * ML_TRAIN_MODEL_FORMAT_FLATBUFFER).
+ */
+class FlatBufferInterpreter : public GraphInterpreter {
+public:
+  /**
+   * @brief Construct a new flatbuffer Graph Interpreter object
+   *
+   * @param app_context_ app context to create layers
+   */
+  FlatBufferInterpreter(AppContext &app_context_ = AppContext::Global()) :
+    app_context(app_context_) {}
+
+  /**
+   * @brief Destroy the flatbuffer Interpreter object
+   *
+   */
+  virtual ~FlatBufferInterpreter() = default;
+
+  /**
+   * @copydoc GraphInterpreter::serialize(const GraphRepresentation
+   * &representation, const std::string &out)
+   */
+  void serialize(const GraphRepresentation &representation,
+                 const std::string &out) override;
+
+  /**
+   * @copydoc GraphInterpreter::deserialize(const std::string &in)
+   */
+  GraphRepresentation deserialize(const std::string &in) override;
+
+private:
+  AppContext &app_context; /**< app context used to create layers; held by
+                              reference, so the referenced context must
+                              outlive this interpreter */
+};
+
+} // namespace nntrainer
+
+#endif // __FLATBUFFER_INTERPRETER_H__
throw_status(ret);
break;
}
+ case ml::train::ModelFormat::MODEL_FORMAT_FLATBUFFER: {
+ break;
+ }
default:
throw nntrainer::exception::not_supported(
"loading with given format is not supported yet");
#endif
break;
}
+ case ml::train::ExportMethods::METHOD_FLATBUFFER: {
+
+ model_graph.deallocateTensors();
+ model_graph.allocateTensors(ExecutionMode::TRAIN);
+ break;
+ }
default:
throw std::runtime_error{"Unsupported export method"};
}