#include "nnfw.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
// Used for custom kernel development
/*
 * Pop (retrieve) output buffers of a pipelined session.
 * NOTE(review): the implementation is not visible here — presumably this copies
 * the next available pipeline outputs into `outputs`; confirm buffer layout and
 * ownership against the implementation before relying on this doc.
 *
 * @param[in]  session Session to pop pipeline output from
 * @param[out] outputs Destination for the popped outputs
 * @return presumably @c NNFW_STATUS_NO_ERROR on success, matching the other APIs here
 */
NNFW_STATUS nnfw_pop_pipeline_output(nnfw_session *session, void *outputs);
+/**
+ * Training C APIs
+ *
+ * Training APIs are designed to be used in the following order for training
+ * 1. nnfw_train_prepare
+ * 2. nnfw_train_set_input, nnfw_train_set_expected for inputs & expected outputs
+ * 3. nnfw_train
+ * 4. nnfw_train_get_loss
+ *
 * If you want to run inference after training with the same session, you can use the following order
+ * 1. nnfw_set_input
+ * 2. nnfw_set_output
+ * 3. nnfw_run
+ */
+
+//////////////////////////////////////////////
+// Essential APIs for training
+//////////////////////////////////////////////
/**
 * @brief Loss functions available for training
 */
typedef enum
{
  /** Mean squared error */
  NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR = 0,
  /** Categorical cross-entropy */
  NNFW_TRAIN_LOSS_CATEGORICAL_CROSSENTROPY = 1,
} NNFW_TRAIN_LOSS;

/**
 * @brief Optimizers available for training
 */
typedef enum
{
  /** Stochastic gradient descent */
  NNFW_TRAIN_OPTIMIZER_SGD = 0,
  /** Adam */
  NNFW_TRAIN_OPTIMIZER_ADAM = 1,
} NNFW_TRAIN_OPTIMIZER;

/**
 * @brief Training information to prepare training
 *
 * @note This header is a C API (declared inside extern "C"); the previous C++-only
 *       default member initializers do not compile as C and were removed.
 *       Set every field explicitly, or pass nullptr to {@link nnfw_train_prepare}
 *       to use the library defaults listed per field below.
 * @todo Add more training information
 *       (e.g. optimizer, loss function, ...)
 */
typedef struct nnfw_train_info
{
  /** Learning rate (default: 0.001f) */
  float learning_rate;
  /** Batch size (default: 1) */
  uint32_t batch_size;
  /** Loss type (default: NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR) */
  NNFW_TRAIN_LOSS loss;
  /** Optimizer type (default: NNFW_TRAIN_OPTIMIZER_SGD) */
  NNFW_TRAIN_OPTIMIZER opt;
} nnfw_train_info;
+
/**
 * @brief Prepare session to be ready for training
 * @note The session will be entered into training mode
 *
 * @param[in] session The session to be prepared for training
 * @param[in] info    Training information.
 *                    If info is nullptr, the session's current training information
 *                    is left unchanged.
 *                    If info is nullptr and the model carries no training information,
 *                    default training information is used:
 *                    {learning_rate = 0.001f, batch_size = 1,
 *                     loss = NNFW_TRAIN_LOSS_MEAN_SQUARED_ERROR,
 *                     opt = NNFW_TRAIN_OPTIMIZER_SGD}
 *
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_prepare(nnfw_session *session, const nnfw_train_info *info);
+
/**
 * @brief Set a training input buffer
 * @note This function should be called after {@link nnfw_train_prepare}
 *
 * @param[in] session    The session to set the training input on
 * @param[in] index      The index of the training input
 * @param[in] input      The input buffer for training
 * @param[in] input_info The shape and type of the input buffer.
 *                       If it is nullptr, the current shape and batch size are kept.
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_set_input(nnfw_session *session, uint32_t index, const void *input,
                                 const nnfw_tensorinfo *input_info);
+
/**
 * @brief Set a training expected-output buffer
 * @note This function should be called after {@link nnfw_train_prepare}
 *
 * @param[in] session       The session to set the expected output on
 * @param[in] index         The index of the training expected output
 * @param[in] expected      The expected (ground-truth) buffer for training
 * @param[in] expected_info The shape and type of the expected buffer.
 *                          If it is nullptr, the current shape and batch size are kept.
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_set_expected(nnfw_session *session, uint32_t index, const void *expected,
                                    const nnfw_tensorinfo *expected_info);
+
/**
 * @brief Train the model for one step
 * @note This function should be called after {@link nnfw_train_set_input} and
 *       {@link nnfw_train_set_expected} for each input and expected output
 *
 * @param[in] session        The session to be trained
 * @param[in] update_weights If true, update the weights of the model.
 *                           If false, do not update the weights (e.g. for validation,
 *                           where only the loss is of interest).
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train(nnfw_session *session, bool update_weights);
+
/**
 * @brief Get the loss value for an expected output
 * @note This function should be called after {@link nnfw_train}
 *
 * @param[in]  session The session to get the loss value from
 * @param[in]  index   The index of the loss value, in [0, number of expected outputs)
 * @param[out] loss    The loss value
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_get_loss(nnfw_session *session, uint32_t index, float *loss);
+
/**
 * @brief Export the trained model as a circle model (usable for inference)
 * @note This function should be called in training mode,
 *       after {@link nnfw_train}
 *
 * @param[in] session The session whose trained model is exported
 * @param[in] path    The file path to export the circle model to
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_export_circle(nnfw_session *session, const char *path);
+
+//////////////////////////////////////////////
+// Optional APIs for training
+//////////////////////////////////////////////
+
/**
 * @brief Get the training model input information
 * @note This function should be called after {@link nnfw_train_prepare}
 *
 * @param[in]  session The session to query
 * @param[in]  index   The index of the training model input
 * @param[out] info    The shape and type of the training model input
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_input_tensorinfo(nnfw_session *session, uint32_t index,
                                        nnfw_tensorinfo *info);
+
/**
 * @brief Get the training model expected-output information
 * @note This function should be called after {@link nnfw_train_prepare}
 *
 * @param[in]  session The session to query
 * @param[in]  index   The index of the training model expected output
 * @param[out] info    The shape and type of the training model expected output
 * @return @c NNFW_STATUS_NO_ERROR if successful
 */
NNFW_STATUS nnfw_train_expected_tensorinfo(nnfw_session *session, uint32_t index,
                                           nnfw_tensorinfo *info);
+
+//////////////////////////////////////////////
+// Not planned to be implemented
+//////////////////////////////////////////////
+
+/**
+ * @brief Convert between training mode and inference mode
+ * @note This function should be called after {@link nnfw_train} or {@link nnfw_prepare}
+ *
+ * @param[in] session The session to convert training mode to inference mode
+ * @param[in] train If false, convert training model to inference model
+ * If true, convert inference model to training model
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+// NNFW_STATUS nnfw_set_training_mode(nnfw_session *session, bool train);
+
+/**
+ * @brief Set training information after prepare training
+ * @note This function may be used after {@link nnfw_train_prepare}
+ *
+ * @param[in] session The session prepared for training
+ * @param[in] info Training information
+ * @return @c NNFW_STATUS_NO_ERROR if successful
+ */
+// NNFW_STATUS nnfw_train_set_traininfo(nnfw_session *session, const nnfw_train_info info);
+
+/**
+ * On-Device Quantization APIs
+ *
+ * On-Device Quantization APIs are designed to be used in the following order
+ * 1. nnfw_set_quantization_type
+ * 2. nnfw_set_quantized_model_path
+ * 3. nnfw_quantize
+ *
+ * You should use Quantization APIs after {@link nnfw_load_model_from_file},
+ * before {@link nnfw_prepare} and {@link nnfw_set_input_tensorinfo}.
+ */
+
/**
 * @brief On-device quantization type
 */
typedef enum
{
  /** Default value: type not set */
  NNFW_QUANTIZE_TYPE_NOT_SET,
  /** Asymmetric quantization to uint8, with a scale and a zero point */
  NNFW_QUANTIZE_TYPE_U8_ASYM,
  /** Symmetric quantization to int16, with a scale only */
  NNFW_QUANTIZE_TYPE_I16_SYM,
} NNFW_QUANTIZE_TYPE;
+
/**
 * @brief Set quantization type
 *
 * This function should be called before {@link nnfw_quantize} is invoked.
 *
 * @param[in] session nnfw_session to set quantization type on
 * @param[in] qtype   Quantization type, one of @c NNFW_QUANTIZE_TYPE
 * @return @c NNFW_STATUS_NO_ERROR if successful,
 *         @c NNFW_STATUS_UNEXPECTED_NULL if session is null,
 *         otherwise return @c NNFW_STATUS_ERROR
 */
NNFW_STATUS nnfw_set_quantization_type(nnfw_session *session, NNFW_QUANTIZE_TYPE qtype);
+
/**
 * @brief Set the export path for the quantized model
 *
 * This function should be called before {@link nnfw_quantize} is invoked.
 *
 * @todo Define the behavior when this function is not called — currently the
 *       quantized model will not be exported in that case.
 *
 * @param[in] session nnfw_session to set the quantized model path on
 * @param[in] path    File path to export the quantized model to
 * @return @c NNFW_STATUS_NO_ERROR if successful, otherwise return @c NNFW_STATUS_ERROR
 */
NNFW_STATUS nnfw_set_quantized_model_path(nnfw_session *session, const char *path);
+
/**
 * @brief Quantize the circle model held by the session
 *
 * Uses the type set by {@link nnfw_set_quantization_type} and the path set by
 * {@link nnfw_set_quantized_model_path}.
 *
 * @param[in] session nnfw_session to quantize
 * @return @c NNFW_STATUS_NO_ERROR if successful, otherwise return @c NNFW_STATUS_ERROR
 */
NNFW_STATUS nnfw_quantize(nnfw_session *session);
+
+#ifdef __cplusplus
+}
+#endif
+
#endif // __NNFW_EXPERIMENTAL_H__