* @remarks The returned @a info is newly created so it does not reflect future
* changes in the model.
* @remarks On returning error, info must not be destroyed with
- * ml_tensors_info_destory()
+ * ml_tensors_info_destroy()
*
* @param[in] model The NNTrainer model handle.
* @param[out] info The tensors information handle.
* @remarks the returned @a info is newly created so it does not reflect future
* changes in the model
* @remarks On returning error, info must not be destroyed with
- * ml_tensors_info_destory()
+ * ml_tensors_info_destroy()
*
* @param[in] model The NNTrainer model handle.
* @param[out] info The tensors information handle.
* @retval #ML_ERROR_INVALID_PARAMETER Invalid parameter.
* @note For now the properties for Exponential learning rate
* scheduler(decay_rate, decay_steps) can be set using
- * ml_train_optimizer_set_property for backward compatibility. But
- * ml_train_optimizer_set_property will not support to set decay_rate,
- * decay_steps properties from tizen 8.0. Use ml_train_lr_scheduler_set_property
- * instead.
+ * ml_train_optimizer_set_property() for backward compatibility. But
+ * ml_train_optimizer_set_property() will not support to set decay_rate,
+ * decay_steps properties from tizen 8.0. Use
+ * ml_train_lr_scheduler_set_property() instead.
*/
int ml_train_optimizer_set_property(ml_train_optimizer_h optimizer, ...);
/**
* @brief Creates a learning rate scheduler for optimizer.
* @details Use this function to create learning rate scheduler for optimizer.
- * If not set to a optimizer, @a lr_sheduler should be released using
+ * If not set to a optimizer, @a lr_scheduler should be released using
* ml_train_lr_scheduler_destroy(). If set to a optimizer, @a lr_scheduler is
* available until optimizer is released.
* @since_tizen 7.5
/**
* @brief Gets neural network layer from the model with the given name.
* @details Use this function to get already created Neural Network Layer. The
- * returned layer must not be deleted as it is owned by the model.
+ * returned layer must not be released as it is owned by the model.
* @since_tizen 7.0
- * @remarks The modification through ml_trin_layer_set_property() after
+ * @remarks The modification through ml_train_layer_set_property() after
* compiling the model by calling `ml_train_model_compile()` strictly
* restricted.
* @param[in] model The NNTrainer model handler from the given description.
/**
* @brief Gets weight tensors and information of the layer.
* @details Use this function to get weight tensors and information of the
- * layer. destroy @a info with @c ml_tensors_info_destroy() after use. destroy
- * @a weight with @c ml_tensors_data_destory() after use.
+ * layer. destroy @a info with ml_tensors_info_destroy() after use. destroy
+ * @a weight with ml_tensors_data_destroy() after use.
* @since_tizen 7.5
* @remarks @a model must be compiled before calling this function.
- * @remarks the returned @a info @a weights are newly created so it does not
+ * @remarks the returned @a info and @a weight are newly created so it does not
* reflect future changes in the model
* @remarks On returning error, info must not be destroyed with
- * ml_tensors_info_destory()
+ * ml_tensors_info_destroy()
*
* @param[in] model The NNTrainer model handle.
* @param[in] layer_name The name of the layer handle.
* @bug No known bugs except for NYI items
*/
-#ifndef __NNTRAINER_INTERNAL_H__
-#define __NNTRAINER_INTERNAL_H__
+#ifndef __TIZEN_MACHINELEARNING_NNTRAINER_INTERNAL_H__
+#define __TIZEN_MACHINELEARNING_NNTRAINER_INTERNAL_H__
#include <array>
#include <mutex>
#include <nntrainer_log.h>
+/**
+ * @brief Magic number of nntrainer.
+ * @since_tizen 6.0
+ */
#define ML_NNTRAINER_MAGIC 0x777F888F
/* Tizen ML feature */
#if defined(__TIZEN__)
+/**
+ * @brief Define enum for ML feature.
+ * @since_tizen 7.0
+ */
typedef enum {
- ML_FEATURE = 0,
- ML_FEATURE_INFERENCE,
- ML_FEATURE_TRAINING,
- ML_FEATURE_SERVICE,
-
- ML_FEATURE_MAX
+ ML_FEATURE = 0, /**< default option for ml feature */
+ ML_FEATURE_INFERENCE, /**< inference option for ml feature */
+ ML_FEATURE_TRAINING, /**< training option for ml feature */
+ ML_FEATURE_SERVICE, /**< service option for ml feature */
+ ML_FEATURE_MAX /**< max option for ml feature */
} ml_feature_e;
+/**
+ * @brief Define enum for ML feature state.
+ * @since_tizen 6.0
+ */
typedef enum {
- NOT_CHECKED_YET = -1,
- NOT_SUPPORTED = 0,
- SUPPORTED = 1
+ NOT_CHECKED_YET = -1, /**< not checked option for feature state */
+ NOT_SUPPORTED = 0, /**< not supported option for feature state */
+ SUPPORTED = 1 /**< supported option for feature state */
} feature_state_t;
#if defined(__FEATURE_CHECK_SUPPORT__)
+/**
+ * @brief Check whether the feature is supported.
+ * @since_tizen 6.0
+ * @return Error type
+ */
#define check_feature_state() \
do { \
int feature_ret = ml_tizen_get_feature_enabled(); \
return feature_ret; \
} while (0);
+/**
+ * @brief Set the feature state.
+ * @since_tizen 6.0
+ */
#define set_feature_state(...) ml_train_tizen_set_feature_state(__VA_ARGS__)
-#else /* __FEATURE_CHECK_SUPPORT__ */
+#else /** __FEATURE_CHECK_SUPPORT__ @since_tizen 6.0 */
#define check_feature_state()
#define set_feature_state(...)
#endif /* __FEATURE_CHECK_SUPPORT__ */
#endif /* __cplusplus */
/**
- * @brief Struct to wrap neural network layer for the API
+ * @brief Struct to wrap neural network layer for the API.
+ * @since_tizen 6.0
* @note model mutex must be locked before layer lock, if model lock is needed
*/
typedef struct {
- uint magic;
- std::shared_ptr<ml::train::Layer> layer;
- bool in_use;
- std::mutex m;
+ uint magic; /**< magic number */
+ std::shared_ptr<ml::train::Layer> layer; /**< layer object */
+ bool in_use; /**< in_use flag */
+  std::mutex m;                            /**< mutex for the layer */
} ml_train_layer;
/**
} ml_train_optimizer;
/**
- * @brief Struct to wrap data buffer for the API
+ * @brief Struct to wrap data buffer for the API.
+ * @since_tizen 6.0
* @note model mutex must be locked before dataset lock, if model lock is needed
*/
typedef struct {
- uint magic;
- std::array<std::shared_ptr<ml::train::Dataset>, 3> dataset;
- bool in_use;
- std::mutex m;
+ uint magic; /**< magic number */
+ std::array<std::shared_ptr<ml::train::Dataset>, 3>
+ dataset; /**< dataset object */
+ bool in_use; /**< in_use flag */
+ std::mutex m; /**< mutex for the dataset */
} ml_train_dataset;
/**
- * @brief Struct to wrap neural network model for the API
+ * @brief Struct to wrap neural network model for the API.
+ * @since_tizen 6.0
*/
typedef struct {
- uint magic;
- std::shared_ptr<ml::train::Model> model;
- std::unordered_map<std::string, ml_train_layer *> layers_map;
- ml_train_optimizer *optimizer;
- ml_train_dataset *dataset;
- std::mutex m;
+ uint magic; /**< magic number */
+ std::shared_ptr<ml::train::Model> model; /**< model object */
+ std::unordered_map<std::string, ml_train_layer *>
+ layers_map; /**< layers map */
+ ml_train_optimizer *optimizer; /**< optimizer object */
+ ml_train_dataset *dataset; /**< dataset object */
+ std::mutex m; /**< mutex for the model */
} ml_train_model;
/**
- * @brief Check validity of handle to be not NULL
+ * @brief Check validity of handle to be not NULL.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_VERIFY_VALID_HANDLE(obj_h) \
do { \
} while (0)
/**
- * @brief Check validity of the user passed arguments and lock the object
+ * @brief Get handle to lock the passed object.
+ * @since_tizen 6.0
+ * @note Check validity of the user passed arguments and lock the object.
*/
#define ML_TRAIN_GET_VALID_HANDLE_LOCKED(obj, obj_h, obj_type, obj_name) \
do { \
} while (0)
/**
- * @brief Check validity of passed model and lock the object
+ * @brief Check validity of passed model and lock the object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_MODEL_LOCKED(nnmodel, model) \
ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnmodel, model, ml_train_model, "model")
/**
- * @brief Check validity of passed model, reset magic and lock the object
+ * @brief Check validity of passed model, reset magic and lock the object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_MODEL_LOCKED_RESET(nnmodel, model) \
do { \
} while (0)
/**
- * @brief Check validity of passed layer and lock the object
+ * @brief Check validity of passed layer and lock the object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_LAYER_LOCKED(nnlayer, layer) \
ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnlayer, layer, ml_train_layer, "layer")
/**
- * @brief Check validity of passed layer, reset magic and lock the object
+ * @brief Check validity of passed layer, reset magic and lock the object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_LAYER_LOCKED_RESET(nnlayer, layer) \
ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(nnlayer, layer, ml_train_layer, \
"layer")
/**
- * @brief Check validity of passed optimizer and lock the object
+ * @brief Check validity of passed optimizer and lock the object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_OPT_LOCKED(nnopt, opt) \
ML_TRAIN_GET_VALID_HANDLE_LOCKED(nnopt, opt, ml_train_optimizer, "optimizer")
/**
* @brief Check validity of passed optimizer, reset magic and lock the
- * object
+ * object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_OPT_LOCKED_RESET(nnopt, opt) \
ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(nnopt, opt, ml_train_optimizer, \
"dataset")
/**
- * @brief Check validity of passed dataset, reset magic and lock the object
+ * @brief Check validity of passed dataset, reset magic and lock the object.
+ * @since_tizen 6.0
*/
#define ML_TRAIN_GET_VALID_DATASET_LOCKED_RESET(nndataset, dataset) \
ML_TRAIN_GET_VALID_HANDLE_LOCKED_RESET(nndataset, dataset, ml_train_dataset, \
* @brief Get all neural network layer names from the model.
* @details Use this function to get already created Neural Network Layer names.
* This can be used to obtain layers when model is defined with ini file.
- * @note The caller must free the list of the layer names.
* @since_tizen 6.x
+ * @note The caller must free the list of the layer names.
* @param[in] model The NNTrainer model handler from the given description.
* @param[out] layers_name List of names of layers in the model ended with NULL.
* @return @c 0 on success. Otherwise a negative error value.
/**
* @brief Callback function to notify completion of training of the model.
+ * @since_tizen 6.0
* @param[in] model The NNTrainer model handler.
* @param[in] data Internal data to be given to the callback, cb.
*/
* @return @c 0 on success. Otherwise a negative error value.
* @retval #ML_ERROR_NONE Successful.
* @retval #ML_ERROR_INVALID_PARAMETER Invalid Parameter.
- * @details If length of @a input_layer_names is more than 1, the layer to be
+ * @note If length of @a input_layer_names is more than 1, the layer to be
* inserted should support multiple inputs. Otherwise
* #ML_ERROR_INVALID_PARAMETER is returned. If the layer in @a
* output_layer_names already have input connection, then they should support
#if defined(__TIZEN__)
/**
* @brief Checks whether machine_learning.training feature is enabled or not.
+ * @since_tizen 6.0
+ * @return flag to indicate whether the feature is enabled or not.
*/
int ml_tizen_get_feature_enabled(void);
/**
* @brief Set the feature status of machine_learning.training.
* This is only used for Unit test.
+ * @since_tizen 7.0
+ * @param[in] feature The feature to be set.
+ * @param[in] state The state to be set.
*/
void ml_train_tizen_set_feature_state(ml_feature_e feature,
feature_state_t state);
}
#endif /* __cplusplus */
-#endif
+#endif /* __TIZEN_MACHINELEARNING_NNTRAINER_INTERNAL_H__ */