ML_NNFW_TYPE_PYTORCH = 11, /**< PyTorch (.pt). (Since 6.5) */
ML_NNFW_TYPE_NNTR_INF = 12, /**< Inference supported from NNTrainer, SR On-device Training Framework (Since 6.5) */
ML_NNFW_TYPE_VD_AIFW = 13, /**< Inference framework for Samsung Tizen TV (Since 6.5) */
- ML_NNFW_TYPE_TRIx_ENGINE = 14, /**< TRIxENGINE accesses TRIV/TRIA NPU low-level drivers directly (.tvn). (Since 6.5) You may need to use high-level drivers wrapping this low-level driver in some devices: e.g., AIFW */
+ ML_NNFW_TYPE_TRIX_ENGINE = 14, /**< TRIxENGINE accesses TRIV/TRIA NPU low-level drivers directly (.tvn). (Since 6.5) You may need to use high-level drivers wrapping this low-level driver in some devices: e.g., AIFW */
ML_NNFW_TYPE_SNAP = 0x2001, /**< SNAP (Samsung Neural Acceleration Platform), only for Android. (Since 6.0) */
} ml_nnfw_type_e;
 * Set the pipeline description according to the given NNFW (neural network framework) type.
*/
if (nnfw == ML_NNFW_TYPE_TENSORFLOW || nnfw == ML_NNFW_TYPE_SNAP ||
- nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIx_ENGINE) {
+ nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE) {
/* set input and output tensors information */
if (in_tensors_info && out_tensors_info) {
status =
[ML_NNFW_TYPE_PYTORCH] = "pytorch",
[ML_NNFW_TYPE_NNTR_INF] = "nntrainer",
[ML_NNFW_TYPE_VD_AIFW] = "vd_aifw",
- [ML_NNFW_TYPE_TRIx_ENGINE] = "trix-engine",
+ [ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine",
NULL
};