*/
#include <mmi-manager-dbg.h>
+#include <mmi-common.h>
+#include <time.h>
+#include <stdio.h>
+#include <Ecore.h>
#include "vision.h"
+#include <mv_common.h>
+#include <mv_inference.h>
+
+#include "image_helper.h"
+#include "mv_video_helper.h"
+
+/*
+ * Hosted models
+ *
+ * On-device assets for the "teachable machine" image classifier:
+ * a quantized TFLite model, its JSON metadata, and the class-label list.
+ * All three are expected to be installed by the mmi-provider package.
+ */
+#define IC_LABEL_MMI_TEACHABLE_PATH\
+ "/usr/share/mmi-provider/labels.txt"
+#define IC_TFLITE_WEIGHT_MMI_TEACHABLE_PATH \
+ "/usr/share/mmi-provider/model_quant.tflite"
+#define IC_TFLITE_META_MMI_TEACHABLE_PATH \
+ "/usr/share/mmi-provider/model_quant.json"
+
+/* Task identifier for image classification (the only task defined so far). */
+#define TASK_IC 0
+
+/*
+ * Evaluate a media-vision call and log on failure.
+ *
+ * NOTE(review): despite the name, this macro does NOT return on failure --
+ * it only logs and falls through. It cannot simply 'return err' because it
+ * is also used inside void functions (see vision_shutdown). Callers that
+ * need the error code must check the call's result themselves.
+ */
+#define RET_IF_FAIL(exp) \
+ do { \
+ int err = (exp); \
+ if (err != MEDIA_VISION_ERROR_NONE) { \
+ LOGE("[%s] %s failed\n", __func__, #exp); \
+ } \
+ } while (0)
+
+/*
+ * File-global provider state.
+ * NOTE(review): consider marking these 'static' -- TODO confirm they are
+ * not referenced from other translation units.
+ */
+/* Index of the best-scoring class; presumably updated by an inference
+ * result callback elsewhere in this file -- verify. */
+int max_confidence_idx = 0;
+/* Engine configuration created by image_classification_init(),
+ * destroyed by vision_shutdown(). */
+mv_engine_config_h _engine_cfg = NULL;
+/* Prepared inference handle published by mv_inference_task_helper(),
+ * destroyed by vision_shutdown(). */
+mv_inference_h _infer = NULL;
+
+/*
+ * Create, configure and prepare a media-vision inference handle from
+ * 'engine_cfg'. On success the prepared handle is published through the
+ * file-global '_infer'; on failure the partially created handle is
+ * destroyed before returning.
+ *
+ * Returns MEDIA_VISION_ERROR_NONE on success, otherwise the first mv_*
+ * error code encountered.
+ */
+int mv_inference_task_helper(mv_engine_config_h engine_cfg, int task_id)
+{
+	mv_inference_h infer = NULL;
+
+	/* task_id is reserved for future task types (only TASK_IC exists
+	 * today); cast to void to avoid an unused-parameter warning. */
+	(void)task_id;
+
+	int err = mv_inference_create(&infer);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("Fail to create inference handle [err:%i]\n", err);
+		return err;
+	}
+
+	err = mv_inference_configure(infer, engine_cfg);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("Fail to configure inference handle [err:%i]\n", err);
+		goto clean_mv_inference;
+	}
+
+	err = mv_inference_prepare(infer);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("Fail to prepare inference handle");
+		goto clean_mv_inference;
+	}
+
+	/* Publish the ready handle for the rest of the provider. */
+	_infer = infer;
+
+	return err;
+
+clean_mv_inference:
+	RET_IF_FAIL(mv_inference_destroy(infer));
+	return err;
+}
+
+/*
+ * Populate 'handle' for TFLite inference on CPU with the given weight
+ * file and optional JSON meta file (skipped when NULL).
+ *
+ * Fix: the previous version wrapped every setter in RET_IF_FAIL, which
+ * only logs, so the function always returned MEDIA_VISION_ERROR_NONE
+ * even when an attribute failed to apply. Errors are now propagated.
+ *
+ * Returns MEDIA_VISION_ERROR_NONE on success, otherwise the first mv_*
+ * error code.
+ */
+int engine_config_hosted_tflite_cpu(mv_engine_config_h handle,
+				    const char *tf_weight,
+				    const char *meta_file)
+{
+	int err = mv_engine_config_set_string_attribute(
+		handle, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, tf_weight);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("[%s] set weight path failed [err:%i]\n", __func__, err);
+		return err;
+	}
+
+	err = mv_engine_config_set_int_attribute(
+		handle, MV_INFERENCE_BACKEND_TYPE, MV_INFERENCE_BACKEND_TFLITE);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("[%s] set backend type failed [err:%i]\n", __func__, err);
+		return err;
+	}
+
+	err = mv_engine_config_set_int_attribute(
+		handle, MV_INFERENCE_TARGET_TYPE, MV_INFERENCE_TARGET_CPU);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("[%s] set target type failed [err:%i]\n", __func__, err);
+		return err;
+	}
+
+	/* The meta file is optional; only set the attribute when given. */
+	if (meta_file != NULL) {
+		err = mv_engine_config_set_string_attribute(
+			handle, MV_INFERENCE_MODEL_META_FILE_PATH, meta_file);
+		if (err != MEDIA_VISION_ERROR_NONE) {
+			LOGE("[%s] set meta path failed [err:%i]\n", __func__, err);
+			return err;
+		}
+	}
+
+	return MEDIA_VISION_ERROR_NONE;
+}
+
+/*
+ * Like engine_config_hosted_tflite_cpu(), additionally setting the
+ * user (label) file attribute.
+ *
+ * Fix: the previous version swallowed failures via RET_IF_FAIL and
+ * unconditionally returned MEDIA_VISION_ERROR_NONE; errors are now
+ * propagated to the caller.
+ */
+int engine_config_user_hosted_tflite_cpu(mv_engine_config_h handle,
+					 const char *tf_weight,
+					 const char *user_file,
+					 const char *meta_file)
+{
+	int err = engine_config_hosted_tflite_cpu(handle, tf_weight, meta_file);
+	if (err != MEDIA_VISION_ERROR_NONE)
+		return err;
+
+	err = mv_engine_config_set_string_attribute(
+		handle, MV_INFERENCE_MODEL_USER_FILE_PATH, user_file);
+	if (err != MEDIA_VISION_ERROR_NONE)
+		LOGE("[%s] set user file failed [err:%i]\n", __func__, err);
+
+	return err;
+}
+
+/*
+ * Set up the teachable-machine image classifier: build an engine config
+ * pointing at the hosted TFLite model and prepare an inference handle.
+ * On success the config and handle are kept in the file globals
+ * '_engine_cfg' / '_infer' (released by vision_shutdown()).
+ *
+ * Fix: the result of mv_create_engine_config() was previously ignored
+ * (RET_IF_FAIL only logs), so a failed creation would have configured a
+ * NULL engine handle. The result is now checked explicitly.
+ */
+int image_classification_init(void)
+{
+	mv_engine_config_h engine_cfg = NULL;
+
+	LOGI("image_classification by teachable machine model\n");
+
+	int err = mv_create_engine_config(&engine_cfg);
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("Fail to create engine config [err:%i]\n", err);
+		return err;
+	}
+
+	err = engine_config_user_hosted_tflite_cpu(
+		engine_cfg, IC_TFLITE_WEIGHT_MMI_TEACHABLE_PATH,
+		IC_LABEL_MMI_TEACHABLE_PATH,
+		IC_TFLITE_META_MMI_TEACHABLE_PATH);
+
+	if (err != MEDIA_VISION_ERROR_NONE) {
+		LOGE("Fail to perform config [err:%i]\n", err);
+		goto clean_image_engine;
+	}
+
+	/* Hand ownership of the config to the file global; vision_shutdown()
+	 * releases it. */
+	_engine_cfg = engine_cfg;
+
+	err = mv_inference_task_helper(_engine_cfg, TASK_IC);
+	if (err != MEDIA_VISION_ERROR_NONE)
+		LOGE("Fail to detect with engine [err:%i]\n", err);
+
+	return err;
+
+clean_image_engine:
+	RET_IF_FAIL(mv_destroy_engine_config(engine_cfg));
+	return err;
+}
+
void vision_init(void)
{
+	/* Bring up the image-classification pipeline; failures are logged
+	 * only, since this entry point returns void. */
+	int err = image_classification_init();
+
+	if (err != MEDIA_VISION_ERROR_NONE)
+		LOGE("Fail to detect with engine [err:%i]\n", err);
+	else
+		LOGD("Success vision_init");
}
void vision_shutdown(void)
{
+	LOGD("shutdown");
+
+	/* Destroy in reverse order of creation and clear the globals so a
+	 * second shutdown (or a later init/shutdown cycle) cannot act on
+	 * stale handles. Previously the handles were left dangling, which
+	 * would double-destroy them on a repeated shutdown. */
+	if (_infer) {
+		RET_IF_FAIL(mv_inference_destroy(_infer));
+		_infer = NULL;
+	}
+	if (_engine_cfg) {
+		RET_IF_FAIL(mv_destroy_engine_config(_engine_cfg));
+		_engine_cfg = NULL;
+	}
+	LOGD("end of shutdown");
}