"Turn on building of licensed port of the face module (if OFF - open port will be built)." OFF)
option(MEDIA_VISION_SURVEILLANCE_LICENSE_PORT
"Turn on building of licensed port of the surveillance module (if OFF - open port will be built)." OFF)
+option(MEDIA_VISION_INFERENCE_LICENSE_PORT
+ "Turn on building of licensed port of the inferece module (if OFF - open port will be built)." OFF)
+
set(MV_COMMON_LIB_NAME "mv_common")
set(MV_BARCODE_DETECTOR_LIB_NAME "mv_barcode_detector" CACHE STRING
"Name of the library will be built for barcode generating module (without extension).")
set(MV_SURVEILLANCE_LIB_NAME "mv_surveillance" CACHE STRING
"Name of the library will be built for surveillance module (without extension).")
+set(MV_INFERENCE_LIB_NAME "mv_inference" CACHE STRING
+ "Name of the library will be built for inference module (without extension).")
SET(INC_DIR "${PROJECT_SOURCE_DIR}/include")
SET(INC_SURVEILLANCE "${PROJECT_SOURCE_DIR}/mv_surveillance/surveillance/include")
endif()
+if(MEDIA_VISION_INFERENCE_LICENSE_PORT)
+ add_definitions(-DMEDIA_VISION_INFERENCE_LICENSE_PORT)
+ SET(INC_INFERENCE "${PROJECT_SOURCE_DIR}/mv_inference/inference_lic/include")
+else()
+ SET(INC_INFERENCE "${PROJECT_SOURCE_DIR}/mv_inference/inference/include")
+endif()
+
INCLUDE_DIRECTORIES(${INC_DIR}
${INC_COMMON}
${INC_BARCODE_DETECTOR}
${INC_BARCODE_GENERATOR}
${INC_FACE}
${INC_IMAGE}
- ${INC_SURVEILLANCE})
+ ${INC_SURVEILLANCE}
+ ${INC_INFERENCE})
SET(dependents "dlog capi-media-tool capi-system-info")
SET(pc_dependents "capi-media-tool")
ADD_SUBDIRECTORY(mv_image)
ADD_SUBDIRECTORY(mv_face)
ADD_SUBDIRECTORY(mv_surveillance)
+ADD_SUBDIRECTORY(mv_inference)
aux_source_directory(src SOURCES)
ADD_LIBRARY(${fw_name} SHARED ${SOURCES})
${MV_BARCODE_GENERATOR_LIB_NAME}
${MV_IMAGE_LIB_NAME}
${MV_FACE_LIB_NAME}
- ${MV_SURVEILLANCE_LIB_NAME})
+ ${MV_SURVEILLANCE_LIB_NAME}
+ ${MV_INFERENCE_LIB_NAME})
SET_TARGET_PROPERTIES(${fw_name}
PROPERTIES
)
INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-surveillance.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+SET(PC_NAME ${fw_name}-inference)
+SET(PC_LDFLAGS "-l${MV_INFERENCE_LIB_NAME} -l${MV_COMMON_LIB_NAME}")
+CONFIGURE_FILE(
+ ${fw_name}-inference.pc.in
+ ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-inference.pc
+ @ONLY
+)
+INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name}-inference.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+
ADD_SUBDIRECTORY(test)
IF(UNIX)
--- /dev/null
+
+# Package Information for pkg-config
+
+prefix=@PREFIX@
+exec_prefix=/usr
+libdir=@LIB_INSTALL_DIR@
+includedir=/usr/include/media
+
+Name: @PC_NAME@
+Description: @PACKAGE_DESCRIPTION@
+Version: @VERSION@
+Requires: @PC_REQUIRED@
+Libs: -L${libdir} @PC_LDFLAGS@
+Cflags: -I${includedir}
\ No newline at end of file
* * Flat Image features extraction;\n
* * Surveillance: movement detection, person appearance/disappearance,
* person recognition.
+ * * Inference: Image classification, object detection,
+ * face detection and facial landmark detection;\n
*
* @defgroup CAPI_MEDIA_VISION_COMMON_MODULE Media Vision Common
* @ingroup CAPI_MEDIA_VISION_MODULE
* @defgroup CAPI_MEDIA_VISION_SURVEILLANCE_EVENT_TYPES Media Vision Surveillance Event Types
* @ingroup CAPI_MEDIA_VISION_SURVEILLANCE_MODULE
* @brief Event types supported by the Surveillance module.
+ *
+ * @defgroup CAPI_MEDIA_VISION_INFERENCE_MODULE Media Vision Inference
+ * @ingroup CAPI_MEDIA_VISION_MODULE
+ * @brief Image classification, object detection, face detection, and facial landmark detection.
+ * @section CAPI_MEDIA_VISION_INFERENCE_MODULE_HEADER Required Header
+ * \#include <mv_inference.h>
+ *
+ * @section CAPI_MEDIA_VISION_INFERENCE_MODULE_FEATURE Related Features
+ * This API is related with the following features:\n
+ * - http://tizen.org/feature/vision.inference\n
+ * - http://tizen.org/feature/vision.inference.image\n
+ * - http://tizen.org/feature/vision.inference.face\n
+ *
+ * It is recommended to use features in your application for reliability.\n
+ * You can check if the device supports the related features for this API by using
+ * System Information, and control your application's actions accordingly.\n
+ * To ensure your application is only running on devices with specific
+ * features, please define the features in your manifest file using the manifest
+ * editor in the SDK.\n
+ * More details on using features in your application can be found in
+ * <a href="https://developer.tizen.org/development/tizen-studio/native-tools/configuring-your-app/manifest-text-editor#feature">
+ * <b>Feature Element</b>.
+ * </a>
+ *
+ * @section CAPI_MEDIA_VISION_INFERENCE_MODULE_OVERVIEW Overview
+ * @ref CAPI_MEDIA_VISION_INFERENCE_MODULE contains the @ref mv_inference_h handle to perform
+ * image classification, object detection, face detection, and facial landmark detection.
+ * The inference handle should be created with @ref mv_inference_create() and destroyed with
+ * @ref mv_inference_destroy(). The @ref mv_inference_h should be configured by calling
+ * @ref mv_inference_configure(). After configuration, @ref mv_inference_h should be prepared by
+ * calling mv_inference_prepare(), which loads models and sets required parameters.
+ * After preparation, @ref mv_inference_image_classify() can be called to classify images on @ref mv_source_h,
+ * and the callback @ref mv_inference_image_classified_cb() will be invoked to process the results.
+ * The module contains the @ref mv_inference_object_detect() function to detect objects on @ref mv_source_h, and
+ * @ref mv_inference_object_detected_cb() to process the object detection results.
+ * The module also provides @ref mv_inference_face_detect() and
+ * @ref mv_inference_facial_landmark_detect() to detect faces and their landmarks
+ * on @ref mv_source_h, and the callbacks @ref mv_inference_face_detected_cb() and
+ * @ref mv_inference_facial_landmark_detected_cb() to process the detection results.
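+ *
+ * A typical call sequence is sketched below. The model file paths are
+ * illustrative, the @c source handle and the @c classified_cb callback are
+ * assumed to be prepared by the application, and error handling is omitted:
+ * @code
+ * mv_engine_config_h cfg = NULL;
+ * mv_inference_h infer = NULL;
+ *
+ * mv_create_engine_config(&cfg);
+ * mv_engine_config_set_string_attribute(cfg,
+ *         MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, "/path/to/model.tflite");
+ * mv_engine_config_set_string_attribute(cfg,
+ *         MV_INFERENCE_MODEL_USER_FILE_PATH, "/path/to/labels.txt");
+ *
+ * mv_inference_create(&infer);
+ * mv_inference_configure(infer, cfg);
+ * mv_inference_prepare(infer);
+ *
+ * mv_inference_image_classify(source, infer, NULL, classified_cb, NULL);
+ *
+ * mv_inference_destroy(infer);
+ * mv_destroy_engine_config(cfg);
+ * @endcode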
*/
#endif /* __TIZEN_MEDIAVISION_DOC_H__ */
= TIZEN_ERROR_MEDIA_VISION | 0x03, /**< Invalid data */
MEDIA_VISION_ERROR_INVALID_PATH
= TIZEN_ERROR_MEDIA_VISION | 0x04, /**< Invalid path (Since 3.0) */
+ MEDIA_VISION_ERROR_NOT_SUPPORTED_ENGINE
+	= TIZEN_ERROR_MEDIA_VISION | 0x05 /**< Not supported engine (Since 5.5) */
} mv_error_e;
/**
const char *name,
const char *value);
+/**
+ * @brief Sets the array of string attribute to the configuration.
+ *
+ * @since_tizen 5.5
+ * @param [in] engine_cfg Engine configuration for which @a values have
+ * to be set
+ * @param [in] name String key of the attribute will be used for
+ * storing the @a values into configuration
+ * dictionary
+ * @param [in] values The string values of the attribute
+ * @param [in] size The number of string @a values
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE Attribute key isn't available
+ *
+ * @see mv_engine_config_get_array_string_attribute()
+ */
+int mv_engine_config_set_array_string_attribute(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char **values,
+ unsigned int size);
+
/**
* @brief Gets the double attribute from the configuration dictionary.
*
const char *name,
char **value);
+/**
+ * @brief Gets the array of string attribute from the configuration dictionary.
+ *
+ * @since_tizen 5.5
+ * @remarks Each element in the @a values array should be released using free(),
+ * then the array itself should be released using free().
+ * @param [in] engine_cfg Engine configuration from which @a values
+ * should be obtained.
+ * @param [in] name String key of the attribute which will be used
+ * for getting the @a values from the
+ * configuration dictionary
+ * @param [out] values The attribute to be filled with the array of
+ * string value from dictionary
+ * @param [out] size The number of elements in @a values
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE Attribute key isn't available
+ *
+ * @see mv_engine_config_set_array_string_attribute()
+ */
+int mv_engine_config_get_array_string_attribute(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char ***values,
+ int *size);
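+
+/*
+ * A minimal usage sketch for the array-of-string attribute functions
+ * (the attribute name and values are illustrative, the configuration handle
+ * @c cfg is assumed to exist, and error handling is omitted):
+ *
+ *   const char *names[] = { "output0", "output1" };
+ *   mv_engine_config_set_array_string_attribute(cfg,
+ *           "MV_INFERENCE_OUTPUT_NODE_NAMES", names, 2);
+ *
+ *   char **read = NULL;
+ *   int count = 0;
+ *   mv_engine_config_get_array_string_attribute(cfg,
+ *           "MV_INFERENCE_OUTPUT_NODE_NAMES", &read, &count);
+ *   for (int i = 0; i < count; ++i)
+ *       free(read[i]);
+ *   free(read);
+ */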
+
/**
* @brief Called to get information (type and name) once for each supported
* attribute.
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TIZEN_MEDIAVISION_INFERENCE_H__
+#define __TIZEN_MEDIAVISION_INFERENCE_H__
+
+#include <mv_common.h>
+#include <mv_inference_type.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @file mv_inference.h
+ * @brief This file contains the Inference based Media Vision API.
+ */
+
+/**
+ * @addtogroup CAPI_MEDIA_VISION_INFERENCE_MODULE
+ * @{
+ */
+
+/**
+ * @brief Defines #MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH to set inference
+ *        model's configuration file attribute of the engine configuration.
+ * @details Use this attribute to set the path to the inference model's configuration file.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH "MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH"
+
+/**
+ * @brief Defines #MV_INFERENCE_MODEL_WEIGHT_FILE_PATH to set inference
+ * model's weight file attribute of the engine configuration.
+ * @details Use this attribute to set the path to the inference model's weight file.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_INFERENCE_MODEL_WEIGHT_FILE_PATH "MV_INFERENCE_MODEL_WEIGHT_FILE_PATH"
+
+/**
+ * @brief Defines #MV_INFERENCE_MODEL_USER_FILE_PATH to set inference
+ * model's category file attribute of the engine configuration.
+ * @details Use this attribute to set the path to the inference model's category file.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_INFERENCE_MODEL_USER_FILE_PATH "MV_INFERENCE_MODEL_USER_FILE_PATH"
+
+/**
+ * @brief Defines #MV_INFERENCE_MODEL_MEAN_VALUE to set inference
+ * model's mean attribute of the engine configuration.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_double_attribute()
+ * @see mv_engine_config_get_double_attribute()
+ */
+#define MV_INFERENCE_MODEL_MEAN_VALUE "MV_INFERENCE_MODEL_MEAN_VALUE"
+
+/**
+ * @brief Defines #MV_INFERENCE_MODEL_STD_VALUE to set an input image's
+ * standard deviation attribute of the engine configuration.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_double_attribute()
+ * @see mv_engine_config_get_double_attribute()
+ */
+#define MV_INFERENCE_MODEL_STD_VALUE "MV_INFERENCE_MODEL_STD_VALUE"
+
+/**
+ * @brief Defines #MV_INFERENCE_BACKEND_TYPE to set the backend type used
+ *        for inference in the engine configuration.
+ * @details Switches between the backend engines used for
+ *          neural network model inference. Possible values of the
+ *          attribute are:\n
+ * #MV_INFERENCE_BACKEND_OPENCV,\n
+ * #MV_INFERENCE_BACKEND_TFLITE.\n
+ * The default type is #MV_INFERENCE_BACKEND_OPENCV.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_BACKEND_TYPE "MV_INFERENCE_BACKEND_TYPE"
+
+/**
+ * @brief Defines #MV_INFERENCE_TARGET_TYPE to set the device type used
+ *        to run inference in the engine configuration.
+ * @details Switches between CPU, GPU, and custom devices:\n
+ * #MV_INFERENCE_TARGET_CPU,\n
+ * #MV_INFERENCE_TARGET_GPU,\n
+ * #MV_INFERENCE_TARGET_CUSTOM.\n
+ * The default type is CPU.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_TARGET_TYPE "MV_INFERENCE_TARGET_TYPE"
+
+/**
+ * @brief Defines #MV_INFERENCE_INPUT_TENSOR_WIDTH to set the width
+ * of input tensor.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_INPUT_TENSOR_WIDTH "MV_INFERENCE_INPUT_TENSOR_WIDTH"
+
+/**
+ * @brief Defines #MV_INFERENCE_INPUT_TENSOR_HEIGHT to set the height
+ * of input tensor.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_INPUT_TENSOR_HEIGHT "MV_INFERENCE_INPUT_TENSOR_HEIGHT"
+
+/**
+ * @brief Defines #MV_INFERENCE_INPUT_TENSOR_CHANNELS to set the number of channels
+ *        of the input tensor (for example, 3 for an RGB colorspace).
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_INPUT_TENSOR_CHANNELS "MV_INFERENCE_INPUT_TENSOR_CHANNELS"
+
+/**
+ * @brief Defines #MV_INFERENCE_INPUT_NODE_NAME to set the input node name.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_string_attribute()
+ * @see mv_engine_config_get_string_attribute()
+ */
+#define MV_INFERENCE_INPUT_NODE_NAME "MV_INFERENCE_INPUT_NODE_NAME"
+
+/**
+ * @brief Defines #MV_INFERENCE_OUTPUT_NODE_NAMES to set the output node names.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_array_string_attribute()
+ * @see mv_engine_config_get_array_string_attribute()
+ */
+#define MV_INFERENCE_OUTPUT_NODE_NAMES "MV_INFERENCE_OUTPUT_NODE_NAMES"
+
+/**
+ * @brief Defines #MV_INFERENCE_OUTPUT_MAX_NUMBER
+ *        to set the maximum number of inference output results
+ *        in the engine configuration.
+ * @details Default value is 5 and a value over 10 will be set to 10.
+ * A value under 1 will be set to 1.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_int_attribute()
+ * @see mv_engine_config_get_int_attribute()
+ */
+#define MV_INFERENCE_OUTPUT_MAX_NUMBER "MV_INFERENCE_OUTPUT_MAX_NUMBER"
+
+/**
+ * @brief Defines #MV_INFERENCE_CONFIDENCE_THRESHOLD
+ * to set the threshold value for the confidence of inference results.
+ * @details Default value is 0.6 and its range is between 0.0 and 1.0.
+ *
+ * @since_tizen 5.5
+ * @see mv_engine_config_set_double_attribute()
+ * @see mv_engine_config_get_double_attribute()
+ */
+#define MV_INFERENCE_CONFIDENCE_THRESHOLD "MV_INFERENCE_CONFIDENCE_THRESHOLD"
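+
+/*
+ * A configuration sketch for the attributes above (values are illustrative;
+ * the engine configuration handle @c cfg is assumed to have been created with
+ * mv_create_engine_config(), and error handling is omitted):
+ *
+ *   mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_BACKEND_TYPE,
+ *           MV_INFERENCE_BACKEND_TFLITE);
+ *   mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_TARGET_TYPE,
+ *           MV_INFERENCE_TARGET_CPU);
+ *   mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_TENSOR_WIDTH, 224);
+ *   mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_TENSOR_HEIGHT, 224);
+ *   mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_INPUT_TENSOR_CHANNELS, 3);
+ *   mv_engine_config_set_double_attribute(cfg, MV_INFERENCE_CONFIDENCE_THRESHOLD, 0.6);
+ *
+ *   const char *outputs[] = { "output" };
+ *   mv_engine_config_set_array_string_attribute(cfg, MV_INFERENCE_OUTPUT_NODE_NAMES,
+ *           outputs, 1);
+ */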
+
+/*************/
+/* Inference */
+/*************/
+/**
+ * @brief Creates inference handle.
+ * @details Use this function to create an inference handle. After creation,
+ *          the inference has to be configured with
+ *          mv_inference_configure() and prepared with
+ *          mv_inference_prepare() to load the network for inference.
+ *
+ * @since_tizen 5.5
+ * @remarks If the app sets #MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ * #MV_INFERENCE_MODEL_WEIGHT_FILE_PATH, and #MV_INFERENCE_MODEL_USER_FILE_PATH
+ * to media storage, then the media storage privilege
+ * %http://tizen.org/privilege/mediastorage is needed.\n
+ * If the app sets any of the paths mentioned in the previous sentence
+ * to external storage, then the external storage privilege
+ * %http://tizen.org/privilege/externalstorage is needed.\n
+ * If the required privileges aren't set properly, mv_inference_prepare() will return
+ * #MEDIA_VISION_ERROR_PERMISSION_DENIED.
+ *
+ * @remarks The @a infer should be released using mv_inference_destroy().
+ *
+ * @param[out] infer The handle to the inference to be created
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ *
+ * @see mv_inference_destroy()
+ * @see mv_inference_prepare()
+ */
+int mv_inference_create(mv_inference_h *infer);
+
+/**
+ * @brief Destroys inference handle and releases all its resources.
+ *
+ * @since_tizen 5.5
+ *
+ * @param[in] infer The handle to the inference to be destroyed
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @pre Create inference handle by using mv_inference_create()
+ *
+ * @see mv_inference_create()
+ */
+int mv_inference_destroy(mv_inference_h infer);
+
+/**
+ * @brief Configures the network of the inference.
+ * @details Use this function to configure the network of the inference
+ *          with the attributes set in @a engine_config.
+ *
+ * @since_tizen 5.5
+ *
+ * @param[in] infer The handle to the inference
+ * @param[in] engine_config The handle to the engine configuration
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ */
+int mv_inference_configure(mv_inference_h infer,
+ mv_engine_config_h engine_config);
+
+/**
+ * @brief Prepares inference.
+ * @details Use this function to prepare inference based on
+ * the configured network.
+ *
+ * @since_tizen 5.5
+ *
+ * @param[in] infer The handle to the inference
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_PERMISSION_DENIED Permission denied
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Not supported format
+ */
+int mv_inference_prepare(mv_inference_h infer);
+
+/**
+* @brief Called to provide information about each engine supported for inference.
+*
+* @since_tizen 5.5
+*
+* @param[in] engine The supported engine.
+* The @a engine can be used only in the callback.
+* To use outside, make a copy.
+* @param[in] supported The flag whether the engine
+* is supported or not
+* @param[in] user_data The user data passed from
+* mv_inference_foreach_supported_engine()
+* @return @c true to continue with the next iteration of the loop,
+* otherwise @c false to break out of the loop
+*
+* @pre mv_inference_foreach_supported_engine()
+*/
+typedef bool(*mv_inference_supported_engine_cb) (
+ const char *engine,
+ bool supported,
+ void *user_data);
+
+/**
+* @brief Traverses the list of supported engines for inference.
+* @details Using this function the supported engines can be obtained.
+* The names can be used with #mv_engine_config_h related
+* getters and setters to get/set MV_INFERENCE_BACKEND_TYPE attribute
+* value.
+*
+* @since_tizen 5.5
+* @param[in] infer The handle to the inference
+* @param[in] callback The iteration callback function
+* @param[in] user_data The user data to be passed to the callback function
+* @return @c 0 on success, otherwise a negative error value
+* @retval #MEDIA_VISION_ERROR_NONE Successful
+* @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+* @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+*
+* @see mv_inference_supported_engine_cb()
+*/
+int mv_inference_foreach_supported_engine(
+ mv_inference_h infer,
+ mv_inference_supported_engine_cb callback,
+ void *user_data);
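+
+/*
+ * A minimal usage sketch (illustrative; the callback name _engine_cb is a
+ * placeholder, the @c infer handle is assumed to exist, and error handling
+ * is omitted):
+ *
+ *   static bool _engine_cb(const char *engine, bool supported, void *user_data)
+ *   {
+ *       if (supported)
+ *           printf("supported inference engine: %s\n", engine);
+ *       return true;
+ *   }
+ *
+ *   ...
+ *   mv_inference_foreach_supported_engine(infer, _engine_cb, NULL);
+ */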
+
+/************************/
+/* Image classification */
+/************************/
+/**
+ * @brief Called when @a source is classified.
+ * @details This callback is invoked each time when
+ * mv_inference_image_classify() is called to provide the results of
+ * image classification.
+ *
+ * @since_tizen 5.5
+ * @remarks The @a indices, @a names, and @a confidences should not be released by the app.
+ * They can be used only in the callback. The number of elements in @a indices, @a names,
+ * and @a confidences is equal to @a number_of_classes.
+ *
+ * @param[in] source The handle to the source of the media where
+ * an image was classified. @a source is the same object
+ * for which mv_inference_image_classify() was called.
+ * It should be released by calling mv_destroy_source()
+ * when it's not needed anymore.
+ * @param[in] number_of_classes The number of classes
+ * @param[in] indices The class indices of the classification results.
+ * @param[in] names Names corresponding to the indices.
+ * @param[in] confidences Each element is the confidence that the image belongs to the corresponding class.
+ * @param[in] user_data The user data passed from callback invoking code
+ *
+ * @pre Call mv_inference_image_classify() function to perform classification of the image
+ * and to invoke this callback as a result
+ *
+ * @see mv_inference_image_classify()
+ */
+typedef void (*mv_inference_image_classified_cb)(
+ mv_source_h source,
+ int number_of_classes,
+ const int *indices,
+ const char **names,
+ const float *confidences,
+ void *user_data);
+
+/**
+ * @brief Performs image classification on the @a source.
+ * @details Use this function to launch image classification.
+ * Each time when mv_inference_image_classify() is
+ * called, @a classified_cb will receive classes
+ * which the media source may belong to.
+ *
+ * @since_tizen 5.5
+ * @remarks This function is synchronous and may take considerable time to run.
+ *
+ * @param[in] source The handle to the source of the media
+ * @param[in] infer The handle to the inference
+ * @param[in] roi Rectangular area in the @a source which will be analyzed.
+ * If NULL, then the whole source will be analyzed.
+ * @param[in] classified_cb The callback which will be called for
+ * classification on @a source.
+ * This callback will receive classification results.
+ * @param[in] user_data The user data passed from the code where
+ * mv_inference_image_classify() is invoked. This data will
+ * be accessible in @a classified_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ *
+ * @pre Create a source handle by calling mv_create_source()
+ * @pre Create an inference handle by calling mv_inference_create()
+ * @pre Configure an inference handle by calling mv_inference_configure()
+ * @pre Prepare an inference by calling mv_inference_prepare()
+ * @post @a classified_cb will be called to provide classification results
+ *
+ * @see mv_inference_image_classified_cb()
+ */
+int mv_inference_image_classify(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb,
+ void *user_data);
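+
+/*
+ * A minimal usage sketch (illustrative; the callback name _classified_cb is a
+ * placeholder, the @c source and @c infer handles are assumed to exist, and
+ * error handling is omitted):
+ *
+ *   static void _classified_cb(mv_source_h source, int number_of_classes,
+ *           const int *indices, const char **names, const float *confidences,
+ *           void *user_data)
+ *   {
+ *       for (int i = 0; i < number_of_classes; ++i)
+ *           printf("class %d (%s): %f\n", indices[i], names[i], confidences[i]);
+ *   }
+ *
+ *   ...
+ *   mv_inference_image_classify(source, infer, NULL, _classified_cb, NULL);
+ */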
+
+
+/************************/
+/* Object Detection */
+/************************/
+/**
+ * @brief Called when objects in @a source are detected.
+ * @details This callback is invoked each time when
+ * mv_inference_object_detect() is called to provide the results of
+ * object detection.
+ *
+ * @since_tizen 5.5
+ * @remarks The @a indices, @a names, @a confidences, and @a locations should not be released by app.
+ * They can be used only in the callback. The number of elements in @a indices, @a names,
+ * @a confidences, and @a locations is equal to @a number_of_objects.
+ *
+ * @param[in] source The handle to the source of the media where
+ * objects were detected. @a source is the same object
+ * for which mv_inference_object_detect() was called.
+ * It should be released by calling mv_destroy_source()
+ * when it's not needed anymore.
+ * @param[in] number_of_objects The number of objects
+ * @param[in] indices The indices of objects.
+ * @param[in] names Names corresponding to the indices.
+ * @param[in] confidences Confidences of the detected objects.
+ * @param[in] locations Locations of the detected objects.
+ * @param[in] user_data The user data passed from callback invoking code
+ *
+ * @pre Call mv_inference_object_detect() function to perform detection of the objects
+ * in @a source and to invoke this callback as a result
+ *
+ * @see mv_inference_object_detect()
+ */
+typedef void (*mv_inference_object_detected_cb)(
+ mv_source_h source,
+ int number_of_objects,
+ const int *indices,
+ const char **names,
+ const float *confidences,
+ const mv_rectangle_s *locations,
+ void *user_data);
+
+/**
+ * @brief Performs object detection on the @a source.
+ * @details Use this function to launch object detection.
+ * Each time when mv_inference_object_detect() is
+ * called, @a detected_cb will receive a list of objects and their locations
+ * in the media source.
+ *
+ * @since_tizen 5.5
+ * @remarks This function is synchronous and may take considerable time to run.
+ *
+ * @param[in] source The handle to the source of the media
+ * @param[in] infer The handle to the inference
+ * @param[in] detected_cb The callback which will be called for
+ * detecting objects in the media source.
+ * This callback will receive the detection results.
+ * @param[in] user_data The user data passed from the code where
+ * mv_inference_object_detect() is invoked. This data will
+ * be accessible in @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ *
+ * @pre Create a source handle by calling mv_create_source()
+ * @pre Create an inference handle by calling mv_inference_create()
+ * @pre Configure an inference handle by calling mv_inference_configure()
+ * @pre Prepare an inference by calling mv_inference_prepare()
+ * @post @a detected_cb will be called to provide detection results
+ *
+ * @see mv_inference_object_detected_cb()
+ */
+int mv_inference_object_detect(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data);
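+
+/*
+ * A minimal usage sketch (illustrative; the callback name _detected_cb is a
+ * placeholder, the @c source and @c infer handles are assumed to exist, and
+ * error handling is omitted):
+ *
+ *   static void _detected_cb(mv_source_h source, int number_of_objects,
+ *           const int *indices, const char **names, const float *confidences,
+ *           const mv_rectangle_s *locations, void *user_data)
+ *   {
+ *       for (int i = 0; i < number_of_objects; ++i)
+ *           printf("%s (%f) at (%d, %d) %dx%d\n", names[i], confidences[i],
+ *                   locations[i].point.x, locations[i].point.y,
+ *                   locations[i].width, locations[i].height);
+ *   }
+ *
+ *   ...
+ *   mv_inference_object_detect(source, infer, _detected_cb, NULL);
+ */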
+
+/*************************************/
+/* Face and its landmark detection */
+/*************************************/
+/**
+ * @brief Called when faces in @a source are detected.
+ * @details This callback is invoked each time when
+ * mv_inference_face_detect() is called to provide the results of
+ * face detection.
+ *
+ * @since_tizen 5.5
+ * @remarks The @a confidences and @a locations should not be released by app.
+ * They can be used only in the callback. The number of elements in @a confidences
+ * and @a locations is equal to @a number_of_faces.
+ *
+ * @param[in] source The handle to the source of the media where
+ * faces were detected. @a source is the same object
+ * for which mv_inference_face_detect() was called.
+ * It should be released by calling mv_destroy_source()
+ * when it's not needed anymore.
+ * @param[in] number_of_faces The number of faces
+ * @param[in] confidences Confidences of the detected faces.
+ * @param[in] locations Locations of the detected faces.
+ * @param[in] user_data The user data passed from callback invoking code
+ *
+ * @pre Call mv_inference_face_detect() function to perform detection of the faces
+ * in @a source and to invoke this callback as a result
+ *
+ * @see mv_inference_face_detect()
+ */
+typedef void (*mv_inference_face_detected_cb)(
+ mv_source_h source,
+ int number_of_faces,
+ const float *confidences,
+ const mv_rectangle_s *locations,
+ void *user_data);
+
+/**
+ * @brief Performs face detection on the @a source.
+ * @details Use this function to launch face detection.
+ * Each time when mv_inference_face_detect() is
+ * called, @a detected_cb will receive a list of faces and their locations
+ * in the media source.
+ *
+ * @since_tizen 5.5
+ * @remarks This function is synchronous and may take considerable time to run.
+ *
+ * @param[in] source The handle to the source of the media
+ * @param[in] infer The handle to the inference
+ * @param[in] detected_cb The callback which will be called for
+ * detecting faces in the media source.
+ * This callback will receive the detection results.
+ * @param[in] user_data The user data passed from the code where
+ * mv_inference_face_detect() is invoked. This data will
+ * be accessible in @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ *
+ * @pre Create a source handle by calling mv_create_source()
+ * @pre Create an inference handle by calling mv_inference_create()
+ * @pre Configure an inference handle by calling mv_inference_configure()
+ * @pre Prepare an inference by calling mv_inference_prepare()
+ * @post @a detected_cb will be called to provide detection results
+ *
+ * @see mv_inference_face_detected_cb()
+ */
+int mv_inference_face_detect(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data);
+
+/**
+ * @brief Called when facial landmarks in @a source are detected.
+ * @details This callback is invoked each time when
+ * mv_inference_facial_landmark_detect() is called to provide
+ * the results of the landmarks detection.
+ *
+ * @since_tizen 5.5
+ * @remarks The @a locations should not be released by app. They can be used only in the callback.
+ * The number of elements in @a locations is equal to @a number_of_landmarks.
+ *
+ * @param[in] source The handle to the source of the media where
+ * landmarks were detected. @a source is the same object
+ * for which mv_inference_facial_landmark_detect() was called.
+ * It should be released by calling mv_destroy_source()
+ * when it's not needed anymore.
+ * @param[in] number_of_landmarks The number of landmarks
+ * @param[in] locations Locations of the detected facial landmarks.
+ * @param[in] user_data The user data passed from callback invoking code
+ *
+ * @pre Call mv_inference_facial_landmark_detect() function to perform detection of the facial landmarks
+ *      in @a source and to invoke this callback as a result
+ *
+ * @see mv_inference_facial_landmark_detect()
+ */
+typedef void (*mv_inference_facial_landmark_detected_cb)(
+ mv_source_h source,
+ int number_of_landmarks,
+ const mv_point_s *locations,
+ void *user_data);
+
+/**
+ * @brief Performs facial landmarks detection on the @a source.
+ * @details Use this function to launch facial landmark detection.
+ * Each time when mv_inference_facial_landmark_detect() is
+ * called, @a detected_cb will receive a list of facial landmark locations
+ * in the media source.
+ *
+ * @since_tizen 5.5
+ * @remarks This function is synchronous and may take considerable time to run.
+ *
+ * @param[in] source The handle to the source of the media
+ * @param[in] infer The handle to the inference
+ * @param[in] roi Rectangular area including a face in @a source which
+ * will be analyzed. If NULL, then the whole source will be
+ * analyzed.
+ * @param[in] detected_cb The callback which will receive the detection results.
+ * @param[in] user_data The user data passed from the code where
+ * mv_inference_facial_landmark_detect() is invoked.
+ * This data will be accessible in @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ *
+ * @pre Create a source handle by calling mv_create_source()
+ * @pre Create an inference handle by calling mv_inference_create()
+ * @pre Configure an inference handle by calling mv_inference_configure()
+ * @pre Prepare an inference by calling mv_inference_prepare()
+ * @post @a detected_cb will be called to provide detection results
+ *
+ * @see mv_inference_facial_landmark_detected_cb()
+ */
+int mv_inference_facial_landmark_detect(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb,
+ void *user_data);
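+
+/*
+ * A minimal usage sketch (illustrative; the callback name _landmark_cb is a
+ * placeholder, the @c source and @c infer handles are assumed to exist, and
+ * error handling is omitted). Landmarks are detected in the whole source when
+ * @a roi is NULL:
+ *
+ *   static void _landmark_cb(mv_source_h source, int number_of_landmarks,
+ *           const mv_point_s *locations, void *user_data)
+ *   {
+ *       for (int i = 0; i < number_of_landmarks; ++i)
+ *           printf("landmark %d: (%d, %d)\n", i, locations[i].x, locations[i].y);
+ *   }
+ *
+ *   ...
+ *   mv_inference_facial_landmark_detect(source, infer, NULL, _landmark_cb, NULL);
+ */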
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __TIZEN_MEDIAVISION_INFERENCE_H__ */
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __TIZEN_MEDIAVISION_INFERENCE_TYPE_H__
+#define __TIZEN_MEDIAVISION_INFERENCE_TYPE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @file mv_inference_type.h
+ * @brief This file contains enumerations and a handle required by
+ * MediaVision inference API.
+ */
+
+/**
+ * @addtogroup CAPI_MEDIA_VISION_INFERENCE_MODULE
+ * @{
+ */
+
+/**
+ * @brief Enumeration for inference backend.
+ *
+ * @since_tizen 5.5
+ *
+ * @see mv_inference_prepare()
+ */
+typedef enum {
+ MV_INFERENCE_BACKEND_NONE = -1, /**< None */
+ MV_INFERENCE_BACKEND_OPENCV, /**< OpenCV */
+ MV_INFERENCE_BACKEND_TFLITE, /**< TensorFlow-Lite */
+ MV_INFERENCE_BACKEND_MAX /**< Backend MAX */
+} mv_inference_backend_type_e;
+
+/**
+ * @brief Enumeration for inference target.
+ *
+ * @since_tizen 5.5
+ *
+ */
+typedef enum {
+ MV_INFERENCE_TARGET_NONE = -1, /**< None */
+ MV_INFERENCE_TARGET_CPU, /**< CPU */
+	MV_INFERENCE_TARGET_GPU,    /**< GPU */
+	MV_INFERENCE_TARGET_CUSTOM, /**< CUSTOM */
+ MV_INFERENCE_TARGET_MAX /**< Target MAX */
+} mv_inference_target_type_e;
+
+/**
+ * @brief The inference handle.
+ *
+ * @since_tizen 5.5
+ */
+typedef void *mv_inference_h;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __TIZEN_MEDIAVISION_INFERENCE_TYPE_H__ */
bool __mv_barcode_generate_check_system_info_feature_supported();
bool __mv_face_check_system_info_feature_supported();
bool __mv_image_check_system_info_feature_supported();
+bool __mv_inference_check_system_info_feature_supported();
+bool __mv_inference_image_check_system_info_feature_supported();
+bool __mv_inference_face_check_system_info_feature_supported();
#ifdef __cplusplus
}
"name" : "MV_SURVEILLANCE_SKIP_FRAMES_COUNT",
"type" : "integer",
"value" : 0
+ },
+ {
+ "name" : "MV_INFERENCE_INPUT_TENSOR_WIDTH",
+ "type" : "integer",
+ "value" : -1
+ },
+ {
+ "name" : "MV_INFERENCE_INPUT_TENSOR_HEIGHT",
+ "type" : "integer",
+ "value" : -1
+ },
+ {
+ "name" : "MV_INFERENCE_INPUT_TENSOR_CHANNELS",
+ "type" : "integer",
+ "value" : -1
+ },
+ {
+ "name" : "MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH",
+ "type" : "string",
+ "value" : ""
+ },
+ {
+ "name" : "MV_INFERENCE_MODEL_WEIGHT_FILE_PATH",
+ "type" : "string",
+ "value" : ""
+ },
+ {
+ "name" : "MV_INFERENCE_MODEL_USER_FILE_PATH",
+ "type" : "string",
+ "value" : ""
+ },
+ {
+ "name" : "MV_INFERENCE_MODEL_MEAN_VALUE",
+ "type" : "double",
+ "value" : 127.5
+ },
+ {
+ "name" : "MV_INFERENCE_BACKEND_TYPE",
+ "type" : "integer",
+ "value" : 0
+ },
+ {
+ "name" : "MV_INFERENCE_TARGET_TYPE",
+ "type" : "integer",
+ "value" : 0
+ },
+ {
+ "name" : "MV_INFERENCE_MODEL_STD_VALUE",
+ "type" : "double",
+ "value" : 1.0
+ },
+ {
+ "name" : "MV_INFERENCE_OUTPUT_MAX_NUMBER",
+ "type" : "integer",
+ "value" : 5
+ },
+ {
+ "name" : "MV_INFERENCE_CONFIDENCE_THRESHOLD",
+ "type" : "double",
+ "value" : 0.6
+ },
+ {
+ "name" : "MV_INFERENCE_INPUT_NODE_NAME",
+ "type" : "string",
+ "value" : "input"
+ },
+ {
+ "name" : "MV_INFERENCE_OUTPUT_NODE_NAMES",
+ "type" : "array",
+ "subtype" : "string",
+ "value" : [ "output"
+ ]
}
]
}
bool isBarcodeGenerationSupported = false;
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
const int nRetVal1 = system_info_get_platform_bool(
"http://tizen.org/feature/vision.barcode_detection",
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
return false;
}
+ const int nRetVal5 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal6 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
(isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported) ?
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
LOGI("system_info_get_platform_bool returned"
"Supported one feature among barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") :
+ "image recognition, and inference capability\n") :
LOGE("system_info_get_platform_bool returned"
"Unsupported all features of barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") ;
+ "image recognition, inference capability\n") ;
return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported);
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
}
bool __mv_barcode_detect_check_system_info_feature_supported()
return isImageRecognitionSupported;
}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
bool isBarcodeGenerationSupported = false;
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
const int nRetVal1 = system_info_get_platform_bool(
"http://tizen.org/feature/vision.barcode_detection",
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
return false;
}
+ const int nRetVal5 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal6 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
(isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported) ?
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
LOGI("system_info_get_platform_bool returned"
"Supported one feature among barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") :
+ "image recognition, and inference capability\n") :
LOGE("system_info_get_platform_bool returned"
"Unsupported all features of barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") ;
+ "image recognition, inference capability\n") ;
return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported);
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
}
bool __mv_barcode_detect_check_system_info_feature_supported()
return isImageRecognitionSupported;
}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
#include <string>
#include <map>
+#include <vector>
#include "mv_common.h"
typedef std::map<std::string, bool>::const_iterator DictBoolConstIter;
typedef std::map<std::string, std::string>::const_iterator DictStrConstIter;
+typedef std::map<std::string, std::vector<std::string>>::const_iterator DictVecStrConstIter;
+
class EngineConfig {
public:
/**
*/
int setAttribute(const std::string& key, const std::string& value);
+ /**
+ * @brief Sets attribute with the vector of string value.
+ *
+ * @since_tizen 5.5
+ * @param [in] key The string name of the attribute
+ * @param [in] value The vector attribute value of string to be set
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ */
+ int setAttribute(const std::string& key, const std::vector<std::string>& value);
+
/**
* @brief Gets double attribute value by attribute name.
*
*/
int getStringAttribute(const std::string& key, std::string *value) const;
+ /**
+ * @brief Gets vector attribute value of string by attribute name.
+ *
+ * @since_tizen 5.5
+ * @param [in] key The string name of the attribute
+ * @param [out] value The vector attribute value of string to be obtained
+ * @return @c MEDIA_VISION_ERROR_NONE on success,\n
+ * otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE If attribute with name @a key
+ * doesn't exist in the engine configuration dictionary
+ */
+ int getStringAttribute(const std::string& key, std::vector<std::string> *value) const;
+
public:
static bool setDefaultConfigFilePath(const std::string& confFilePath);
static const std::map<std::string, int>& getDefaultIntDict();
static const std::map<std::string, bool>& getDefaultBoolDict();
static const std::map<std::string, std::string>& getDefaultStrDict();
+ static const std::map<std::string, std::vector<std::string>>& getDefaultVecStrDict();
static int cacheDictionaries(
bool isLazyCache = true,
std::string configFilePath = DefConfigFilePath);
std::map<std::string, int> m_intDict;
std::map<std::string, bool> m_boolDict;
std::map<std::string, std::string> m_strDict;
+ std::map<std::string, std::vector<std::string>> m_vecStrDict;
private:
static std::string DefConfigFilePath;
static std::map<std::string, int> DefIntDict;
static std::map<std::string, bool> DefBoolDict;
static std::map<std::string, std::string> DefStrDict;
+ static std::map<std::string, std::vector<std::string>> DefVecStrDict;
};
} /* Common */
const char *name,
const char *value);
+/**
+ * @brief Sets the array of string attribute to the configuration.
+ *
+ * @since_tizen 5.5
+ * @param [in] engine_cfg Engine configuration for which @a values have
+ * to be set
+ * @param [in] name String key of the attribute will be used for
+ * storing the @a values into configuration
+ * dictionary
+ * @param [in] values The string values of the attribute
+ * @param [in] size The number of string @a values
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @see mv_engine_config_get_array_string_attribute_c()
+ */
+int mv_engine_config_set_array_string_attribute_c(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char **values,
+ unsigned int size);
+
/**
* @brief Gets the double attribute from the configuration dictionary.
*
const char *name,
char **value);
+/**
+ * @brief Gets the array of string attribute from the configuration dictionary.
+ *
+ * @since_tizen 5.5
+ * @remarks This function allocates the memory required for the output @a values,
+ *          so it has to be released by the caller.
+ * @param [in] engine_cfg Engine configuration from which @a values
+ *                        should be obtained
+ * @param [in] name String key of the attribute will be used for
+ * getting the @a values from the
+ * configuration dictionary
+ * @param [out] values The attribute to be filled with string value
+ * from dictionary
+ * @param [out] size The number of elements in @a values
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE Parameter key isn't available
+ *
+ * @see mv_engine_config_set_array_string_attribute_c()
+ */
+int mv_engine_config_get_array_string_attribute_c(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char ***values,
+ int *size);
+
/**
* @brief Traverses the list of supported attribute names and types.
* @details Using this function names of supported attributes can be obtained.
std::map<std::string, int> EngineConfig::DefIntDict;
std::map<std::string, bool> EngineConfig::DefBoolDict;
std::map<std::string, std::string> EngineConfig::DefStrDict;
+std::map<std::string, std::vector<std::string>> EngineConfig::DefVecStrDict;
EngineConfig::EngineConfig()
{
m_intDict.insert(getDefaultIntDict().begin(), getDefaultIntDict().end());
m_boolDict.insert(getDefaultBoolDict().begin(), getDefaultBoolDict().end());
m_strDict.insert(getDefaultStrDict().begin(), getDefaultStrDict().end());
+ m_vecStrDict.insert(getDefaultVecStrDict().begin(), getDefaultVecStrDict().end());
}
EngineConfig::~EngineConfig()
return MEDIA_VISION_ERROR_NONE;
}
+int EngineConfig::setAttribute(const std::string& key, const std::vector<std::string>& value)
+{
+ LOGI("Set vector attribute of string for the engine config %p. [%s] = [%s, ...]",
+ this, key.c_str(), value[0].c_str());
+
+ if (m_vecStrDict.find(key) == m_vecStrDict.end()) {
+ LOGE("Vector attribute of string [%s] can't be set because isn't supported", key.c_str());
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
+
+ m_vecStrDict[key] = value;
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
int EngineConfig::getDoubleAttribute(const std::string& key, double *value) const
{
DictDblConstIter dictIter = m_dblDict.find(key);
return MEDIA_VISION_ERROR_NONE;
}
+int EngineConfig::getStringAttribute(const std::string& key, std::vector<std::string> *value) const
+{
+ DictVecStrConstIter dictIter = m_vecStrDict.find(key);
+ if (dictIter == m_vecStrDict.end()) {
+ LOGE("Attempt to access to the unsupported vector attribute [%s] of string "
+ "of the engine config %p", key.c_str(), this);
+ return MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ }
+
+ LOGD("Get vector attribute of string from the engine config %p. [%s] = [%s,...]",
+ this, dictIter->first.c_str(), dictIter->second[0].c_str());
+
+ *value = dictIter->second;
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
/*
* static
*/
return DefStrDict;
}
+const std::map<std::string, std::vector<std::string>>& EngineConfig::getDefaultVecStrDict()
+{
+ cacheDictionaries();
+
+ return DefVecStrDict;
+}
+
int EngineConfig::cacheDictionaries(bool isLazyCache, std::string configFilePath)
{
static bool isCached = false;
DefIntDict.clear();
DefBoolDict.clear();
DefStrDict.clear();
+ DefVecStrDict.clear();
const char *conf_file = configFilePath.c_str();
JsonParser *parser;
} else if (0 == strcmp("string", typeStr)) {
DefStrDict[std::string(nameStr)] =
(char*)json_object_get_string_member(attr_obj, "value");
+ } else if (0 == strcmp("array", typeStr)) {
+ const char *subTypeStr = (char*)json_object_get_string_member(attr_obj, "subtype");
+ if (0 == strcmp("string", subTypeStr)) {
+ JsonArray *attr_array = json_object_get_array_member(attr_obj, "value");
+ std::vector<std::string> defaultVecStr;
+ for (unsigned int item = 0; item < json_array_get_length(attr_array); ++item) {
+ defaultVecStr.push_back(std::string(json_array_get_string_element(attr_array, item)));
+ }
+ DefVecStrDict[std::string(nameStr)] = defaultVecStr;
+
+ }
+ //TO-DO: add other subtypes
} else {
LOGW("Attribute %i:%s wasn't parsed from json file. "
"Type isn't supported.", attrInd, nameStr);
return ret;
}
+int mv_engine_config_set_array_string_attribute(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char **values,
+ unsigned int size)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
+ MEDIA_VISION_NULL_ARG_CHECK(name);
+ MEDIA_VISION_NULL_ARG_CHECK(values);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = mv_engine_config_set_array_string_attribute_c(
+ engine_cfg, name, values, size);
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
+
int mv_engine_config_get_double_attribute(
mv_engine_config_h engine_cfg,
const char *name,
return ret;
}
+int mv_engine_config_get_array_string_attribute(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char ***values,
+ int *size)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
+ MEDIA_VISION_NULL_ARG_CHECK(name);
+ MEDIA_VISION_NULL_ARG_CHECK(values);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+ int ret = mv_engine_config_get_array_string_attribute_c(
+ engine_cfg, name, values, size);
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
+
int mv_engine_config_foreach_supported_attribute(
mv_supported_attribute_cb callback,
void *user_data)
return ret;
}
+int mv_engine_config_set_array_string_attribute_c(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char **values,
+ unsigned int size)
+{
+ if (!engine_cfg || name == NULL || values == NULL) {
+ LOGE("Impossible to set attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; values = %p;",
+ engine_cfg, name, values);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ unsigned int elmIdx = 0;
+ std::vector<std::string> arrayValues;
+ for (elmIdx = 0; elmIdx < size; elmIdx++) {
+ arrayValues.push_back(std::string(values[elmIdx]));
+ }
+
+ if (arrayValues.size() != size) {
+ LOGE("Fail to set attribute: size is %d, but actual %zd", size, arrayValues.size());
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->setAttribute(
+ std::string(name), arrayValues);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to set attribute [%s]. Error code (0x%08x)", name, ret);
+ return ret;
+ }
+
+ LOGD("Attribute [%s] has been set", name);
+ return ret;
+}
+
int mv_engine_config_get_double_attribute_c(
mv_engine_config_h engine_cfg,
const char *name,
return ret;
}
+int mv_engine_config_get_array_string_attribute_c(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char ***values,
+ int *size)
+{
+ if (!engine_cfg || name == NULL || values == NULL) {
+ LOGE("Impossible to get attribute. One of the required parameters is "
+ "NULL. engine_cfg = %p; name = %p; values = %p;",
+ engine_cfg, name, values);
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ std::vector<std::string> attributeValue;
+ int ret = (static_cast<MediaVision::Common::EngineConfig*>(engine_cfg))->getStringAttribute(
+ std::string(name), &attributeValue);
+
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Failed to get attribute [%s]. Error code (0x%08x)",
+ name, ret);
+ return ret;
+ }
+
+ int attributeSize = attributeValue.size();
+	LOGD("Allocating %d strings", attributeSize);
+
+ (*values) = (char**)malloc(sizeof(char*) * attributeSize);
+ if ((*values) == NULL) {
+ LOGE("Failed allocation");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+ (*size) = attributeSize;
+
+	int stringSize = 0;
+	for (int idx = 0; idx < attributeSize; ++idx) {
+		stringSize = attributeValue[idx].size();
+		LOGD("Converting %s with length %d to char*", attributeValue[idx].c_str(), stringSize);
+		(*values)[idx] = (char*)malloc(sizeof(char) * (stringSize + 1));
+		if ((*values)[idx] == NULL) {
+			LOGE("Failed to convert string to char*");
+			/* release everything allocated so far to avoid leaking on error */
+			while (--idx >= 0)
+				free((*values)[idx]);
+			free(*values);
+			(*values) = NULL;
+			(*size) = 0;
+			return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+		}
+
+		if (attributeValue[idx].copy((*values)[idx], stringSize) != attributeValue[idx].size()) {
+			LOGE("Conversion from string to char* failed");
+			/* release the current and all previously converted strings */
+			while (idx >= 0)
+				free((*values)[idx--]);
+			free(*values);
+			(*values) = NULL;
+			(*size) = 0;
+			return MEDIA_VISION_ERROR_INVALID_OPERATION;
+		}
+		((*values)[idx])[stringSize] = '\0';
+
+		LOGD("Attribute [%s] (value[%d] %s) has been gotten",
+				name, idx, (*values)[idx]);
+	}
+
+ return ret;
+}
+
int mv_engine_config_foreach_supported_attribute_c(
mv_supported_attribute_cb callback,
void *user_data)
bool isBarcodeGenerationSupported = false;
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
const int nRetVal1 = system_info_get_platform_bool(
"http://tizen.org/feature/vision.barcode_detection",
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
return false;
}
+	const int nRetVal5 = system_info_get_platform_bool(
+					"http://tizen.org/feature/vision.inference.image",
+					&isInferenceImageSupported);
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+		LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+		return false;
+	}
+
+	const int nRetVal6 = system_info_get_platform_bool(
+					"http://tizen.org/feature/vision.inference.face",
+					&isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+		LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+		return false;
+	}
(isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported) ?
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
LOGI("system_info_get_platform_bool returned"
"Supported one feature among barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") :
+ "image recognition, and inference capability\n") :
LOGE("system_info_get_platform_bool returned"
"Unsupported all features of barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") ;
+			"image recognition, and inference capability\n");
return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported);
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
}
bool __mv_barcode_detect_check_system_info_feature_supported()
return isImageRecognitionSupported;
}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
grayImage(leftEyeRect),
leftEye,
leftEye.size());
- cv::resize(
+ cv::resize(
grayImage(rightEyeRect),
rightEye,
rightEye.size());
bool isBarcodeGenerationSupported = false;
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
const int nRetVal1 = system_info_get_platform_bool(
"http://tizen.org/feature/vision.barcode_detection",
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
return false;
}
+	const int nRetVal5 = system_info_get_platform_bool(
+					"http://tizen.org/feature/vision.inference.image",
+					&isInferenceImageSupported);
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+		LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+		return false;
+	}
+
+	const int nRetVal6 = system_info_get_platform_bool(
+					"http://tizen.org/feature/vision.inference.face",
+					&isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+		LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+		return false;
+	}
(isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported) ?
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
LOGI("system_info_get_platform_bool returned"
"Supported one feature among barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") :
+ "image recognition, and inference capability\n") :
LOGE("system_info_get_platform_bool returned"
"Unsupported all features of barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") ;
+			"image recognition, and inference capability\n");
return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported);
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
}
bool __mv_barcode_detect_check_system_info_feature_supported()
return isImageRecognitionSupported;
}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
bool isBarcodeGenerationSupported = false;
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
const int nRetVal1 = system_info_get_platform_bool(
"http://tizen.org/feature/vision.barcode_detection",
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
return false;
}
+	const int nRetVal5 = system_info_get_platform_bool(
+					"http://tizen.org/feature/vision.inference.image",
+					&isInferenceImageSupported);
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+		LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+		return false;
+	}
+
+	const int nRetVal6 = system_info_get_platform_bool(
+					"http://tizen.org/feature/vision.inference.face",
+					&isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+		LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+		return false;
+	}
(isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported) ?
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
LOGI("system_info_get_platform_bool returned"
"Supported one feature among barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") :
+ "image recognition, and inference capability\n") :
LOGE("system_info_get_platform_bool returned"
"Unsupported all features of barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") ;
+			"image recognition, and inference capability\n");
return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported);
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
}
bool __mv_barcode_detect_check_system_info_feature_supported()
return isImageRecognitionSupported;
}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
--- /dev/null
+project(mv_inference_port)
+cmake_minimum_required(VERSION 2.6)
+
+if(MEDIA_VISION_INFERENCE_LICENSE_PORT)
+ add_subdirectory(${PROJECT_SOURCE_DIR}/inference_lic) # Licensed port
+else()
+ add_subdirectory(${PROJECT_SOURCE_DIR}/inference) # Open port
+endif()
--- /dev/null
+project(${MV_INFERENCE_LIB_NAME})
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
+
+SET_PROPERTY(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
+
+if(NOT SKIP_WARNINGS)
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror -std=c++11")
+endif()
+
+SET(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
+SET(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
+SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+INCLUDE_DIRECTORIES("${INC_DIR}")
+INCLUDE_DIRECTORIES("${PROJECT_SOURCE_DIR}/include")
+INCLUDE_DIRECTORIES("${PROJECT_SOURCE_DIR}/src")
+
+SET(dependents "inference-engine-interface-vision iniparser")
+INCLUDE(FindPkgConfig)
+pkg_check_modules(${fw_name} REQUIRED ${dependents})
+FOREACH(flag ${${fw_name}_CFLAGS})
+ SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+ SET(EXTRA_CXXFLAGS "${EXTRA_CXXFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CXXFLAGS} -Wno-unused-parameter -Wno-sign-compare")
+FILE(GLOB MV_INFERENCE_INCLUDE_LIST "${PROJECT_SOURCE_DIR}/include/*.h" "${PROJECT_SOURCE_DIR}/include/*.hpp")
+FILE(GLOB MV_INFERENCE_SOURCE_LIST "${PROJECT_SOURCE_DIR}/src/*.c" "${PROJECT_SOURCE_DIR}/src/*.cpp")
+
+FIND_PACKAGE(OpenCV REQUIRED core dnn imgproc)
+if(NOT OpenCV_FOUND)
+ MESSAGE(SEND_ERROR "OpenCV NOT FOUND")
+ RETURN()
+else()
+ INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
+endif()
+
+
+if(FORCED_STATIC_BUILD)
+ ADD_LIBRARY(${PROJECT_NAME} STATIC ${MV_INFERENCE_INCLUDE_LIST} ${MV_INFERENCE_SOURCE_LIST})
+else()
+ ADD_LIBRARY(${PROJECT_NAME} SHARED ${MV_INFERENCE_INCLUDE_LIST} ${MV_INFERENCE_SOURCE_LIST})
+endif()
+
+TARGET_LINK_LIBRARIES(${PROJECT_NAME} ${MV_COMMON_LIB_NAME} ${OpenCV_LIBS} dlog inference-engine-interface-vision iniparser)
+
+INSTALL(TARGETS ${PROJECT_NAME} DESTINATION ${LIB_INSTALL_DIR})
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_INFERENCE_H__
+#define __MEDIA_VISION_INFERENCE_H__
+
+#include <string>
+#include <map>
+
+#include "mv_common.h"
+#include "inference_engine_error.h"
+#include "inference_engine_vision_impl.h"
+//#include "inference_engine_common_impl.h"
+#include <mv_inference_type.h>
+
+/**
+ * @file Inference.h
+ * @brief This file contains the inference class definition, which
+ *        provides the inference interface.
+ */
+using namespace InferenceEngineInterface::Vision;
+namespace mediavision {
+namespace inference {
+
+struct TensorInfo {
+ int width;
+ int height;
+ int dim;
+ int ch;
+};
+
+struct InferenceConfig {
+ /**
+ * @brief Default constructor for the @ref InferenceConfig
+ *
+	 * @since_tizen 5.5
+ */
+ InferenceConfig();
+
+ std::string mConfigFilePath; /**< Path of a model configuration file */
+
+ std::string mWeightFilePath; /**< Path of a model weight file */
+
+ std::string mUserFilePath; /**< Path of model user file */
+
+ TensorInfo mTensorInfo; /**< Tensor information */
+
+	mv_inference_backend_type_e mBackedType; /**< Backend type used to run the model */
+
+ inference_target_type_e mTargetType; /**< Target type to run inference */
+
+ double mConfidenceThresHold; /**< Confidence threshold value */
+
+ double mMeanValue; /**< The mean value for normalization */
+
+ double mStdValue; /**< The scale factor value for normalization */
+
+	int mMaxOutputNumbers; /**< The maximum number of output results */
+
+ std::string mInputNodeName; /**< The input node name */
+ std::vector<std::string> mOutputNodeNames; /**< The output node names */
+};
+
+
+class Inference {
+public:
+ /**
+ * @brief Creates an Inference class instance.
+ *
+ * @since_tizen 5.5
+ */
+ Inference();
+
+ /**
+ * @brief Destroys an Inference class instance including
+ * its all resources.
+ *
+ * @since_tizen 5.5
+ */
+ ~Inference();
+
+ /**
+	 * @brief Configure model files
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureModelFiles(
+ const std::string modelConfigFilePath,
+ const std::string modelWeightFilePath,
+ const std::string modelUserFilePath);
+
+ /**
+ * @brief Configure input tensor information
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureTensorInfo(int width,
+ int height,
+ int dim,
+ int ch,
+ double stdValue,
+ double meanValue);
+
+ /**
+ * @brief Configure inference backend and target types
+ *
+ * @since_tizen 5.5
+ */
+ int ConfigureEngine(mv_inference_backend_type_e backendType,
+ mv_inference_target_type_e targetType);
+
+ /**
+ * @brief Configure the maximum number of inference results
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureOutput(const int maxOutputNumbers);
+
+ /**
+ * @brief Configure the confidence threshold
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureThreshold(const double threshold);
+
+ /**
+ * @brief Configure the input node name
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureInputNodeName(const std::string nodeName);
+
+ /**
+ * @brief Configure the output node names
+ *
+ * @since_tizen 5.5
+ */
+ void ConfigureOutputNodeNames(const std::vector<std::string> nodeNames);
+
+ /**
+ * @brief Prepares inference
+ * @details Use this function to create the instance based on
+ * the configured backend, to set tensor information,
+ * and load the models.
+ *
+ * @since_tizen 5.5
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
+ int Prepare();
+
+ /**
+	 * @brief Runs inference on the ROI of a given image
+	 * @details Use this function to run a forward pass with the given image.
+	 *          The image is preprocessed and its ROI is fed to the neural
+	 *          network; the output can then be retrieved with the Get*Results() methods.
+	 *          If @a roi is NULL, the full source is analyzed.
+	 *
+	 * @since_tizen 5.5
+	 * @return @c 0 on success, otherwise a negative error value
+ */
+ int Run(mv_source_h mvSource, mv_rectangle_s *roi = NULL);
+
+ /**
+	 * @brief Gets whether the given backend engine is supported or not
+	 *
+	 * @since_tizen 5.5
+	 * @return A pair of the backend name and a flag telling whether it is supported
+ */
+ std::pair<std::string, bool> GetSupportedInferenceBackend(int backend);
+
+ /**
+ * @brief Gets the ImageClassificationResults
+ *
+ * @since_tizen 5.5
+	 * @return @c 0 on success, otherwise a negative error value
+ */
+ int GetClassficationResults(ImageClassificationResults *classificationResults);
+
+ /**
+	 * @brief Gets the ObjectDetectionResults
+	 *
+	 * @since_tizen 5.5
+	 * @return @c 0 on success, otherwise a negative error value
+ */
+ int GetObjectDetectionResults(ObjectDetectionResults *detectionResults);
+
+ /**
+	 * @brief Gets the FaceDetectionResults
+	 *
+	 * @since_tizen 5.5
+	 * @return @c 0 on success, otherwise a negative error value
+ */
+ int GetFaceDetectionResults(FaceDetectionResults *detectionResults);
+
+ /**
+ * @brief Gets the FacialLandmarkDetectionResults
+ *
+ * @since_tizen 5.5
+	 * @return @c 0 on success, otherwise a negative error value
+ */
+ int GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults* results);
+
+ int GetResults(std::vector<std::vector<int>>* dimInfo, std::vector<float*> *results);
+
+private:
+ bool mCanRun; /**< The flag indicating ready to run Inference */
+
+ InferenceConfig mConfig;
+
+ InferenceEngineVision * mBackend;
+
+ std::map<int, std::pair<std::string, bool>> mSupportedInferenceBackend;
+
+private:
+ void CheckSupportedInferenceBackend();
+ int ConvertEngineErrorToVisionError(int error);
+};
+
+} /* Inference */
+} /* MediaVision */
+
+#endif /* __MEDIA_VISION_INFERENCE_H__ */
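To make the control flow of this class easier to follow, here is a minimal sketch of how the port code is expected to drive it. It is illustrative only: the file paths, tensor shape, and node names are placeholders, and MV_INFERENCE_TARGET_CPU is assumed to be one of the mv_inference_target_type_e values declared in mv_inference_type.h (not shown in this patch).

    #include "Inference.h"

    using namespace mediavision::inference;

    // Illustrative driver; paths, shapes and node names are placeholders.
    int RunClassificationOnce(mv_source_h source)
    {
    	Inference inference;

    	inference.ConfigureModelFiles("/usr/share/model.tflite",
    				      "/usr/share/model.tflite",
    				      "/usr/share/labels.txt");
    	/* width, height, dim, ch, stdValue, meanValue */
    	inference.ConfigureTensorInfo(224, 224, 4, 3, 127.5, 127.5);

    	int ret = inference.ConfigureEngine(MV_INFERENCE_BACKEND_TFLITE,
    					    MV_INFERENCE_TARGET_CPU); /* assumed enum value */
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		return ret;

    	inference.ConfigureOutput(5);
    	inference.ConfigureThreshold(0.6);
    	inference.ConfigureInputNodeName("input");
    	inference.ConfigureOutputNodeNames({ "MobilenetV1/Predictions/Reshape_1" });

    	ret = inference.Prepare();
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		return ret;

    	ret = inference.Run(source, NULL);
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		return ret;

    	ImageClassificationResults results;
    	return inference.GetClassficationResults(&results);
    }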
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_INFERENCE_INI_H__
+#define __MEDIA_VISION_INFERENCE_INI_H__
+
+#include <string>
+#include <vector>
+#include <mv_inference_type.h>
+
+namespace mediavision {
+namespace inference {
+
+class InferenceInI {
+public:
+ /**
+ * @brief Creates an Inference class instance.
+ *
+ * @since_tizen 5.5
+ */
+ InferenceInI();
+
+ /**
+ * @brief Destroys an Inference class instance including
+ * its all resources.
+ *
+ * @since_tizen 5.5
+ */
+ ~InferenceInI();
+
+ /**
+	 * @brief Loads the media vision inference ini file
+ *
+ * @since_tizen 5.5
+ */
+ int LoadInI();
+
+ /**
+	 * @brief Unloads the media vision inference ini file
+ *
+ * @since_tizen 5.5
+ */
+ void UnLoadInI();
+
+ std::vector<int> GetSupportedInferenceEngines();
+
+private:
+ std::vector<int> mSupportedInferenceBackend;
+ std::string mIniDefaultPath;
+ std::string mDefaultBackend;
+ std::string mDelimeter;
+};
+
+} /* Inference */
+} /* MediaVision */
+
+#endif /* __MEDIA_VISION_INFERENCE_INI_H__ */
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MEDIA_VISION_INFERENCE_OPEN_H__
+#define __MEDIA_VISION_INFERENCE_OPEN_H__
+
+#include <mv_common.h>
+#include <mv_inference_type.h>
+#include <mv_inference.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @file mv_inference_open.h
+ * @brief This file contains the Media Vision Inference Open API.
+ */
+
+/*************/
+/* Inference */
+/*************/
+/**
+ * @brief Creates an inference handle.
+ * @details Use this function to create an inference handle. After creation
+ *          the inference handle has to be prepared with the
+ *          @ref mv_inference_prepare() function before inference is performed.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [out] infer The handle to the inference to be created
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ *
+ * @post Release @a infer by using
+ * @ref mv_inference_destroy() function when it is not needed
+ * anymore
+ *
+ * @see mv_inference_destroy_open()
+ * @see mv_inference_prepare_open()
+ */
+int mv_inference_create_open(mv_inference_h *infer);
+
+/**
+ * @brief Destroys the inference handle and releases all its resources.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference to be destroyed
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ *
+ * @pre Create an inference handle by using @ref mv_inference_create_open()
+ *
+ * @see mv_inference_create_open()
+ */
+int mv_inference_destroy_open(mv_inference_h infer);
+
+/**
+ * @brief Configure the inference model data to inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the tensor information to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_tensor_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the backend to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the maximum number of outputs to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the confidence threshold value to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the input node name to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_input_node_name_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the set of output node names to the inference handle
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_output_node_names_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Prepare inference.
+ * @details Use this function to prepare inference based on
+ * the configured network.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] infer The handle to the inference
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ */
+int mv_inference_prepare_open(mv_inference_h infer);
+
+/**
+* @brief Traverses the list of supported engines for inference.
+* @details Using this function the supported engines can be obtained.
+* The names can be used with mv_engine_config_h related
+* getters and setters to get/set MV_INFERENCE_BACKEND_TYPE attribute
+* value.
+*
+* @since_tizen 5.5
+* @param [in] infer The handle to the inference
+* @param [in] callback The iteration callback function
+* @param [in] user_data The user data to be passed to the callback function
+* @return @c 0 on success, otherwise a negative error value
+* @retval #MEDIA_VISION_ERROR_NONE Successful
+* @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+* @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+* @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+*
+* @pre @a infer must be created by calling @ref mv_inference_create_open()
+*
+* @see mv_engine_config_set_string_attribute()
+* @see mv_engine_config_get_string_attribute()
+*/
+int mv_inference_foreach_supported_engine_open(
+ mv_inference_h infer,
+ mv_inference_supported_engine_cb callback,
+ void *user_data);
+
+/**
+ * @brief Performs image classification on the @a source
+ * @details Use this function to launch image classification.
+ *          Each time mv_inference_image_classify() is
+ *          called, @a classified_cb will receive the classes
+ * which the media source may belong to.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param [in] roi Rectangular box bounding the region-of-interest on the
+ * @a source. If NULL, then full source will be
+ * analyzed.
+ * @param [in] classified_cb The callback which will be called for
+ * classification on media source.
+ * This callback will receive classification results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_image_classify_open() is invoked. This data will
+ * be accessible from @a classified_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a classified_cb will be called to process classification results
+ *
+ * @see mv_inference_image_classified_cb
+ */
+int mv_inference_image_classify_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb,
+ void *user_data);
+
+
+/**
+ * @brief Performs object detection on the @a source
+ * @details Use this function to launch object detection.
+ *          Each time mv_inference_object_detect() is
+ * called, @a detected_cb will receive a list of objects and their locations
+ * on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param [in] detected_cb The callback which will be called for
+ * detecting objects on media source.
+ * This callback will receive the detection results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_object_detect() is invoked. This data will
+ * be accessible from @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_object_detected_cb
+ */
+int mv_inference_object_detect_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data);
+
+/**
+ * @brief Performs face detection on the @a source
+ * @details Use this function to launch face detection.
+ *          Each time mv_inference_face_detect() is
+ * called, @a detected_cb will receive a list of faces and their locations
+ * on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param [in] detected_cb The callback which will be called for
+ * detecting faces on media source.
+ * This callback will receive the detection results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_face_detect() is invoked. This data will
+ * be accessible from @a detected_cb callback.
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_face_detected_cb
+ */
+int mv_inference_face_detect_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data);
+
+/**
+ * @brief Performs facial landmarks detection on the @a source
+ * @details Use this function to launch facial landmark detection.
+ *          Each time mv_inference_facial_landmark_detect() is
+ *          called, @a detected_cb will receive a list of facial landmarks' locations
+ * on the media source.
+ *
+ * @since_tizen 5.5
+ *
+ * @param [in] source The handle to the source of the media
+ * @param [in] infer The handle to the inference
+ * @param[in] roi Rectangular box bounding face image on the
+ * @a source. If NULL, then full source will be
+ * analyzed.
+ * @param [in] detected_cb The callback which will be called for
+ * detecting facial landmark on media source.
+ * This callback will receive the detection results.
+ * @param [in] user_data The user data passed from the code where
+ * @ref mv_inference_facial_landmark_detect() is invoked.
+ * This data will be accessible from @a detected_cb callback.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INTERNAL Internal error
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT Source colorspace
+ * isn't supported
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ *
+ * @pre Create a source handle by calling @ref mv_create_source()
+ * @pre Create an inference handle by calling @ref mv_inference_create()
+ * @pre Configure an inference handle by calling @ref mv_inference_configure()
+ * @pre Prepare an inference by calling @ref mv_inference_prepare()
+ * @post @a detected_cb will be called to process detection results
+ *
+ * @see mv_inference_facial_landmark_detected_cb
+ */
+int mv_inference_facial_landmark_detect_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb,
+ void *user_data);
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __MEDIA_VISION_INFERENCE_OPEN_H__ */
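For orientation, a sketch of the intended open-port call sequence follows. It is illustrative only: MV_INFERENCE_MODEL_WEIGHT_FILE_PATH and MV_INFERENCE_BACKEND_TYPE stand in for engine-config keys expected to be defined in mv_inference.h (they are not part of this excerpt), the model path is a placeholder, and the remaining mv_inference_configure_*_open() calls follow the same pattern as the two shown.

    #include <mv_common.h>
    #include "mv_inference_open.h"

    /* Illustrative flow only; keys and paths are placeholders (see the note above). */
    static int prepare_image_classification(mv_source_h source)
    {
    	mv_engine_config_h cfg = NULL;
    	mv_inference_h infer = NULL;
    	int ret;

    	(void)source; /* used once classification is actually launched */

    	ret = mv_create_engine_config(&cfg);
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		return ret;

    	mv_engine_config_set_string_attribute(cfg, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
    			"/usr/share/capi-media-vision/models/mobilenet_v1.tflite");
    	mv_engine_config_set_int_attribute(cfg, MV_INFERENCE_BACKEND_TYPE,
    			MV_INFERENCE_BACKEND_TFLITE);

    	ret = mv_inference_create_open(&infer);
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		goto out;

    	ret = mv_inference_configure_model_open(infer, cfg);
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		goto out;

    	ret = mv_inference_configure_engine_open(infer, cfg);
    	if (ret != MEDIA_VISION_ERROR_NONE)
    		goto out;

    	/* ... the other mv_inference_configure_*_open() calls go here ... */

    	ret = mv_inference_prepare_open(infer);

    	/* Classification itself would then be launched with
    	 * mv_inference_image_classify_open(source, infer, NULL, callback, NULL),
    	 * where callback matches mv_inference_image_classified_cb. */

    out:
    	if (infer)
    		mv_inference_destroy_open(infer);
    	mv_destroy_engine_config(cfg);
    	return ret;
    }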
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mv_private.h"
+#include "Inference.h"
+#include "InferenceIni.h"
+
+#include <map>
+
+#include <unistd.h>
+#include <fstream>
+#include <string>
+#include <algorithm>
+
+#define MV_INFERENCE_OUTPUT_NUMBERS_MAX 10
+#define MV_INFERENCE_OUTPUT_NUMBERS_MIN 1
+#define MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX 1.0
+#define MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN 0.0
+
+namespace mediavision {
+namespace inference {
+InferenceConfig::InferenceConfig() :
+ mConfigFilePath(),
+ mWeightFilePath(),
+ mUserFilePath(),
+ mBackedType(MV_INFERENCE_BACKEND_NONE),
+ mTargetType(INFERENCE_TARGET_NONE),
+ mConfidenceThresHold(0.0),
+ mMeanValue(0.0),
+ mStdValue(0.0),
+ mMaxOutputNumbers(1)
+{
+ mTensorInfo.width = -1;
+ mTensorInfo.height = -1;
+ mTensorInfo.dim = -1;
+ mTensorInfo.ch = -1;
+}
+
+Inference::Inference() :
+ mCanRun(),
+ mConfig(),
+ mSupportedInferenceBackend()
+{
+ LOGI("ENTER");
+
+ mBackend = NULL;
+ mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_OPENCV, std::make_pair("opencv", false)));
+ mSupportedInferenceBackend.insert(std::make_pair(MV_INFERENCE_BACKEND_TFLITE, std::make_pair("tflite", false)));
+
+ CheckSupportedInferenceBackend();
+
+ for(int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
+ auto iter = mSupportedInferenceBackend.find(i);
+ LOGE("%d: %s: %s", i, (iter->second).first.c_str(), (iter->second).second ? "TRUE" : "FALSE");
+ }
+ LOGI("LEAVE");
+}
+
+Inference::~Inference()
+{
+ if (mBackend)
+ delete mBackend;
+}
+
+void Inference::CheckSupportedInferenceBackend()
+{
+ LOGE("ENTER");
+
+ InferenceInI ini;
+ ini.LoadInI();
+
+ std::vector<int> supportedBackend = ini.GetSupportedInferenceEngines();
+ for (std::vector<int>::const_iterator it = supportedBackend.begin();
+ it != supportedBackend.end(); ++it) {
+ LOGE("engine: %d", *it);
+
+		auto iter = mSupportedInferenceBackend.find(*it);
+		if (iter != mSupportedInferenceBackend.end())
+			(iter->second).second = true;
+ }
+
+ LOGE("LEAVE");
+
+}
+
+int Inference::ConvertEngineErrorToVisionError(int error)
+{
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ switch(error) {
+ case INFERENCE_ENGINE_ERROR_NONE:
+ ret = MEDIA_VISION_ERROR_NONE;
+ break;
+ case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED:
+ ret = MEDIA_VISION_ERROR_NOT_SUPPORTED;
+ break;
+ case INFERENCE_ENGINE_ERROR_MSG_TOO_LONG:
+ ret = MEDIA_VISION_ERROR_MSG_TOO_LONG;
+ break;
+ case INFERENCE_ENGINE_ERROR_NO_DATA:
+ ret = MEDIA_VISION_ERROR_NO_DATA;
+ break;
+ case INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE:
+ ret = MEDIA_VISION_ERROR_KEY_NOT_AVAILABLE;
+ break;
+ case INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY:
+ ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_PARAMETER:
+ ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_OPERATION:
+ ret = MEDIA_VISION_ERROR_INVALID_OPERATION;
+ break;
+ case INFERENCE_ENGINE_ERROR_PERMISSION_DENIED:
+ ret = MEDIA_VISION_ERROR_PERMISSION_DENIED;
+ break;
+ case INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT:
+ ret = MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ break;
+ case INFERENCE_ENGINE_ERROR_INTERNAL:
+ ret = MEDIA_VISION_ERROR_INTERNAL;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_DATA:
+ ret = MEDIA_VISION_ERROR_INVALID_DATA;
+ break;
+ case INFERENCE_ENGINE_ERROR_INVALID_PATH:
+ ret = MEDIA_VISION_ERROR_INVALID_PATH;
+ break;
+ default:
+ LOGE("Unknown inference engine error type");
+ }
+
+ return ret;
+}
+
+void Inference::ConfigureModelFiles(const std::string modelConfigFilePath,
+ const std::string modelWeightFilePath,
+ const std::string modelUserFilePath)
+{
+ mConfig.mConfigFilePath = modelConfigFilePath;
+ mConfig.mWeightFilePath = modelWeightFilePath;
+ mConfig.mUserFilePath = modelUserFilePath;
+}
+
+void Inference::ConfigureTensorInfo(int width,
+ int height,
+ int dim,
+ int ch,
+ double stdValue,
+ double meanValue)
+{
+ mConfig.mTensorInfo = {width, height, dim, ch};
+ mConfig.mStdValue = stdValue;
+ mConfig.mMeanValue = meanValue;
+}
+
+int Inference::ConfigureEngine(const mv_inference_backend_type_e backendType,
+ const mv_inference_target_type_e targetType)
+{
+ std::pair<std::string, bool> backend = mSupportedInferenceBackend[backendType];
+ if (backend.second == false) {
+ LOGE("%s type is not supported", (backend.first).c_str());
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+ mConfig.mBackedType = backendType;
+ mConfig.mTargetType = (inference_target_type_e)targetType;
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+void Inference::ConfigureOutput(const int maxOutputNumbers)
+{
+ mConfig.mMaxOutputNumbers = std::max(std::min(maxOutputNumbers, MV_INFERENCE_OUTPUT_NUMBERS_MAX),
+ MV_INFERENCE_OUTPUT_NUMBERS_MIN);
+}
+
+void Inference::ConfigureThreshold(const double threshold)
+{
+ mConfig.mConfidenceThresHold = std::max(std::min(threshold, MV_INFERENCE_CONFIDENCE_THRESHOLD_MAX),
+ MV_INFERENCE_CONFIDENCE_THRESHOLD_MIN);
+}
+
+void Inference::ConfigureInputNodeName(const std::string nodeName)
+{
+ mConfig.mInputNodeName = nodeName;
+}
+
+void Inference::ConfigureOutputNodeNames(const std::vector<std::string> nodeNames)
+{
+ mConfig.mOutputNodeNames = nodeNames;
+}
+
+int Inference::Prepare()
+{
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+ LOGE("ENTER");
+ if (mConfig.mBackedType <= MV_INFERENCE_BACKEND_NONE ||
+ mConfig.mBackedType >= MV_INFERENCE_BACKEND_MAX) {
+ LOGE("NOT SUPPORTED BACKEND %d", mConfig.mBackedType);
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ auto iter = mSupportedInferenceBackend.find(mConfig.mBackedType);
+ std::string backendName = (iter->second).first;
+ LOGE("backend string name: %s", backendName.c_str());
+ mBackend = new InferenceEngineVision(backendName);
+
+ if (!mBackend) {
+ LOGE("Fail to create backend");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ ret = mBackend->Init(mConfig.mConfigFilePath,
+ mConfig.mWeightFilePath,
+ mConfig.mUserFilePath);
+
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ return ConvertEngineErrorToVisionError(ret);
+ }
+
+ // Input Tensor Param
+ mBackend->SetInputTensorParamInput(mConfig.mTensorInfo.width,
+ mConfig.mTensorInfo.height,
+ mConfig.mTensorInfo.dim,
+ mConfig.mTensorInfo.ch);
+
+ mBackend->SetInputTensorParamNorm(mConfig.mStdValue, mConfig.mMeanValue);
+
+ mBackend->SetInputTensorParamNode(mConfig.mInputNodeName);
+
+ // Output Tensor Param
+ mBackend->SetOutputTensorParamNumbers(mConfig.mMaxOutputNumbers);
+
+ mBackend->SetOutputTensorParamThresHold(mConfig.mConfidenceThresHold);
+
+ mBackend->SetOutputTensorParamNodes(mConfig.mOutputNodeNames);
+
+ // load model
+ ret = mBackend->Load();
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		delete mBackend;
+		mBackend = NULL;
+		LOGE("Fail to load model");
+		mCanRun = false;
+ return ConvertEngineErrorToVisionError(ret);
+ }
+
+ mCanRun = true;
+
+ // target type
+ // foreach supported??
+ mBackend->SetTargetDevice(mConfig.mTargetType);
+ LOGE("LEAVE");
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::Run(mv_source_h mvSource, mv_rectangle_s *roi)
+{
+ int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+ if (!mCanRun) {
+ LOGE("Invalid to run inference");
+ return MEDIA_VISION_ERROR_INVALID_OPERATION;
+ }
+
+ /* convert mv_source to cv::Mat */
+ cv::Mat cvSource;
+ cv::Rect cvRoi;
+ unsigned int width = 0, height = 0;
+ unsigned int bufferSize = 0;
+ unsigned char *buffer = NULL;
+
+ mv_colorspace_e colorspace = MEDIA_VISION_COLORSPACE_INVALID;
+
+ if (mv_source_get_width(mvSource, &width) != MEDIA_VISION_ERROR_NONE ||
+ mv_source_get_height(mvSource, &height) != MEDIA_VISION_ERROR_NONE ||
+ mv_source_get_colorspace(mvSource, &colorspace) != MEDIA_VISION_ERROR_NONE ||
+		mv_source_get_buffer(mvSource, &buffer, &bufferSize) != MEDIA_VISION_ERROR_NONE)
+ return MEDIA_VISION_ERROR_INTERNAL;
+
+ if (colorspace != MEDIA_VISION_COLORSPACE_RGB888) {
+ LOGE("Not Supported format!\n");
+ return MEDIA_VISION_ERROR_NOT_SUPPORTED_FORMAT;
+ }
+
+ if (roi == NULL) {
+ cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer).clone();
+ } else {
+ cvRoi.x = roi->point.x;
+ cvRoi.y = roi->point.y;
+ cvRoi.width = (roi->point.x + roi->width) >= width ? width - roi->point.x : roi->width;
+ cvRoi.height = (roi->point.y + roi->height) >= height ? height - roi->point.y : roi->height;
+ cvSource = cv::Mat(cv::Size(width, height), CV_MAKETYPE(CV_8U, 3), buffer)(cvRoi).clone();
+ }
+
+ LOGE("Size: w:%d, h:%d", cvSource.size().width, cvSource.size().height);
+ ret = mBackend->Run(cvSource);
+
+ return ConvertEngineErrorToVisionError(ret);
+}
+
+std::pair<std::string, bool> Inference::GetSupportedInferenceBackend(int backend)
+{
+ return mSupportedInferenceBackend[backend];
+}
+
+int Inference::GetClassficationResults(ImageClassificationResults *classificationResults)
+{
+ ImageClassificationResults results;
+ int ret = mBackend->GetInferenceResult(results);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to GetClassificationResults");
+ return ConvertEngineErrorToVisionError(ret);
+ }
+
+ *classificationResults = results;
+ LOGE("Inference: GetClassificationResults: %d\n", results.number_of_classes);
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::GetObjectDetectionResults(ObjectDetectionResults *detectionResults)
+{
+ ObjectDetectionResults results;
+ int ret = mBackend->GetInferenceResult(results);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to GetObjectDetectionResults");
+ return ConvertEngineErrorToVisionError(ret);
+ }
+
+ *detectionResults = results;
+ LOGE("Inference: GetObjectDetectionResults: %d\n", results.number_of_objects);
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::GetFaceDetectionResults(FaceDetectionResults *detectionResults)
+{
+	FaceDetectionResults results;
+	int ret = mBackend->GetInferenceResult(results);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		LOGE("Fail to GetFaceDetectionResults");
+		return ConvertEngineErrorToVisionError(ret);
+	}
+
+ *detectionResults = results;
+ LOGE("Inference: GetFaceDetectionResults: %d\n", results.number_of_faces);
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int Inference::GetFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *detectionResults)
+{
+	FacialLandMarkDetectionResults results;
+	int ret = mBackend->GetInferenceResult(results);
+	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		LOGE("Fail to GetFacialLandMarkDetectionResults");
+		return ConvertEngineErrorToVisionError(ret);
+	}
+
+ *detectionResults = results;
+ LOGE("Inference: FacialLandmarkDetectionResults: %d\n", results.number_of_landmarks);
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+} /* Inference */
+} /* MediaVision */
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iniparser.h>
+#include <unistd.h>
+#include <map>
+
+#include "mv_private.h"
+#include "InferenceIni.h"
+
+#define INFERENCE_INI_FILENAME "/multimedia/mmfw_media_vision.ini"
+namespace mediavision {
+namespace inference {
+
+
+static inline std::string& rtrim(std::string& s, const char* t = " \t\n\r\f\v")
+{
+ s.erase(s.find_last_not_of(t) + 1);
+ return s;
+}
+
+static inline std::string& ltrim(std::string& s, const char* t = " \t\n\r\f\v")
+{
+	s.erase(0, s.find_first_not_of(t));
+ return s;
+}
+
+static inline std::string& trim(std::string& s, const char* t = " \t\n\r\f\v")
+{
+ return ltrim(rtrim(s,t), t);
+}
+
+InferenceInI::InferenceInI() :
+ mIniDefaultPath(SYSCONFDIR),
+ mDefaultBackend("OPENCV"),
+ mDelimeter(",")
+{
+ mIniDefaultPath += INFERENCE_INI_FILENAME;
+}
+
+InferenceInI::~InferenceInI()
+{
+
+}
+
+int InferenceInI::LoadInI()
+{
+ LOGE("ENTER");
+ dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
+ if (dict == NULL) {
+ LOGE("Fail to load ini");
+ return -1;
+ }
+
+ std::string list = std::string(iniparser_getstring(dict, "inference backend:supported backend types", (char*)mDefaultBackend.c_str()));
+
+ size_t pos = 0;
+ while((pos = list.find(mDelimeter)) != std::string::npos) {
+ std::string tmp = list.substr(0, pos);
+ mSupportedInferenceBackend.push_back(atoi(tmp.c_str()));
+
+ list.erase(0, pos + mDelimeter.length());
+ }
+ mSupportedInferenceBackend.push_back(atoi(list.c_str()));
+
+ if(dict) {
+ iniparser_freedict(dict);
+ dict = NULL;
+ }
+
+ LOGE("LEAVE");
+ return 0;
+}
+
+void InferenceInI::UnLoadInI()
+{
+ ;
+}
+
+std::vector<int> InferenceInI::GetSupportedInferenceEngines()
+{
+ return mSupportedInferenceBackend;
+}
+
+} /* Inference */
+} /* MediaVision */
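For context, LoadInI() above resolves the file to ${SYSCONFDIR}/multimedia/mmfw_media_vision.ini and reads the "supported backend types" key of the "inference backend" section as a comma-separated list of backend ids (split on the delimiter and parsed with atoi). A plausible shape of that section, with purely illustrative ids, is:

    [inference backend]
    ; comma-separated backend ids (values of mv_inference_backend_type_e)
    supported backend types = 0,1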
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mv_private.h"
+#include "mv_inference.h"
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+/* Include headers of licensed inference module here. */
+//#include "mv_inference_lic.h"
+
+#else
+
+/* Include headers of open inference module here. */
+#include "mv_inference_open.h"
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+/**
+ * @file mv_inference.c
+ * @brief This file contains Media Vision inference module.
+ */
+
+int mv_inference_create(mv_inference_h *infer)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_NULL_ARG_CHECK(infer);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ //ret = mv_inference_create_lic(infer);
+
+#else
+
+ ret = mv_inference_create_open(infer);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return ret;
+}
+
+int mv_inference_destroy(mv_inference_h infer)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ //ret = mv_inference_destroy_lic(infer);
+
+#else
+
+ ret = mv_inference_destroy_open(infer);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return ret;
+}
+
+int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+ MEDIA_VISION_INSTANCE_CHECK(engine_config);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ //ret = mv_inference_configure_lic(infer);
+
+#else
+
+ ret = mv_inference_configure_model_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE){
+ LOGE("Fail to configure model");
+ return ret;
+ }
+
+ ret = mv_inference_configure_tensor_info_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE){
+ LOGE("Fail to configure tensor information");
+ return ret;
+ }
+
+ ret = mv_inference_configure_engine_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE){
+ LOGE("Fail to configure engine and target");
+ return ret;
+ }
+
+ ret = mv_inference_configure_output_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure output");
+ return ret;
+ }
+
+ ret = mv_inference_configure_confidence_threshold_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure confidence threshold");
+ return ret;
+ }
+
+ ret = mv_inference_configure_input_node_name_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure input node name");
+ return ret;
+ }
+
+ ret = mv_inference_configure_output_node_names_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure output node names");
+ return ret;
+ }
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return ret;
+}
+
+
+int mv_inference_prepare(mv_inference_h infer)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ //ret = mv_inference_prepare_lic(infer);
+
+#else
+
+ ret = mv_inference_prepare_open(infer);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+ return ret;
+}
+
+int mv_inference_foreach_supported_engine(
+ mv_inference_h infer,
+ mv_inference_supported_engine_cb callback,
+ void *user_data)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+ MEDIA_VISION_NULL_ARG_CHECK(callback);
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ // ret = mv_inference_foreach_supported_engine_lic(infer, callback, user_data);
+
+#else
+
+ ret = mv_inference_foreach_supported_engine_open(infer, callback, user_data);
+
+#endif
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
+
+
+int mv_inference_image_classify(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb,
+ void *user_data)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_image_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(source);
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+ MEDIA_VISION_NULL_ARG_CHECK(classified_cb);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+ /*
+ ret = mv_inference_image_classify_lic(source, infer, classified_cb, user_data);
+ */
+
+#else
+
+ ret = mv_inference_image_classify_open(source, infer, roi, classified_cb, user_data);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
+
+int mv_inference_object_detect(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_image_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(source);
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+ MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+
+	/*
+	ret = mv_inference_object_detect_lic(source, infer, detected_cb, user_data);
+	*/
+
+#else
+
+ ret = mv_inference_object_detect_open(source, infer, detected_cb, user_data);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
+
+int mv_inference_face_detect(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_face_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(source);
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+ MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+	/*
+	ret = mv_inference_face_detect_lic(source, infer, detected_cb, user_data);
+	*/
+#else
+
+	ret = mv_inference_face_detect_open(source, infer, detected_cb, user_data);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
+
+int mv_inference_facial_landmark_detect(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb,
+ void *user_data)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_inference_face_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(source);
+ MEDIA_VISION_INSTANCE_CHECK(infer);
+ MEDIA_VISION_NULL_ARG_CHECK(detected_cb);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+#ifdef MEDIA_VISION_INFERENCE_LICENSE_PORT
+	/*
+	ret = mv_inference_facial_landmark_detect_lic(source, infer, detected_cb, user_data);
+	*/
+#else
+
+	ret = mv_inference_facial_landmark_detect_open(source, infer, roi, detected_cb, user_data);
+
+#endif /* MEDIA_VISION_INFERENCE_LICENSE_PORT */
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
--- /dev/null
+/**
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "mv_private.h"
+#include "mv_inference_open.h"
+
+#include "Inference.h"
+
+#include <new>
+#include <unistd.h>
+#include <string>
+#include <cstring>
+#include <cstdlib>
+
+using namespace mediavision::inference;
+
+int mv_inference_create_open(mv_inference_h *infer)
+{
+ if (infer == NULL ) {
+ LOGE("Handle can't be created because handle pointer is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ (*infer) = static_cast<mv_inference_h>(new (std::nothrow)Inference());
+
+ if (*infer == NULL) {
+ LOGE("Failed to create inference handle");
+ return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+ }
+
+ LOGD("Inference handle [%p] has been created", *infer);
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_inference_destroy_open(mv_inference_h infer)
+{
+ if (!infer) {
+ LOGE("Handle can't be destroyed because handle is NULL");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ LOGD("Destroying inference handle [%p]", infer);
+ delete static_cast<Inference*>(infer);
+ LOGD("Inference handle has been destroyed");
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+
+int mv_inference_configure_model_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ char *modelConfigFilePath = NULL;
+ char *modelWeightFilePath = NULL;
+ char *modelUserFilePath = NULL;
+ double modelMeanValue = 0.0;
+ int backendType= 0;
+ size_t userFileLength = 0;
+ ret = mv_engine_config_get_string_attribute(engine_config,
+ MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ &modelConfigFilePath);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get model configuration file path");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_string_attribute(engine_config,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ &modelWeightFilePath);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get model weight file path");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_string_attribute(engine_config,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ &modelUserFilePath);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get model user file path");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ &modelMeanValue);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get model mean value");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_BACKEND_TYPE,
+ &backendType);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference backend type");
+ goto _ERROR_;
+ }
+
+ if ( access(modelWeightFilePath, F_OK)) {
+ LOGE("weightFilePath in [%s] ", modelWeightFilePath);
+ ret = MEDIA_VISION_ERROR_INVALID_PATH;
+ goto _ERROR_;
+ }
+
+ if ( (backendType > MV_INFERENCE_BACKEND_NONE && backendType < MV_INFERENCE_BACKEND_MAX)
+ && (backendType != MV_INFERENCE_BACKEND_TFLITE)) {
+ if ( access(modelConfigFilePath, F_OK)) {
+ LOGE("modelConfigFilePath in [%s] ", modelConfigFilePath);
+ ret = MEDIA_VISION_ERROR_INVALID_PATH;
+ goto _ERROR_;
+ }
+ }
+
+ userFileLength = strlen(modelUserFilePath);
+ if (userFileLength > 0 && access(modelUserFilePath, F_OK)) {
+ LOGE("categoryFilePath in [%s] ", modelUserFilePath);
+ ret = MEDIA_VISION_ERROR_INVALID_PATH;
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureModelFiles(std::string(modelConfigFilePath),
+ std::string(modelWeightFilePath),
+ std::string(modelUserFilePath));
+
+_ERROR_:
+ if (modelConfigFilePath)
+ free(modelConfigFilePath);
+
+ if (modelWeightFilePath)
+ free(modelWeightFilePath);
+
+ if (modelUserFilePath)
+ free(modelUserFilePath);
+
+ return ret;
+}
+
+int mv_inference_configure_tensor_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ int tensorWidth, tensorHeight, tensorDim, tensorCh;
+ double meanValue, stdValue;
+
+ // This should be one. only one batch is supported
+ tensorDim = 1;
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ &tensorWidth);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get tensor width");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ &tensorHeight);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get tensor height");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ &tensorCh);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get tensor channels");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ &meanValue);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get meanValue");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ &stdValue);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get stdValue");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, tensorDim, tensorCh, stdValue, meanValue);
+
+_ERROR_ :
+
+ return ret;
+}
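+
+/*
+ * Example (sketch) of the attributes consumed above for a typical
+ * 224x224 RGB classification model; the numbers are illustrative only:
+ *
+ *   MV_INFERENCE_INPUT_TENSOR_WIDTH    = 224
+ *   MV_INFERENCE_INPUT_TENSOR_HEIGHT   = 224
+ *   MV_INFERENCE_INPUT_TENSOR_CHANNELS = 3
+ *   MV_INFERENCE_MODEL_MEAN_VALUE      = 127.5
+ *   MV_INFERENCE_MODEL_STD_VALUE       = 127.5
+ */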
+
+int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int backendType = 0;
+ int targetType = 0;
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_BACKEND_TYPE,
+ &backendType);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference backend type");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_TARGET_TYPE,
+ &targetType);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference target type");
+ goto _ERROR_;
+ }
+
+ ret = pInfer->ConfigureEngine((mv_inference_backend_type_e)backendType,
+ (mv_inference_target_type_e)targetType);
+
+_ERROR_:
+ return ret;
+}
+
+int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int maxOutput = 0;
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_OUTPUT_MAX_NUMBER,
+ &maxOutput);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference output maximum numbers");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureOutput(maxOutput);
+
+_ERROR_:
+ return ret;
+}
+
+int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ double threshold = 0;
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ &threshold);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference confidence threshold value");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureThreshold(threshold);
+
+_ERROR_:
+ return ret;
+}
+
+int mv_inference_configure_input_node_name_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ char *node_name = NULL;
+
+ ret = mv_engine_config_get_string_attribute(engine_config,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ &node_name);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+		LOGE("Fail to get input node name");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureInputNodeName(std::string(node_name));
+
+_ERROR_:
+
+ if (node_name) {
+ free(node_name);
+ node_name = NULL;
+ }
+
+ return ret;
+}
+
+int mv_inference_configure_output_node_names_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+ int idx = 0;
+ char **node_names = NULL;
+ int size = 0;
+ std::vector<std::string> names;
+ ret = mv_engine_config_get_array_string_attribute(engine_config,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ &node_names,
+ &size);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get _output_node_names");
+ goto _ERROR_;
+ }
+
+ for (idx = 0 ; idx < size; ++idx)
+ names.push_back(std::string(node_names[idx]));
+
+ pInfer->ConfigureOutputNodeNames(names);
+
+_ERROR_:
+
+ if (node_names) {
+ for (idx = 0; idx < size; ++idx) {
+ free(node_names[idx]);
+ }
+ free(node_names);
+ node_names = NULL;
+ }
+
+ return ret;
+}
+
+int mv_inference_prepare_open(mv_inference_h infer)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ ret = pInfer->Prepare();
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ LOGE("Fail to prepare inference");
+
+ return ret;
+}
+
+int mv_inference_foreach_supported_engine_open(
+ mv_inference_h infer,
+ mv_inference_supported_engine_cb callback,
+ void *user_data)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ //bool isSupported = false;
+ //char str[1024] = {'\0'};
+ std::pair<std::string, bool> backend;
+ for (int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
+ backend = pInfer->GetSupportedInferenceBackend(i);
+ callback((backend.first).c_str(), backend.second, user_data);
+ }
+
+ return ret;
+}
+
+int mv_inference_image_classify_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_image_classified_cb classified_cb,
+ void *user_data)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+ int numberOfOutputs = 0;
+
+ ret = pInfer->Run(source, roi);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to run inference");
+ return ret;
+ }
+
+ ImageClassificationResults classificationResults;
+
+ ret = pInfer->GetClassficationResults(&classificationResults);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference results");
+ return ret;
+ }
+
+ numberOfOutputs = classificationResults.number_of_classes;
+
+
+ int *indices = classificationResults.indices.data();
+ float *confidences = classificationResults.confidences.data();
+ static const int START_CLASS_NUMBER = 10;
+ static std::vector<const char*> names(START_CLASS_NUMBER);
+
+ if (numberOfOutputs > START_CLASS_NUMBER)
+ names.resize(numberOfOutputs);
+
+ LOGE("mv_inference_open: number_of_classes: %d\n", classificationResults.number_of_classes);
+
+ for (int n = 0; n < numberOfOutputs; ++n) {
+ LOGE("names: %s", classificationResults.names[n].c_str());
+ names[n] = classificationResults.names[n].c_str();
+ }
+
+ classified_cb(source, numberOfOutputs, indices, names.data(), confidences, user_data);
+
+ return ret;
+}
+
+
+int mv_inference_object_detect_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_object_detected_cb detected_cb,
+ void *user_data)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+ int numberOfOutputs = 0;
+
+ ret = pInfer->Run(source, NULL);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to run inference");
+ return ret;
+ }
+
+ ObjectDetectionResults objectDetectionResults;
+ ret = pInfer->GetObjectDetectionResults(&objectDetectionResults);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference results");
+ return ret;
+ }
+
+ numberOfOutputs = objectDetectionResults.number_of_objects;
+
+ int *indices = objectDetectionResults.indices.data();
+ float *confidences = objectDetectionResults.confidences.data();
+ static const int START_OBJECT_NUMBER = 20;
+ static std::vector<const char*> names(START_OBJECT_NUMBER);
+ static std::vector<mv_rectangle_s> locations(START_OBJECT_NUMBER);
+
+ if (numberOfOutputs > START_OBJECT_NUMBER) {
+ names.resize(numberOfOutputs);
+ locations.resize(numberOfOutputs);
+ }
+
+ for (int n = 0; n < numberOfOutputs; ++n) {
+ LOGE("names: %s", objectDetectionResults.names[n].c_str());
+ names[n] = objectDetectionResults.names[n].c_str();
+
+ locations[n].point.x = objectDetectionResults.locations[n].x;
+ locations[n].point.y = objectDetectionResults.locations[n].y;
+ locations[n].width = objectDetectionResults.locations[n].width;
+ locations[n].height = objectDetectionResults.locations[n].height;
+ }
+
+ detected_cb(source, numberOfOutputs, indices, names.data(), confidences, locations.data(), user_data);
+
+ return ret;
+}
+
+int mv_inference_face_detect_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_inference_face_detected_cb detected_cb,
+ void *user_data)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+ int numberOfOutputs = 0;
+
+ ret = pInfer->Run(source, NULL);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to run inference");
+ return ret;
+ }
+
+ FaceDetectionResults faceDetectionResults;
+ ret = pInfer->GetFaceDetectionResults(&faceDetectionResults);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference results");
+ return ret;
+ }
+
+ numberOfOutputs = faceDetectionResults.number_of_faces;
+
+ float *confidences = faceDetectionResults.confidences.data();
+ std::vector<mv_rectangle_s> locations(numberOfOutputs);
+
+ for (int n = 0; n < numberOfOutputs; ++n) {
+ locations[n].point.x = faceDetectionResults.locations[n].x;
+ locations[n].point.y = faceDetectionResults.locations[n].y;
+ locations[n].width = faceDetectionResults.locations[n].width;
+ locations[n].height = faceDetectionResults.locations[n].height;
+ }
+
+ detected_cb(source, numberOfOutputs, confidences, locations.data(), user_data);
+
+ return ret;
+}
+
+int mv_inference_facial_landmark_detect_open(
+ mv_source_h source,
+ mv_inference_h infer,
+ mv_rectangle_s *roi,
+ mv_inference_facial_landmark_detected_cb detected_cb,
+ void *user_data)
+{
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+ int numberOfLandmarks = 0;
+
+ ret = pInfer->Run(source, roi);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to run inference");
+ return ret;
+ }
+
+ FacialLandMarkDetectionResults facialLandMarkDetectionResults;
+ ret = pInfer->GetFacialLandMarkDetectionResults(&facialLandMarkDetectionResults);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference results");
+ return ret;
+ }
+
+ numberOfLandmarks = facialLandMarkDetectionResults.number_of_landmarks;
+
+ std::vector<mv_point_s> locations(numberOfLandmarks);
+
+ for (int n = 0; n < numberOfLandmarks; ++n) {
+
+ locations[n].x = facialLandMarkDetectionResults.locations[n].x;
+ locations[n].y = facialLandMarkDetectionResults.locations[n].y;
+ }
+
+ detected_cb(source, numberOfLandmarks, locations.data(), user_data);
+
+ return ret;
+}
--- /dev/null
+/**
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <system_info.h>
+
+#include "mv_private.h"
+
+bool __mv_check_system_info_feature_supported()
+{
+ bool isBarcodeDetectionSupported = false;
+ bool isBarcodeGenerationSupported = false;
+ bool isFaceRecognitionSupported = false;
+ bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_detection",
+ &isBarcodeDetectionSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.barcode_detection");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_generation",
+ &isBarcodeGenerationSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.barcode_generation");
+ return false;
+ }
+
+ const int nRetVal3 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.face_recognition",
+ &isFaceRecognitionSupported);
+
+ if (nRetVal3 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.face_recognition");
+ return false;
+ }
+
+ const int nRetVal4 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.image_recognition",
+ &isImageRecognitionSupported);
+
+ if (nRetVal4 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
+ return false;
+ }
+ const int nRetVal5 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal6 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+ (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
+	LOGI("system_info_get_platform_bool returned "
+ "Supported one feature among barcode detection, "
+ "barcode generation, face recognition, "
+ "image recognition, and inference capability\n") :
+	LOGE("system_info_get_platform_bool returned "
+ "Unsupported all features of barcode detection, "
+ "barcode generation, face recognition, "
+ "image recognition, inference capability\n") ;
+
+ return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_barcode_detect_check_system_info_feature_supported()
+{
+ bool isBarcodeDetectionSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_detection",
+ &isBarcodeDetectionSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.barcode_detection");
+ return false;
+ }
+
+ isBarcodeDetectionSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported barcode detection feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported barcode detection feature capability\n");
+
+ return isBarcodeDetectionSupported;
+}
+
+bool __mv_barcode_generate_check_system_info_feature_supported()
+{
+ bool isBarcodeGenerationSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.barcode_generation",
+ &isBarcodeGenerationSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.barcode_generation");
+ return false;
+ }
+
+ isBarcodeGenerationSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported barcode generation feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported barcode generation feature capability\n");
+
+ return isBarcodeGenerationSupported;
+}
+
+bool __mv_face_check_system_info_feature_supported()
+{
+ bool isFaceRecognitionSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.face_recognition",
+ &isFaceRecognitionSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.face_recognition");
+ return false;
+ }
+
+ isFaceRecognitionSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported face recognition feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported face recognition feature capability\n");
+
+ return isFaceRecognitionSupported;
+}
+
+bool __mv_image_check_system_info_feature_supported()
+{
+ bool isImageRecognitionSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.image_recognition",
+ &isImageRecognitionSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
+ return false;
+ }
+
+ isImageRecognitionSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported image recognition feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported image recognition feature capability\n");
+
+ return isImageRecognitionSupported;
+}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
bool isBarcodeGenerationSupported = false;
bool isFaceRecognitionSupported = false;
bool isImageRecognitionSupported = false;
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
const int nRetVal1 = system_info_get_platform_bool(
"http://tizen.org/feature/vision.barcode_detection",
LOGE("SYSTEM_INFO_ERROR: vision.image_recognition");
return false;
}
+ const int nRetVal5 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+	if (nRetVal5 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal6 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+	if (nRetVal6 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
(isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported) ?
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported) ?
LOGI("system_info_get_platform_bool returned"
"Supported one feature among barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") :
+ "image recognition, and inference capability\n") :
LOGE("system_info_get_platform_bool returned"
"Unsupported all features of barcode detection, "
"barcode generation, face recognition, "
- "and image recognition capability\n") ;
+ "image recognition, inference capability\n") ;
return (isBarcodeDetectionSupported || isBarcodeGenerationSupported ||
- isFaceRecognitionSupported || isImageRecognitionSupported);
+ isFaceRecognitionSupported || isImageRecognitionSupported ||
+ isInferenceImageSupported || isInferenceFaceSupported);
}
bool __mv_barcode_detect_check_system_info_feature_supported()
return isImageRecognitionSupported;
}
+
+bool __mv_inference_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal1 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal1 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ const int nRetVal2 = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal2 != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ (isInferenceImageSupported || isInferenceFaceSupported) ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference feature capability\n");
+
+ return (isInferenceImageSupported || isInferenceFaceSupported);
+}
+
+bool __mv_inference_image_check_system_info_feature_supported()
+{
+ bool isInferenceImageSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.image",
+ &isInferenceImageSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.image");
+ return false;
+ }
+
+ isInferenceImageSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference image feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference image feature capability\n");
+
+ return isInferenceImageSupported;
+}
+
+bool __mv_inference_face_check_system_info_feature_supported()
+{
+ bool isInferenceFaceSupported = false;
+
+ const int nRetVal = system_info_get_platform_bool(
+ "http://tizen.org/feature/vision.inference.face",
+ &isInferenceFaceSupported);
+
+ if (nRetVal != SYSTEM_INFO_ERROR_NONE) {
+ LOGE("SYSTEM_INFO_ERROR: vision.inference.face");
+ return false;
+ }
+
+ isInferenceFaceSupported ?
+ LOGI("system_info_get_platform_bool returned "
+ "Supported inference face feature capability\n") :
+ LOGE("system_info_get_platform_bool returned "
+ "Unsupported inference face feature capability\n");
+
+ return isInferenceFaceSupported;
+}
Name: capi-media-vision
Summary: Media Vision library for Tizen Native API
-Version: 0.4.6
-Release: 10
+Version: 0.5.0
+Release: 1
Group: Multimedia/Framework
License: Apache-2.0 and BSD-3-Clause
Source0: %{name}-%{version}.tar.gz
BuildRequires: pkgconfig(gstreamer-base-1.0)
BuildRequires: pkgconfig(gstreamer-app-1.0)
BuildRequires: pkgconfig(libtzplatform-config)
+BuildRequires: pkgconfig(inference-engine-interface-vision)
+BuildRequires: pkgconfig(iniparser)
%description
Media Vision library for Tizen Native API. Includes barcode detecting, barcode generating, face and image modules.
%description surveillance-devel
Media Vision Surveillance library for Tizen Native API (DEV).
+%package inference
+Summary: Multimedia Vision Inference Library
+Group: Multimedia/Framework
+Requires: capi-media-vision-common
+
+%description inference
+Media Vision inference library for Tizen Native API.
+
+%package inference-devel
+Summary: Multimedia Vision inference Library
+Group: Multimedia/Framework
+Requires: capi-media-vision-inference
+Requires: capi-media-vision-common-devel
+
+%description inference-devel
+Media Vision Inference library for Tizen Native API (DEV).
+
%package testsuite
Summary: Multimedia Vision Test Suite
Group: Multimedia/Framework
Requires: capi-media-vision-face
Requires: capi-media-vision-image
Requires: capi-media-vision-surveillance
+Requires: capi-media-vision-inference
%description testsuite
Media Vision Test Suite.
export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE"
%endif
+export CFLAGS+=" -DSYSCONFDIR=\\\"%{_sysconfdir}\\\""
+export CXXFLAGS+=" -DSYSCONFDIR=\\\"%{_sysconfdir}\\\""
export CFLAGS+=" -DMV_CONFIG_PATH=\\\"%{TZ_SYS_RO_SHARE}/%{name}/\\\""
export CXXFLAGS+=" -DMV_CONFIG_PATH=\\\"%{TZ_SYS_RO_SHARE}/%{name}/\\\""
%{_includedir}/media/mv_surveillance*.h
%{_libdir}/pkgconfig/*surveillance.pc
+%files inference
+%{_libdir}/libmv_inference*.so
+
+%files inference-devel
+%{_includedir}/media/mv_infer*.h
+%{_libdir}/pkgconfig/*inference.pc
+
%files testsuite
%{_libdir}/libmv_*helper.so
%{_libdir}/libmv_testsuite*.so
%TZ_SYS_BIN/mv_face*
%TZ_SYS_BIN/mv_image*
%TZ_SYS_BIN/mv_surveillance*
+%TZ_SYS_BIN/mv_infer*
%if 0%{?gcov:1}
%files gcov
return ret;
}
+int mv_engine_config_set_array_string_attribute(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ const char **values,
+ unsigned int size)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
+ MEDIA_VISION_NULL_ARG_CHECK(name);
+ MEDIA_VISION_NULL_ARG_CHECK(values);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+
+ int ret = mv_engine_config_set_array_string_attribute_c(
+ engine_cfg, name, values, size);
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
+
int mv_engine_config_get_double_attribute(
mv_engine_config_h engine_cfg,
const char *name,
return ret;
}
+int mv_engine_config_get_array_string_attribute(
+ mv_engine_config_h engine_cfg,
+ const char *name,
+ char ***values,
+ int *size)
+{
+ MEDIA_VISION_SUPPORT_CHECK(__mv_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(engine_cfg);
+ MEDIA_VISION_NULL_ARG_CHECK(name);
+ MEDIA_VISION_NULL_ARG_CHECK(values);
+
+ MEDIA_VISION_FUNCTION_ENTER();
+ int ret = mv_engine_config_get_array_string_attribute_c(
+ engine_cfg, name, values, size);
+ MEDIA_VISION_FUNCTION_LEAVE();
+
+ return ret;
+}
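+
+/*
+ * Usage sketch for the array-string attribute accessors above (the node
+ * names and the "cfg" handle are placeholders):
+ *
+ *   const char *names[] = { "output_node_0", "output_node_1" };
+ *   mv_engine_config_set_array_string_attribute(cfg,
+ *           MV_INFERENCE_OUTPUT_NODE_NAMES, names, 2);
+ *
+ *   char **read_names = NULL;
+ *   int count = 0;
+ *   mv_engine_config_get_array_string_attribute(cfg,
+ *           MV_INFERENCE_OUTPUT_NODE_NAMES, &read_names, &count);
+ */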
+
int mv_engine_config_foreach_supported_attribute(
mv_supported_attribute_cb callback,
void *user_data)
add_subdirectory(${PROJECT_SOURCE_DIR}/face)
add_subdirectory(${PROJECT_SOURCE_DIR}/image)
add_subdirectory(${PROJECT_SOURCE_DIR}/surveillance)
+add_subdirectory(${PROJECT_SOURCE_DIR}/inference)
--- /dev/null
+project(mv_infer_test_suite)
+cmake_minimum_required(VERSION 2.6)
+
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
+
+if(NOT SKIP_WARNINGS)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror")
+endif()
+
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${LIB_INSTALL_DIR})
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+
+include_directories(${PROJECT_SOURCE_DIR})
+include_directories(${MV_CAPI_MEDIA_VISION_INC_DIR})
+include_directories(${INC_IMAGE_HELPER})
+include_directories(${INC_VIDEO_HELPER})
+include_directories(${INC_TS_COMMON})
+
+MESSAGE("TESTSUITE: ${MV_CAPI_MEDIA_VISION_INC_DIR}")
+
+file(GLOB MV_INFER_TEST_SUITE_INC_LIST "${PROJECT_SOURCE_DIR}/*.h")
+file(GLOB MV_INFER_TEST_SUITE_SRC_LIST "${PROJECT_SOURCE_DIR}/*.c")
+
+add_executable(${PROJECT_NAME}
+ ${MV_INFER_TEST_SUITE_INC_LIST}
+ ${MV_INFER_TEST_SUITE_SRC_LIST}
+ ${MV_CAPI_MEDIA_VISION_INC_LIST})
+
+target_link_libraries(${PROJECT_NAME} ${MV_INFERENCE_LIB_NAME}
+ mv_image_helper
+ mv_video_helper
+ mv_testsuite_common)
+
+install(TARGETS ${PROJECT_NAME} DESTINATION ${testbin_dir})
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <mv_common.h>
+#include <mv_inference.h>
+
+#include <mv_testsuite_common.h>
+#include <image_helper.h>
+#include <mv_video_helper.h>
+
+#include <mv_log_cfg.h>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <dirent.h>
+#include <string.h>
+#include <limits.h>
+
+#define FILE_PATH_SIZE 1024
+
+//Image Classification
+#define IC_LABEL_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_label.txt"
+#define IC_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite"
+
+#define IC_OPENCV_LABEL_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_label_squeezenet.txt"
+#define IC_OPENCV_WEIGHT_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.caffemodel"
+#define IC_OPENCV_CONFIG_CAFFE_PATH "/usr/share/capi-media-vision/models/IC/caffe/ic_caffe_model_squeezenet.prototxt"
+
+//Object Detection
+#define OD_LABEL_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_label.txt"
+#define OD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite"
+
+//Face Detection
+#define FD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite"
+
+//Facial Landmark Detection
+#define FLD_TFLITE_WEIGHT_PATH "/usr/share/capi-media-vision/models/FLD/tflite/fld_tflite_model1.tflite"
+
+/******
+ * Public model:
+ * IC: mobilenet caffe, tf?
+ * OD: mobilenetv1-ssd caffe, tf?
+ * FD: caffe, tf
+ * FLD: caffe, tf
+ */
+
+void _object_detected_cb (
+ mv_source_h source,
+ const int number_of_objects,
+ const int *indices,
+ const char **names,
+ const float *confidences,
+ const mv_rectangle_s *locations,
+ void *user_data)
+{
+ printf("In callback: %d objects\n", number_of_objects);
+
+ for (int n = 0; n < number_of_objects; n++) {
+ printf("%2d\n", indices[n]);
+ printf("%s\n", names[n]);
+ printf("%.3f\n", confidences[n]);
+ printf("%d,%d,%d,%d\n", locations[n].point.x,
+ locations[n].point.y,
+ locations[n].width,
+ locations[n].height);
+ }
+}
+
+void _face_detected_cb (
+ mv_source_h source,
+ const int number_of_faces,
+ const float *confidences,
+ const mv_rectangle_s *locations,
+ void *user_data)
+{
+ printf("In callback: %d faces\n", number_of_faces);
+
+ for (int n = 0; n < number_of_faces; n++) {
+ printf("%.3f\n", confidences[n]);
+ printf("%d,%d,%d,%d\n", locations[n].point.x,
+ locations[n].point.y,
+ locations[n].width,
+ locations[n].height);
+ }
+
+}
+
+void _facial_landmark_detected_cb (
+ mv_source_h source,
+ const int number_of_landmarks,
+ const mv_point_s *locations,
+ void *user_data)
+{
+ printf("In callback, %d landmarks\n", number_of_landmarks);
+ for (int n = 0; n < number_of_landmarks; n++) {
+ printf("%d: x[%d], y[%d]\n", n, locations[n].x, locations[n].y);
+ }
+}
+
+void _image_classified_cb (
+ mv_source_h source,
+ const int number_of_classes,
+ const int *indices,
+ const char **names,
+ const float *confidences,
+ void *user_data)
+{
+ printf("In callback: %d classes\n", number_of_classes);
+
+ for (int n=0; n<number_of_classes; ++n) {
+ printf("%2d\n", indices[n]);
+ printf("%s\n", names[n]);
+ printf("%.3f\n", confidences[n]);
+ }
+}
+
+int show_menu(const char *title, const int *options, const char **names, int cnt)
+{
+ printf("*********************************************\n");
+ printf("* %38s *\n", title);
+ printf("*-------------------------------------------*\n");
+ int i = 0;
+ for (i = 0; i < cnt; ++i)
+ printf("* %2i. %34s *\n", options[i], names[i]);
+
+ printf("*********************************************\n\n");
+ int selection = 0;
+ printf("Your choice: ");
+ if (scanf("%20i", &selection) == 0) {
+ if (scanf("%*[^\n]%*c") != 0) {
+ printf("ERROR: Reading the input line error.\n");
+ return -1;
+ }
+ printf("ERROR: Incorrect input.\n");
+ }
+
+ return selection;
+}
+
+int perform_configure_set_model_config_path(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *filePath = NULL;
+ while (-1 == input_string(
+ "Model configuration file path:",
+ FILE_PATH_SIZE,
+ &(filePath))) {
+ printf("Incorrect file path! Try again.\n");
+ }
+
+ err = mv_engine_config_set_string_attribute(engine_cfg,
+ MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ filePath);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model configuration file path: %s\n", filePath);
+ }
+
+ free(filePath);
+ filePath = NULL;
+
+ return err;
+}
+
+int perform_configure_set_model_weights_path(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *filePath = NULL;
+ while (-1 == input_string(
+ "Model weights file path:",
+ FILE_PATH_SIZE,
+ &(filePath))) {
+ printf("Incorrect file path! Try again.\n");
+ }
+
+ err = mv_engine_config_set_string_attribute(engine_cfg,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ filePath);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model weights file path: %s\n", filePath);
+ }
+
+ free(filePath);
+ filePath = NULL;
+
+ return err;
+}
+
+int perform_configure_set_model_userfile_path(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ char *filePath = NULL;
+ while (-1 == input_string(
+ "Model user file (category list) path:",
+ FILE_PATH_SIZE,
+ &(filePath))) {
+ printf("Incorrect file path! Try again.\n");
+ }
+
+ err = mv_engine_config_set_string_attribute(engine_cfg,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ filePath);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model user file path: %s\n", filePath);
+ }
+
+ free(filePath);
+ filePath = NULL;
+
+ return err;
+}
+
+int perform_configure_set_model_mean_value(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ double meanValue = 0.0;
+ while (-1 == input_double(
+ "mean value:",
+ 0.0,
+ 255.0,
+ &meanValue)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_double_attribute(engine_cfg,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ meanValue);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set model mean value: %f\n", meanValue);
+ }
+
+ return err;
+}
+
+int perform_configure_set_image_scale(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ double stdValue = 0.0;
+ while (-1 == input_double(
+ "Image scale factor:",
+ 1.0,
+ 255.0,
+ &stdValue)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_double_attribute(engine_cfg,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ stdValue);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set std value: %lf\n", stdValue);
+ }
+
+ return err;
+}
+
+int perform_configure_set_confidence_threshold(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ double threshold = 0.0;
+ while (-1 == input_double(
+ "threshold:",
+ 0.0,
+ 1.0,
+ &threshold)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_double_attribute(engine_cfg,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ threshold);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+		printf("Fail to set confidence threshold: %lf\n", threshold);
+ }
+
+ return err;
+}
+
+int perform_configure_set_backend(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ int backendType = 0;
+ while (-1 == input_int(
+ "Backend Type:",
+ 1,
+ 3,
+ &backendType)) {
+ printf("Invalid type! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(engine_cfg,
+ MV_INFERENCE_BACKEND_TYPE,
+ (mv_inference_backend_type_e)backendType);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set backend type: %d\n", backendType);
+ }
+
+ return err;
+}
+
+int perform_configure_set_target(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ int targetType = 0;
+ while (-1 == input_int(
+			"Target Type:",
+ 1,
+ 2,
+ &targetType)) {
+ printf("Invalid type! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(engine_cfg,
+ MV_INFERENCE_TARGET_TYPE,
+ (mv_inference_target_type_e)targetType);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set target type: %d\n", targetType);
+ }
+
+ return err;
+}
+
+int perform_configure_set_tensor_width(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ int tensorW = 0;
+ while (-1 == input_int(
+ "Tensor Width:",
+ INT_MIN,
+ INT_MAX,
+ &tensorW)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ tensorW);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set tensor width: %d\n", tensorW);
+ }
+
+ return err;
+}
+
+int perform_configure_set_tensor_height(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ int tensorH = 0;
+ while (-1 == input_int(
+ "Tensor Height:",
+ INT_MIN,
+ INT_MAX,
+ &tensorH)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ tensorH);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set tensor height: %d\n", tensorH);
+ }
+
+ return err;
+}
+
+int perform_configure_set_tensor_channels(mv_engine_config_h engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+ int tensorC = 0;
+ while (-1 == input_int(
+ "Tensor Channels:",
+ INT_MIN,
+ INT_MAX,
+ &tensorC)) {
+ printf("Invalid value! Try again.\n");
+ }
+
+ err = mv_engine_config_set_int_attribute(engine_cfg,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ tensorC);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to set tensor channels: %d\n", tensorC);
+ }
+
+ return err;
+}
+
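+/* Interactive configuration: creates an engine configuration handle and
+ * lets the user set each inference attribute from a menu. The resulting
+ * handle is returned via engine_cfg and must be destroyed by the caller. */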
+int perform_configuration(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+	const int options[12] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
+ const char *names[12] = { "Set Model Configuration",
+ "Set Model Weights",
+ "Set Model UserFile",
+ "Set Model MeanFile",
+ "Set Image Scale",
+ "Set Confidence Threshold",
+ "Set Backend",
+ "Set Target",
+ "Set InputTensor Width",
+ "Set InputTensor Height",
+ "Set InputTensor Channels",
+ "Back" };
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+			printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Select Actions: ", options, names, 12);
+ switch (sel_opt) {
+ case 1:
+ err = perform_configure_set_model_config_path(handle);
+ sel_opt = 0;
+ break;
+ case 2:
+ err = perform_configure_set_model_weights_path(handle);
+ sel_opt = 0;
+ break;
+ case 3:
+ err = perform_configure_set_model_userfile_path(handle);
+ sel_opt = 0;
+ break;
+ case 4:
+ err = perform_configure_set_model_mean_value(handle);
+ sel_opt = 0;
+ break;
+ case 5:
+ err = perform_configure_set_image_scale(handle);
+ sel_opt = 0;
+ break;
+ case 6:
+ err = perform_configure_set_confidence_threshold(handle);
+ sel_opt = 0;
+ break;
+ case 7:
+ err = perform_configure_set_backend(handle);
+ sel_opt = 0;
+ break;
+ case 8:
+ err = perform_configure_set_target(handle);
+ sel_opt = 0;
+ break;
+ case 9:
+ err = perform_configure_set_tensor_width(handle);
+ sel_opt = 0;
+ break;
+ case 10:
+ err = perform_configure_set_tensor_height(handle);
+ sel_opt = 0;
+ break;
+ case 11:
+ err = perform_configure_set_tensor_channels(handle);
+ sel_opt = 0;
+ break;
+ case 12:
+ err = MEDIA_VISION_ERROR_NONE;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+
+ *engine_cfg = handle;
+ return err;
+}
+
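+/* Preset image classification configuration: TFLite backend on CPU with a
+ * MobileNet-based model, 224x224x3 input, mean/std 127, threshold 0.6. */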
+int perform_tflite_mobilenetv1_config(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+			printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "input_2";
+ char *outputNodeName[1] = {"dense_3/Softmax"};
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ IC_TFLITE_WEIGHT_PATH);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ IC_LABEL_PATH);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.0);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 127.0);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ 0.6);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 224);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 224);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 3);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(handle,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeName,
+ 1);
+
+
+ *engine_cfg = handle;
+ return err;
+}
+
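+/* Preset image classification configuration: OpenCV backend on CPU with a
+ * Caffe SqueezeNet model, 227x227x3 input, threshold 0.6. */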
+int perform_opencv_caffe_squeezenet_config(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+			printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "data";
+ char *outputNodeName[1] = {"prob"};
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ IC_OPENCV_WEIGHT_CAFFE_PATH);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+ IC_OPENCV_CONFIG_CAFFE_PATH);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ IC_OPENCV_LABEL_CAFFE_PATH);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ 0.6);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_OPENCV);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 227);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 227);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 3);
+
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(handle,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeName,
+ 1);
+
+ *engine_cfg = handle;
+ return err;
+}
+
+
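+/* Image classification test menu: configure (interactively or with a
+ * preset), prepare the inference handle, then run it on an image file. */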
+int perform_image_classification()
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[6] = { 1, 2, 3, 4, 5, 6};
+ const char *names[6] = { "Configuration",
+ "TFLite(cpu + Mobilenet)",
+ "OpenCV(cpu + Squeezenet)",
+ "Prepare",
+ "Run",
+ "Back"};
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while(sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names, 6);
+ switch (sel_opt) {
+ case 1:
+ {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_configuration(&engine_cfg);
+ }
+ break;
+ case 2:
+ {
+ // perform TFLite
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_tflite_mobilenetv1_config(&engine_cfg);
+ }
+ break;
+
+ case 3:
+ {
+ // perform OpenCV
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_opencv_caffe_squeezenet_config(&engine_cfg);
+ }
+ break;
+ case 4:
+ {
+ //create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ // configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle\n");
+ break;
+ }
+
+ // prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ }
+ break;
+ case 5:
+ {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy mvSource.\n");
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (MEDIA_VISION_ERROR_NONE != err) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+						printf("Fail to destroy mvSource [err:%i].\n", err2);
+ free(in_file_name);
+ return err2;
+ }
+ }
+
+ free(in_file_name);
+
+ // Run
+ err = mv_inference_image_classify(mvSource, infer, NULL, _image_classified_cb, NULL);
+ }
+ break;
+ case 6:
+ {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ }
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ }
+ }
+
+ }
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Run Image Classification again?: ", options_last, names_last, 2);
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/*
+ *
+ * Object Detection
+ *
+ */
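+/* Preset object detection configuration: TFLite backend on CPU with a
+ * MobileNetV1-SSD model, 300x300x3 input, threshold 0.3. */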
+int perform_tflite_mobilenetv1ssd_config(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+			printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+
+ char *inputNodeName = "normalized_input_image_tensor";
+ char *outputNodeName[4] = {"TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3"};
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ OD_TFLITE_WEIGHT_PATH);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_USER_FILE_PATH,
+ OD_LABEL_PATH);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ 0.3);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 3);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(handle,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeName,
+ 4);
+
+ *engine_cfg = handle;
+ return err;
+}
+
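+/* Object detection test menu: configure, prepare and run inference on an
+ * image file; results are reported through _object_detected_cb(). */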
+int perform_object_detection()
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[5] = {1, 2, 3, 4, 5};
+	const char *names[5] = { "Configuration",
+ "TFLITE(CPU) + MobileNetV1+SSD",
+ "Prepare",
+ "Run",
+ "Back"};
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while(sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names, 5);
+ switch (sel_opt) {
+ case 1:
+ {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_configuration(&engine_cfg);
+ }
+ break;
+ case 2:
+ {
+ //perform TFlite MobileSSD config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_tflite_mobilenetv1ssd_config(&engine_cfg);
+ }
+ break;
+ case 3:
+ {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ }
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ }
+ break;
+ case 4:
+ {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+				if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource.\n");
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to destroy mvSource [err:%i].\n", err2);
+ }
+ free(in_file_name);
+ return err2;
+ }
+ free(in_file_name);
+
+ // Object Detect
+ err = mv_inference_object_detect(mvSource, infer, _object_detected_cb, NULL);
+ }
+ break;
+ case 5:
+ {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ }
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ }
+ }
+ }
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = {1, 2};
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Run Object Detection again?:", options_last, names_last, 2);
+ switch(sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
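+/* Preset face detection configuration: TFLite backend on CPU with a
+ * MobileNetV1-SSD model, 300x300x3 input, threshold 0.3. */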
+int perform_tflite_mobilenetv1ssd_face(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+			printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "normalized_input_image_tensor";
+ char *outputNodeName[4] = {"TFLite_Detection_PostProcess",
+ "TFLite_Detection_PostProcess:1",
+ "TFLite_Detection_PostProcess:2",
+ "TFLite_Detection_PostProcess:3"};
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ FD_TFLITE_WEIGHT_PATH);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 127.5);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 127.5);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ 0.3);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 300);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 3);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(handle,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeName,
+ 4);
+
+ *engine_cfg = handle;
+ return err;
+}
+
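+/* Face detection test menu: configure, prepare and run inference on an
+ * image file; results are reported through _face_detected_cb(). */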
+int perform_face_detection()
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[5] = {1, 2, 3, 4, 5};
+	const char *names[5] = { "Configuration",
+ "TFLite(CPU) + MobileNetV1 + SSD",
+ "Prepare",
+ "Run",
+ "Back"};
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while(sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names, 5);
+ switch (sel_opt) {
+ case 1:
+ {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_configuration(&engine_cfg);
+ }
+ break;
+ case 2:
+ {
+ //perform TF Mobilenetssd config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_tflite_mobilenetv1ssd_face(&engine_cfg);
+ }
+ break;
+ case 3:
+ {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ }
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ }
+ break;
+ case 4:
+ {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+				if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource.\n");
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to destroy mvSource [err:%i].\n", err2);
+ }
+ free(in_file_name);
+ return err2;
+ }
+ free(in_file_name);
+
+			// Face Detect
+ err = mv_inference_face_detect(mvSource, infer, _face_detected_cb, NULL);
+ }
+ break;
+ case 5:
+ {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ }
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ }
+ }
+ }
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = {1, 2};
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Run Face Detection again?:", options_last, names_last, 2);
+ switch(sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
+
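+/* Preset facial landmark detection configuration: TFLite backend on CPU
+ * with a TweakCNN model, 128x128x3 input. */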
+int perform_tflite_TweakCNN(mv_engine_config_h *engine_cfg)
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ mv_engine_config_h handle = NULL;
+ err = mv_create_engine_config(&handle);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create engine configuration handle.\n");
+ if (handle) {
+ int err2 = mv_destroy_engine_config(handle);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+			printf("Fail to destroy engine configuration.\n");
+ }
+ }
+ return err;
+ }
+
+ char *inputNodeName = "INPUT_TENSOR_NAME";
+ char *outputNodeName[1] = {"OUTPUT_TENSOR_NAME"};
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+ FLD_TFLITE_WEIGHT_PATH);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ 0.0);
+
+ mv_engine_config_set_double_attribute(handle,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ 1.0);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_BACKEND_TYPE,
+ MV_INFERENCE_BACKEND_TFLITE);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_TARGET_TYPE,
+ MV_INFERENCE_TARGET_CPU);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ 128);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ 128);
+
+ mv_engine_config_set_int_attribute(handle,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ 3);
+
+ mv_engine_config_set_string_attribute(handle,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ inputNodeName);
+
+ mv_engine_config_set_array_string_attribute(handle,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ outputNodeName,
+ 1);
+
+ *engine_cfg = handle;
+ return err;
+}
+
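+/* Facial landmark detection test menu: configure, prepare and run
+ * inference on an image file; results are reported through
+ * _facial_landmark_detected_cb(). */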
+int perform_facial_landmark_detection()
+{
+ int err = MEDIA_VISION_ERROR_NONE;
+
+ int sel_opt = 0;
+ const int options[5] = {1, 2, 3, 4, 5};
+	const char *names[5] = { "Configuration",
+ "tflite(CPU) + TweakCNN",
+ "Prepare",
+ "Run",
+ "Back"};
+
+ mv_engine_config_h engine_cfg = NULL;
+ mv_inference_h infer = NULL;
+ mv_source_h mvSource = NULL;
+
+ while(sel_opt == 0) {
+ sel_opt = show_menu("Select Action:", options, names, 5);
+ switch (sel_opt) {
+ case 1:
+ {
+ //perform configuration
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+
+ err = perform_configuration(&engine_cfg);
+ }
+ break;
+ case 2:
+ {
+ //perform SRID TweakCNN config
+ if (engine_cfg) {
+ int err2 = mv_destroy_engine_config(engine_cfg);
+ if (err2 != MEDIA_VISION_ERROR_NONE)
+ printf("Fail to destroy engine_cfg [err:%i]\n", err2);
+ }
+ err = perform_tflite_TweakCNN(&engine_cfg);
+ }
+ break;
+ case 3:
+ {
+ // create - configure - prepare
+ if (infer) {
+ int err2 = mv_inference_destroy(infer);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err2);
+ }
+ }
+
+ // inference
+ // create handle
+ err = mv_inference_create(&infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //configure
+ err = mv_inference_configure(infer, engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to configure inference handle [err:%i]\n", err);
+ break;
+ }
+
+ //prepare
+ err = mv_inference_prepare(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to prepare inference handle.\n");
+ break;
+ }
+ }
+ break;
+ case 4:
+ {
+ if (mvSource) {
+ int err2 = mv_destroy_source(mvSource);
+				if (err2 != MEDIA_VISION_ERROR_NONE)
+					printf("Fail to destroy mvSource.\n");
+ }
+
+ char *in_file_name = NULL;
+ /* Load media source */
+ while (input_string("Input file name to be inferred:", 1024, &(in_file_name)) == -1)
+ printf("Incorrect input! Try again.\n");
+
+ err = mv_create_source(&mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to create mvSource.\n");
+ free(in_file_name);
+ break;
+ }
+
+ err = load_mv_source_from_file(in_file_name, mvSource);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ int err2 = mv_destroy_source(mvSource);
+ if (err2 != MEDIA_VISION_ERROR_NONE) {
+					printf("Fail to destroy mvSource [err:%i].\n", err2);
+ }
+ free(in_file_name);
+ return err2;
+ }
+ free(in_file_name);
+
+			// Facial Landmark Detect
+ err = mv_inference_facial_landmark_detect(mvSource, infer, NULL, _facial_landmark_detected_cb, NULL);
+ }
+ break;
+ case 5:
+ {
+ //perform destroy
+ if (engine_cfg) {
+ err = mv_destroy_engine_config(engine_cfg);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy engine_cfg [err:%i]\n", err);
+ }
+ }
+
+ if (infer) {
+ err = mv_inference_destroy(infer);
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to destroy inference handle [err:%i]\n", err);
+ }
+ }
+ }
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+			printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = {1, 2};
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Run Facial Landmark Detection again?:", options_last, names_last, 2);
+ switch(sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ return MEDIA_VISION_ERROR_NONE;
+}
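+
+/* Entry point: top-level menu that dispatches to the per-feature tests. */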
+int main()
+{
+ int sel_opt = 0;
+
+ const int options[5] = {1, 2, 3, 4, 5};
+ const char *names[5] = { "Image Classification",
+ "Object Detection",
+ "Face Detection",
+							"Facial Landmark Detection",
+							"Exit"};
+
+ int err = MEDIA_VISION_ERROR_NONE;
+ while (sel_opt == 0) {
+		sel_opt = show_menu("Select Action:", options, names, 5);
+ switch (sel_opt) {
+ case 1:
+ {
+ err = perform_image_classification();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform image classification\n");
+ }
+ }
+ break;
+ case 2:
+ {
+ err = perform_object_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform object detection\n");
+ }
+ }
+ break;
+ case 3:
+ {
+ err = perform_face_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform face detection\n");
+ }
+ }
+ break;
+ case 4:
+ {
+ err = perform_facial_landmark_detection();
+ if (err != MEDIA_VISION_ERROR_NONE) {
+ printf("Fail to perform facial landmark detection");
+ }
+ break;
+ }
+ case 5:
+ {
+			printf("Exit.\n");
+ }
+ break;
+ default:
+			printf("Invalid option.\n");
+ sel_opt = 0;
+ continue;
+ }
+
+ int do_another = 0;
+ if (err != MEDIA_VISION_ERROR_NONE) {
+		printf("ERROR: Action is finished with error code: %i\n", err);
+ }
+
+ sel_opt = 0;
+ const int options_last[2] = { 1, 2 };
+ const char *names_last[2] = { "Yes", "No" };
+
+ while (sel_opt == 0) {
+ sel_opt = show_menu("Another action?: ", options_last, names_last, 2);
+ switch (sel_opt) {
+ case 1:
+ do_another = 1;
+ break;
+ case 2:
+ do_another = 0;
+ break;
+ default:
+ printf("Invalid option.\n");
+ sel_opt = 0;
+ }
+ }
+
+ sel_opt = (do_another == 1) ? 0 : 1;
+ }
+
+ return 0;
+}