int mv_image_classification_set_model(mv_image_classification_h handle, const char *model_file, const char *meta_file,
const char *label_file);
+/**
+ * @brief Set user-given inference engine and device types for inference.
+ * @details Use this function to change the inference engine and device types for inference instead of default ones after calling @ref mv_image_classification_create().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_type A string of inference engine type.
+ * @param[in] device_type A string of device type.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create()
+ */
+int mv_image_classification_set_engine(mv_image_classification_h handle, const char *engine_type,
+ const char *device_type);
+
+/**
+ * @brief Get a number of inference engines available for image classification task API.
+ * @details Use this function to get how many inference engines are supported for image classification after calling @ref mv_image_classification_create().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[out] engine_count A number of inference engines available for image classification API.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create()
+ */
+int mv_image_classification_get_engine_count(mv_image_classification_h handle, unsigned int *engine_count);
+
+/**
+ * @brief Get engine type to a given inference engine index.
+ * @details Use this function to get inference engine type with a given engine index after calling @ref mv_image_classification_get_engine_count().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_index A inference engine index for getting the inference engine type.
+ * @param[out] engine_type A string to inference engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Get a number of inference engines available for image classification task API by calling @ref mv_image_classification_get_engine_count()
+ */
+int mv_image_classification_get_engine_type(mv_image_classification_h handle, const unsigned int engine_index,
+ char **engine_type);
+
+/**
+ * @brief Get a number of device types available to a given inference engine.
+ * @details Use this function to get how many device types are supported for a given inference engine after calling @ref mv_image_classification_create().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_type A inference engine string.
+ * @param[out] device_count A number of device types available for a given inference engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create()
+ */
+int mv_image_classification_get_device_count(mv_image_classification_h handle, const char *engine_type,
+ unsigned int *device_count);
+
+/**
+ * @brief Get device type list available.
+ * @details Use this function to get what device types are supported for current inference engine type after calling @ref mv_image_classification_configure().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_type A inference engine string.
+ * @param[in] device_index A device index for getting the device type.
+ * @param[out] device_type A string to device type.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create()
+ * @pre Configure image classification task by calling @ref mv_image_classification_configure()
+ */
+int mv_image_classification_get_device_type(mv_image_classification_h handle, const char *engine_type,
+ const unsigned int device_index, char **device_type);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
public:
virtual ~ITask() {};
virtual void create(int type) = 0;
+ virtual void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name = "") = 0;
+ virtual void setEngineInfo(const char *engine_type, const char *device_type) = 0;
virtual void configure() = 0;
+ virtual void getNumberOfEngines(unsigned int *number_of_engines) = 0;
+ virtual void getEngineType(unsigned int engine_index, char **engine_type) = 0;
+ virtual void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) = 0;
+ virtual void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) = 0;
virtual void prepare() = 0;
virtual void setInput(T &t) = 0;
virtual void perform() = 0;
--- /dev/null
+/**
+ * Copyright (c) 2023 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MV_MACHINE_LEARNING_COMMON_H__
+#define __MV_MACHINE_LEARNING_COMMON_H__
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include "mv_inference_type.h"
+
+namespace mediavision
+{
+namespace common
+{
+// Maps an upper-case inference engine name to its MV_INFERENCE_BACKEND_* enum
+// value. Declared const: the tables are lookup-only (queried via find()), and a
+// mutable non-const global in a header invites accidental modification and a
+// separate writable copy per translation unit.
+static const std::map<std::string, int> gBackendTypeTable = {
+	{ "OPENCV", MV_INFERENCE_BACKEND_OPENCV }, { "TFLITE", MV_INFERENCE_BACKEND_TFLITE },
+	{ "ARMNN", MV_INFERENCE_BACKEND_ARMNN },   { "ONE", MV_INFERENCE_BACKEND_ONE },
+	{ "NNTRAINER", MV_INFERENCE_BACKEND_NNTRAINER }, { "SNPE", MV_INFERENCE_BACKEND_SNPE }
+};
+
+// Maps an upper-case device name to its MV_INFERENCE_TARGET_DEVICE_* enum
+// value. Note: "NPU" is mapped to the CUSTOM device type.
+static const std::map<std::string, int> gDeviceTypeTable = { { "CPU", MV_INFERENCE_TARGET_DEVICE_CPU },
+															 { "GPU", MV_INFERENCE_TARGET_DEVICE_GPU },
+															 { "NPU", MV_INFERENCE_TARGET_DEVICE_CUSTOM } };
+
+/**
+ * Converts an upper-case backend name (e.g. "TFLITE") to its
+ * MV_INFERENCE_BACKEND_* enum value.
+ *
+ * @param backend_type Upper-case backend name; taken by const reference to
+ *                     avoid copying the string on every lookup.
+ * @return The backend enum value, or MEDIA_VISION_ERROR_INVALID_PARAMETER when
+ *         the name is not in the table.
+ *
+ * NOTE(review): the error code shares the int range with enum values, so
+ * callers must compare against MEDIA_VISION_ERROR_INVALID_PARAMETER explicitly.
+ * Marked inline: a plain `static` function defined in a header creates an
+ * unused-copy per translation unit and triggers -Wunused-function warnings.
+ */
+static inline int GetBackendType(const std::string &backend_type)
+{
+	auto item = gBackendTypeTable.find(backend_type);
+	if (item != gBackendTypeTable.end())
+		return item->second;
+
+	return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+}
+
+/**
+ * Converts an upper-case device name (e.g. "CPU") to its
+ * MV_INFERENCE_TARGET_DEVICE_* enum value.
+ *
+ * @param device_type Upper-case device name; const reference avoids a copy.
+ * @return The device enum value, or MEDIA_VISION_ERROR_INVALID_PARAMETER when
+ *         the name is not in the table (see note on GetBackendType about the
+ *         shared int range).
+ */
+static inline int GetDeviceType(const std::string &device_type)
+{
+	auto item = gDeviceTypeTable.find(device_type);
+	if (item != gDeviceTypeTable.end())
+		return item->second;
+
+	return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+}
+
+} // namespace
+} // namespace
+
+#endif
\ No newline at end of file
}
void create(int type) override;
+ void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name) override;
+ void setEngineInfo(const char *engine_type, const char *device_type) override;
void configure() override;
+ void getNumberOfEngines(unsigned int *number_of_engines) override;
+ void getEngineType(unsigned int engine_index, char **engine_type) override;
+ void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+ void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
void setInput(T &t) override;
void perform() override;
~FacenetAdapter();
void create(int type) override;
-
+ void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name) override;
+ void setEngineInfo(const char *engine_type, const char *device_type) override;
void configure() override;
+ void getNumberOfEngines(unsigned int *number_of_engines) override;
+ void getEngineType(unsigned int engine_index, char **engine_type) override;
+ void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+ void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
void setInput(T &t) override;
void perform() override;
throw InvalidOperation("Not support yet.");
}
+/* No-op: face recognition loads its model via the meta file in configure(),
+ * so user-given model info is ignored. NOTE(review): confirm silently
+ * ignoring (rather than throwing) is the intended contract here. */
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+												const char *model_name)
+{}
+
+/* No-op: engine/device selection is not supported by this adapter yet. */
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
template<typename T, typename V> void FaceRecognitionAdapter<T, V>::configure()
{
_config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(FACE_RECOGNITION_META_FILE_NAME));
_face_recognition->setConfig(config);
}
+/* No-op stubs: engine/device enumeration is not implemented for the face
+ * recognition task yet. The output parameters are left untouched —
+ * NOTE(review): callers receive no value; confirm they tolerate this. */
+template<typename T, typename V> void FaceRecognitionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+template<typename T, typename V>
+void FaceRecognitionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+{}
+
template<typename T, typename V> void FaceRecognitionAdapter<T, V>::prepare()
{
int ret = _face_recognition->initialize();
throw InvalidOperation("Not support yet.");
}
+/* No-op: facenet configures its model from the meta file (see configure()),
+ * so user-given model info is ignored for now. */
+template<typename T, typename V>
+void FacenetAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+										const char *model_name)
+{}
+
+/* No-op: engine/device selection is not supported by this adapter yet. */
+template<typename T, typename V>
+void FacenetAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
template<typename T, typename V> void FacenetAdapter<T, V>::configure()
{
_facenet->parseMetaFile();
_facenet->configure();
}
+/* No-op stubs: engine/device enumeration is not implemented for the facenet
+ * task yet; output parameters are left untouched (see note on the
+ * FaceRecognitionAdapter counterparts). */
+template<typename T, typename V> void FacenetAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+template<typename T, typename V> void FacenetAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+template<typename T, typename V>
+void FacenetAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+template<typename T, typename V>
+void FacenetAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+{}
+
+
template<typename T, typename V> void FacenetAdapter<T, V>::prepare()
{
_facenet->prepare();
{
private:
void loadLabel();
+ void getEngineList();
+ void getDeviceList(const char *engine_type);
protected:
std::unique_ptr<mediavision::inference::Inference> _inference;
std::string _modelMetaFilePath;
std::string _modelLabelFilePath;
std::vector<std::string> _labels;
+ std::vector<std::string> _valid_backends;
+ std::vector<std::string> _valid_devices;
int _backendType;
int _targetDeviceType;
virtual ~ImageClassification() = default;
void parseMetaFile();
void setUserModel(std::string model_file, std::string meta_file, std::string label_file);
+ void setEngineInfo(std::string engine_type, std::string device_type);
+ void getNumberOfEngines(unsigned int *number_of_engines);
+ void getEngineType(unsigned int engine_index, char **engine_type);
+ void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices);
+ void getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type);
void configure();
void prepare();
void preprocess(mv_source_h &mv_src);
~ImageClassificationAdapter();
void create(int type) override;
-
+ void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name) override;
+ void setEngineInfo(const char *engine_type, const char *device_type) override;
void configure() override;
+ void getNumberOfEngines(unsigned int *number_of_engines) override;
+ void getEngineType(unsigned int engine_index, char **engine_type) override;
+ void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+ void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
void setInput(T &t) override;
void perform() override;
{
struct ImageClassificationInput {
mv_source_h inference_src;
- std::string model_file;
- std::string meta_file;
- std::string label_file;
+ // TODO.
};
/**
int mv_image_classification_set_model_open(mv_image_classification_h handle, const char *model_file,
const char *meta_file, const char *label_file);
+/**
+ * @brief Set user-given backend and device types for inference.
+ * @details Use this function to change the backend and device types for inference instead of default ones after calling @ref mv_image_classification_create_open().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] backend_type A string of backend type.
+ * @param[in] device_type A string of device type.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create_open()
+ */
+int mv_image_classification_set_engine_open(mv_image_classification_h handle, const char *backend_type,
+ const char *device_type);
+
+/**
+ * @brief Get a number of inference engines available for image classification task API.
+ * @details Use this function to get how many inference engines are supported for image classification after calling @ref mv_image_classification_create_open().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[out] engine_count A number of inference engines available for image classification API.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create_open()
+ */
+int mv_image_classification_get_engine_count_open(mv_image_classification_h handle, unsigned int *engine_count);
+
+/**
+ * @brief Get engine type to a given inference engine index.
+ * @details Use this function to get inference engine type with a given engine index after calling @ref mv_image_classification_get_engine_count().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_index A inference engine index for getting the inference engine type.
+ * @param[out] engine_type A string to inference engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Get a number of inference engines available for image classification task API by calling @ref mv_image_classification_get_engine_count()
+ */
+int mv_image_classification_get_engine_type_open(mv_image_classification_h handle, const unsigned int engine_index,
+ char **engine_type);
+
+/**
+ * @brief Get a number of device types available to a given inference engine.
+ * @details Use this function to get how many device types are supported for a given inference engine after calling @ref mv_image_classification_create_open().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_type A inference engine string.
+ * @param[out] device_count A number of device types available for a given inference engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create_open()
+ */
+int mv_image_classification_get_device_count_open(mv_image_classification_h handle, const char *engine_type,
+ unsigned int *device_count);
+
+/**
+ * @brief Get device type list available.
+ * @details Use this function to get what device types are supported for current inference engine type after calling @ref mv_image_classification_configure().
+ *
+ * @since_tizen 7.5
+ *
+ * @param[in] handle The handle to the image classification object.
+ * @param[in] engine_type A inference engine string.
+ * @param[in] device_index A device index for getting the device type.
+ * @param[out] device_type A string to device type.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * @retval #MEDIA_VISION_ERROR_INVALID_OPERATION Invalid operation
+ *
+ * @pre Create a image classification handle by calling @ref mv_image_classification_create_open()
+ * @pre Configure image classification task by calling @ref mv_image_classification_configure_open()
+ */
+int mv_image_classification_get_device_type_open(mv_image_classification_h handle, const char *engine_type,
+ const unsigned int device_index, char **device_type);
+
#ifdef __cplusplus
}
#endif /* __cplusplus */
#include <algorithm>
#include "machine_learning_exception.h"
+#include "mv_machine_learning_common.h"
#include "mv_image_classification_config.h"
#include "image_classification.h"
using namespace std;
using namespace mediavision::inference;
using namespace MediaVision::Common;
+using namespace mediavision::common;
using namespace mediavision::machine_learning::exception;
namespace mediavision
{
namespace machine_learning
{
-ImageClassification::ImageClassification() : _backendType(), _targetDeviceType()
+ImageClassification::ImageClassification()
+ : _backendType(MV_INFERENCE_BACKEND_NONE), _targetDeviceType(MV_INFERENCE_TARGET_DEVICE_NONE)
{
_inference = make_unique<Inference>();
_parser = make_unique<ImageClassificationParser>();
readFile.close();
}
+/* Collects the inference engines usable by this task into _valid_backends.
+ * Only backends the Inference module reports as supported are considered. */
+void ImageClassification::getEngineList()
+{
+	// TODO. we need to describe what inference engines are supported by each Task API,
+	// and based on it, below inference engine types should be checked
+	// if a given type is supported by this Task API later. As of now, tflite only.
+	for (auto idx = MV_INFERENCE_BACKEND_NONE + 1; idx < MV_INFERENCE_BACKEND_MAX; ++idx) {
+		auto candidate = _inference->getSupportedInferenceBackend(idx);
+		if (candidate.second && candidate.first == "tflite")
+			_valid_backends.push_back(candidate.first);
+	}
+}
+
+/* Fills _valid_devices with the device types usable for the given engine.
+ * The engine_type argument is currently unused. */
+void ImageClassification::getDeviceList(const char *engine_type)
+{
+	// TODO. add device types available for a given engine type later.
+	// In default, cpu and gpu only.
+	_valid_devices.emplace_back("cpu");
+	_valid_devices.emplace_back("gpu");
+}
+
void ImageClassification::setUserModel(string model_file, string meta_file, string label_file)
{
_modelFilePath = model_file;
_modelLabelFilePath = label_file;
}
+/* Stores user-given backend/device selection for later use by parseMetaFile().
+ * Names are case-insensitive (upper-cased before the table lookup).
+ * Throws InvalidParameter on empty or unknown names. */
+void ImageClassification::setEngineInfo(std::string engine_type, std::string device_type)
+{
+	if (engine_type.empty() || device_type.empty())
+		throw InvalidParameter("Invalid engine info.");
+
+	// Lookup tables in mv_machine_learning_common.h use upper-case keys.
+	transform(engine_type.begin(), engine_type.end(), engine_type.begin(), ::toupper);
+	transform(device_type.begin(), device_type.end(), device_type.begin(), ::toupper);
+
+	_backendType = GetBackendType(engine_type);
+	_targetDeviceType = GetDeviceType(device_type);
+
+	// Log the cached results instead of calling GetBackendType()/GetDeviceType()
+	// a second time — the original performed two redundant map lookups here.
+	LOGI("Engine type : %s => %d, Device type : %s => %d", engine_type.c_str(), _backendType, device_type.c_str(),
+		 _targetDeviceType);
+
+	if (_backendType == MEDIA_VISION_ERROR_INVALID_PARAMETER ||
+		_targetDeviceType == MEDIA_VISION_ERROR_INVALID_PARAMETER)
+		throw InvalidParameter("backend or target device type not found.");
+}
+
+/* Returns (via out-param) how many inference engines this task can use.
+ * The engine list is built lazily on the first request. */
+void ImageClassification::getNumberOfEngines(unsigned int *number_of_engines)
+{
+	if (_valid_backends.empty())
+		getEngineList();
+
+	*number_of_engines = _valid_backends.size();
+}
+
+/* Returns (via out-param) the engine name at the given index.
+ * The engine list is built lazily on the first request.
+ * Throws InvalidParameter when the index is out of range.
+ * The returned pointer aliases storage owned by this object; the caller
+ * must not free or modify it. */
+void ImageClassification::getEngineType(unsigned int engine_index, char **engine_type)
+{
+	if (_valid_backends.empty())
+		getEngineList();
+
+	if (_valid_backends.size() <= engine_index)
+		throw InvalidParameter("Invalid engine index.");
+
+	*engine_type = const_cast<char *>(_valid_backends[engine_index].data());
+}
+
+/* Returns (via out-param) how many device types the given engine supports.
+ * The device list is built lazily on the first request. */
+void ImageClassification::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{
+	if (_valid_devices.empty())
+		getDeviceList(engine_type);
+
+	*number_of_devices = _valid_devices.size();
+}
+
+/* Returns (via out-param) the device name at the given index for the given
+ * engine. The device list is built lazily on the first request.
+ * Throws InvalidParameter when the index is out of range.
+ * The returned pointer aliases storage owned by this object; the caller
+ * must not free or modify it. */
+void ImageClassification::getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type)
+{
+	if (_valid_devices.empty())
+		getDeviceList(engine_type);
+
+	if (_valid_devices.size() <= device_index)
+		throw InvalidParameter("Invalid device index.");
+
+	*device_type = const_cast<char *>(_valid_devices[device_index].data());
+}
+
void ImageClassification::parseMetaFile()
{
+ int ret = MEDIA_VISION_ERROR_NONE;
+
_config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(MV_IMAGE_CLASSIFICATION_CONFIG_FILE_NAME));
- int ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_BACKEND_TYPE), &_backendType);
- if (ret != MEDIA_VISION_ERROR_NONE)
- throw InvalidOperation("Fail to get backend engine type.");
+ if (_backendType == MV_INFERENCE_BACKEND_NONE) {
+ ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_BACKEND_TYPE), &_backendType);
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ throw InvalidOperation("Fail to get backend engine type.");
+ }
- ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_TARGET_DEVICE_TYPE), &_targetDeviceType);
- if (ret != MEDIA_VISION_ERROR_NONE)
- throw InvalidOperation("Fail to get target device type.");
+ if (_targetDeviceType == MV_INFERENCE_TARGET_DEVICE_NONE) {
+ ret = _config->getIntegerAttribute(string(MV_IMAGE_CLASSIFICATION_TARGET_DEVICE_TYPE), &_targetDeviceType);
+ if (ret != MEDIA_VISION_ERROR_NONE)
+ throw InvalidOperation("Fail to get target device type.");
+ }
string modelDefaultPath;
throw InvalidOperation("Interface not supported.");
}
+/* Forwards user-given model files to the underlying task object.
+ * NOTE(review): model_name is unused by image classification. */
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file,
+													const char *label_file, const char *model_name)
+{
+	_image_classification->setUserModel(string(model_file), string(meta_file), string(label_file));
+}
+
+/* Forwards user-given engine/device names to the underlying task object. */
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{
+	_image_classification->setEngineInfo(string(engine_type), string(device_type));
+}
+
template<typename T, typename V> void ImageClassificationAdapter<T, V>::configure()
{
_image_classification->parseMetaFile();
_image_classification->configure();
}
+/* Thin delegations: engine/device enumeration is implemented entirely in
+ * the underlying ImageClassification object. */
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{
+	_image_classification->getNumberOfEngines(number_of_engines);
+}
+
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{
+	_image_classification->getEngineType(engine_index, engine_type);
+}
+
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{
+	_image_classification->getNumberOfDevices(engine_type, number_of_devices);
+}
+
+template<typename T, typename V>
+void ImageClassificationAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
+													 char **device_type)
+{
+	_image_classification->getDeviceType(engine_type, device_index, device_type);
+}
+
+
template<typename T, typename V> void ImageClassificationAdapter<T, V>::prepare()
{
_image_classification->prepare();
template<typename T, typename V> void ImageClassificationAdapter<T, V>::setInput(T &t)
{
_source = t;
-
- if (!_source.model_file.empty() && !_source.meta_file.empty() && !_source.label_file.empty())
- _image_classification->setUserModel(_source.model_file, _source.meta_file, _source.label_file);
}
template<typename T, typename V> void ImageClassificationAdapter<T, V>::perform()
MEDIA_VISION_FUNCTION_LEAVE();
return ret;
-}
\ No newline at end of file
+}
+
+/* Public entry: validates arguments, then forwards to the _open implementation. */
+int mv_image_classification_set_engine(mv_image_classification_h handle, const char *backend_type,
+									   const char *device_type)
+{
+	/* NOTE(review): reuses the face-inference feature check; confirm an
+	 * image-classification-specific feature check is not required. */
+	MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+	MEDIA_VISION_INSTANCE_CHECK(handle);
+	MEDIA_VISION_NULL_ARG_CHECK(backend_type);
+	MEDIA_VISION_NULL_ARG_CHECK(device_type);
+
+	MEDIA_VISION_FUNCTION_ENTER();
+
+	int ret = mv_image_classification_set_engine_open(handle, backend_type, device_type);
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
+
+/* Public entry: validates arguments, then forwards to the _open implementation. */
+int mv_image_classification_get_engine_count(mv_image_classification_h handle, unsigned int *engine_count)
+{
+	/* NOTE(review): reuses the face-inference feature check — see set_engine. */
+	MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+	MEDIA_VISION_INSTANCE_CHECK(handle);
+	MEDIA_VISION_NULL_ARG_CHECK(engine_count);
+
+	MEDIA_VISION_FUNCTION_ENTER();
+
+	int ret = mv_image_classification_get_engine_count_open(handle, engine_count);
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
+
+/* Public entry: validates arguments, then forwards to the _open implementation. */
+int mv_image_classification_get_engine_type(mv_image_classification_h handle, const unsigned int engine_index,
+											char **engine_type)
+{
+	/* NOTE(review): reuses the face-inference feature check — see set_engine. */
+	MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+	MEDIA_VISION_INSTANCE_CHECK(handle);
+	MEDIA_VISION_NULL_ARG_CHECK(engine_type);
+
+	MEDIA_VISION_FUNCTION_ENTER();
+
+	int ret = mv_image_classification_get_engine_type_open(handle, engine_index, engine_type);
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
+
+/* Public entry: validates arguments, then forwards to the _open implementation. */
+int mv_image_classification_get_device_count(mv_image_classification_h handle, const char *engine_type,
+											 unsigned int *device_count)
+{
+	MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+	MEDIA_VISION_INSTANCE_CHECK(handle);
+	/* Validate engine_type too — the original omitted this check, unlike the
+	 * sibling mv_image_classification_get_device_type(); without it a NULL
+	 * engine_type reaches the downstream implementation unchecked. */
+	MEDIA_VISION_NULL_ARG_CHECK(engine_type);
+	MEDIA_VISION_NULL_ARG_CHECK(device_count);
+
+	MEDIA_VISION_FUNCTION_ENTER();
+
+	int ret = mv_image_classification_get_device_count_open(handle, engine_type, device_count);
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
+
+/* Public entry: validates arguments, then forwards to the _open implementation. */
+int mv_image_classification_get_device_type(mv_image_classification_h handle, const char *engine_type,
+											const unsigned int device_index, char **device_type)
+{
+	/* NOTE(review): reuses the face-inference feature check — see set_engine. */
+	MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
+
+	MEDIA_VISION_INSTANCE_CHECK(handle);
+	MEDIA_VISION_NULL_ARG_CHECK(engine_type);
+	MEDIA_VISION_NULL_ARG_CHECK(device_type);
+
+	MEDIA_VISION_FUNCTION_ENTER();
+
+	int ret = mv_image_classification_get_device_type_open(handle, engine_type, device_index, device_type);
+
+	MEDIA_VISION_FUNCTION_LEAVE();
+
+	return ret;
+}
auto context = static_cast<Context *>(handle);
auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
- ImageClassificationInput input;
+ task->setModelInfo(model_file, meta_file, label_file);
+ } catch (const BaseException &e) {
+ LOGE("%s", e.what());
+ return e.getError();
+ }
- input.model_file = string(model_file);
- input.meta_file = string(meta_file);
- input.label_file = string(label_file);
+ LOGD("LEAVE");
- task->setInput(input);
+ return MEDIA_VISION_ERROR_NONE;
+}
+
+/* Internal (open) implementation of mv_image_classification_set_engine().
+ * NOTE(review): backend_type/device_type are not NULL-checked here — the
+ * public wrapper validates them; confirm no other caller can pass NULL,
+ * since task->setEngineInfo() receives the raw pointers. */
+int mv_image_classification_set_engine_open(mv_image_classification_h handle, const char *backend_type,
+											const char *device_type)
+{
+	if (!handle) {
+		LOGE("Handle is NULL.");
+		return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+	}
+
+	try {
+		auto context = static_cast<Context *>(handle);
+		auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+		task->setEngineInfo(backend_type, device_type);
+	} catch (const BaseException &e) {
+		LOGE("%s", e.what());
+		return e.getError();
+	}
+
+	LOGD("LEAVE");
+
+	return MEDIA_VISION_ERROR_NONE;
+}
+
+/* Internal (open) implementation of mv_image_classification_get_engine_count().
+ * Errors thrown by the task layer are converted to media-vision error codes. */
+int mv_image_classification_get_engine_count_open(mv_image_classification_h handle, unsigned int *engine_count)
+{
+	if (!handle) {
+		LOGE("Handle is NULL.");
+		return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+	}
+
+	try {
+		auto context = static_cast<Context *>(handle);
+		auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+		task->getNumberOfEngines(engine_count);
+		// TODO.
+	} catch (const BaseException &e) {
+		LOGE("%s", e.what());
+		return e.getError();
+	}
+
+	LOGD("LEAVE");
+
+	return MEDIA_VISION_ERROR_NONE;
+}
+
+/* Internal (open) implementation of mv_image_classification_get_engine_type().
+ * The string returned via engine_type aliases task-owned storage; the caller
+ * must not free it. */
+int mv_image_classification_get_engine_type_open(mv_image_classification_h handle, const unsigned int engine_index,
+												 char **engine_type)
+{
+	if (!handle) {
+		LOGE("Handle is NULL.");
+		return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+	}
+
+	try {
+		auto context = static_cast<Context *>(handle);
+		auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+		task->getEngineType(engine_index, engine_type);
+		// TODO.
+	} catch (const BaseException &e) {
+		LOGE("%s", e.what());
+		return e.getError();
+	}
+
+	LOGD("LEAVE");
+
+	return MEDIA_VISION_ERROR_NONE;
+}
+
+/* Internal (open) implementation of mv_image_classification_get_device_count().
+ * NOTE(review): engine_type is not NULL-checked here; it is forwarded to the
+ * task layer as-is — confirm the public wrapper guards it. */
+int mv_image_classification_get_device_count_open(mv_image_classification_h handle, const char *engine_type,
+												  unsigned int *device_count)
+{
+	if (!handle) {
+		LOGE("Handle is NULL.");
+		return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+	}
+
+	try {
+		auto context = static_cast<Context *>(handle);
+		auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+		task->getNumberOfDevices(engine_type, device_count);
+		// TODO.
+	} catch (const BaseException &e) {
+		LOGE("%s", e.what());
+		return e.getError();
+	}
+
+	LOGD("LEAVE");
+
+	return MEDIA_VISION_ERROR_NONE;
+}
+
+int mv_image_classification_get_device_type_open(mv_image_classification_h handle, const char *engine_type,
+ const unsigned int device_index, char **device_type)
+{
+ if (!handle) {
+ LOGE("Handle is NULL.");
+ return MEDIA_VISION_ERROR_INVALID_PARAMETER;
+ }
+
+ try {
+ auto context = static_cast<Context *>(handle);
+ auto task = static_cast<ImageClassificationTask *>(context->__tasks.at("image_classification"));
+
+ task->getDeviceType(engine_type, device_index, device_type);
+ // TODO.
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
ObjectDetectionResult _result;
public:
- MobilenetV1Ssd();
+ MobilenetV1Ssd(ObjectDetectionTaskType task_type);
~MobilenetV1Ssd();
ObjectDetectionResult &result() override;
Box decodeBoxWithAnchor(const BoxAnchorParam *boxAnchorParm, Box &box, cv::Rect2f &anchor);
public:
- MobilenetV2Ssd();
+ MobilenetV2Ssd(ObjectDetectionTaskType task_type);
~MobilenetV2Ssd();
ObjectDetectionResult &result() override;
private:
void loadLabel();
+ ObjectDetectionTaskType _task_type;
+
protected:
std::unique_ptr<mediavision::inference::Inference> _inference;
std::unique_ptr<MediaVision::Common::EngineConfig> _config;
std::unique_ptr<MetaParser> _parser;
std::vector<std::string> _labels;
Preprocess _preprocess;
- std::string _modelName;
std::string _modelFilePath;
std::string _modelMetaFilePath;
std::string _modelDefaultPath;
void getOutputTensor(std::string target_name, std::vector<float> &tensor);
public:
- ObjectDetection();
+ ObjectDetection(ObjectDetectionTaskType task_type);
virtual ~ObjectDetection() = default;
- void setUserModel(std::string &model_name, std::string &model_file, std::string &meta_file,
- std::string &label_file);
- void setTaskType(ObjectDetectionTaskType task_type);
+ void setUserModel(std::string model_file, std::string meta_file, std::string label_file);
void parseMetaFile();
void configure();
void prepare();
private:
std::unique_ptr<ObjectDetection> _object_detection;
T _source;
- ObjectDetectionTaskType _task_type {};
+ std::string _model_name;
+ std::string _model_file;
+ std::string _meta_file;
+ std::string _label_file;
public:
ObjectDetectionAdapter();
void create(int type) override;
+ void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name) override;
+ void setEngineInfo(const char *engine_type, const char *device_type) override;
void configure() override;
+ void getNumberOfEngines(unsigned int *number_of_engines) override;
+ void getEngineType(unsigned int engine_index, char **engine_type) override;
+ void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+ void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void prepare() override;
void setInput(T &t) override;
void perform() override;
{
struct ObjectDetectionInput {
mv_source_h inference_src;
- std::string model_name;
- std::string model_file;
- std::string meta_file;
- std::string label_file;
+ // TODO.
};
/**
{
namespace machine_learning
{
-MobilenetV1Ssd::MobilenetV1Ssd() : _result()
+// Forward the requested task type to the ObjectDetection base class, which
+// stores it for later meta-file parsing.
+MobilenetV1Ssd::MobilenetV1Ssd(ObjectDetectionTaskType task_type) : ObjectDetection(task_type), _result()
 {}
MobilenetV1Ssd::~MobilenetV1Ssd()
{
namespace machine_learning
{
-MobilenetV2Ssd::MobilenetV2Ssd() : _result()
+// Forward the requested task type to the ObjectDetection base class, which
+// stores it for later meta-file parsing.
+MobilenetV2Ssd::MobilenetV2Ssd(ObjectDetectionTaskType task_type) : ObjectDetection(task_type), _result()
 {}
MobilenetV2Ssd::~MobilenetV2Ssd()
#include <string>
#include <algorithm>
#include <mutex>
+#include <iostream>
using namespace std;
using namespace mediavision::inference;
auto context = static_cast<Context *>(handle);
auto task = static_cast<ObjectDetectionTask *>(context->__tasks.at("object_detection"));
- ObjectDetectionInput input;
-
- input.model_name = string(model_name);
- input.model_file = string(model_file);
- input.meta_file = string(meta_file);
- input.label_file = string(label_file);
-
- task->setInput(input);
+ task->setModelInfo(model_file, meta_file, label_file, model_name);
} catch (const BaseException &e) {
LOGE("%s", e.what());
return e.getError();
{
namespace machine_learning
{
-ObjectDetection::ObjectDetection() : _backendType(), _targetDeviceType()
+// Keep the task type given at construction time; parseMetaFile() passes it
+// to the meta parser, replacing the removed setTaskType() step.
+ObjectDetection::ObjectDetection(ObjectDetectionTaskType task_type)
+		: _task_type(task_type), _backendType(), _targetDeviceType()
 {
 	_inference = make_unique<Inference>();
 	_parser = make_unique<ObjectDetectionParser>();
 }
-void ObjectDetection::setUserModel(string &model_name, string &model_file, string &meta_file, string &label_file)
+void ObjectDetection::setUserModel(string model_file, string meta_file, string label_file)
{
- _modelName = model_name;
_modelFilePath = model_file;
_modelMetaFilePath = meta_file;
_modelLabelFilePath = label_file;
readFile.close();
}
-void ObjectDetection::setTaskType(ObjectDetectionTaskType task_type)
-{
- _parser->setTaskType(static_cast<int>(task_type));
-}
-
void ObjectDetection::parseMetaFile()
{
_config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(MV_OBJECT_DETECTION_META_FILE_NAME));
_modelMetaFilePath = _modelDefaultPath + _modelMetaFilePath;
LOGI("meta file path = %s", _modelMetaFilePath.c_str());
+ _parser->setTaskType(static_cast<int>(_task_type));
_parser->load(_modelMetaFilePath);
if (_modelLabelFilePath.empty()) {
template<typename T, typename V> void ObjectDetectionAdapter<T, V>::create(int type)
{
- if (!_source.model_name.empty()) {
- transform(_source.model_name.begin(), _source.model_name.end(), _source.model_name.begin(), ::toupper);
+ if (!_model_name.empty()) {
+ transform(_model_name.begin(), _model_name.end(), _model_name.begin(), ::toupper);
- if (_source.model_name == string("MOBILENET_V1_SSD"))
+ if (_model_name == string("MOBILENET_V1_SSD"))
type = static_cast<int>(ObjectDetectionTaskType::MOBILENET_V1_SSD);
- else if (_source.model_name == string("MOBILENET_V2_SSD"))
+ else if (_model_name == string("MOBILENET_V2_SSD"))
type = static_cast<int>(ObjectDetectionTaskType::MOBILENET_V2_SSD);
// TODO.
else
switch (static_cast<ObjectDetectionTaskType>(type)) {
case ObjectDetectionTaskType::MOBILENET_V1_SSD:
- _object_detection = make_unique<MobilenetV1Ssd>();
+ _object_detection = make_unique<MobilenetV1Ssd>(static_cast<ObjectDetectionTaskType>(type));
break;
case ObjectDetectionTaskType::MOBILENET_V2_SSD:
- _object_detection = make_unique<MobilenetV2Ssd>();
+ _object_detection = make_unique<MobilenetV2Ssd>(static_cast<ObjectDetectionTaskType>(type));
break;
default:
throw InvalidParameter("Invalid object detection task type.");
}
- _task_type = static_cast<ObjectDetectionTaskType>(type);
+ _object_detection->setUserModel(_model_file, _meta_file, _label_file);
}
-template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+						const char *model_name)
+{
+	// Constructing std::string from a null pointer is undefined behavior,
+	// so map NULL C strings to empty strings before converting.
+	_model_name = model_name ? string(model_name) : string();
+
+	_model_file = model_file ? string(model_file) : string();
+	_meta_file = meta_file ? string(meta_file) : string();
+	_label_file = label_file ? string(label_file) : string();
+
+	// At least one piece of model information must be provided.
+	if (_model_file.empty() && _meta_file.empty() && _label_file.empty())
+		throw InvalidParameter("Model info is invalid.");
+}
- _object_detection->setTaskType(_task_type);
+// NOTE(review): engine/device selection is not implemented yet for the
+// object detection task; the given types are currently ignored.
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
+// Parse the meta file first, then configure the underlying object detection
+// instance (the task type handed over at create() drives the parsing).
+template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
+{
+	_object_detection->parseMetaFile();
+	_object_detection->configure();
+}
+// NOTE(review): the engine/device query interface below is not implemented
+// yet for the object detection task; these stubs only satisfy the overridden
+// interface and leave the output parameters untouched.
+template<typename T, typename V> void ObjectDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+template<typename T, typename V>
+void ObjectDetectionAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index, char **device_type)
+{}
+
+
template<typename T, typename V> void ObjectDetectionAdapter<T, V>::prepare()
{
_object_detection->prepare();
~ObjectDetection3dAdapter();
void create(int type) override;
-
+ void setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+ const char *model_name) override;
+ void setEngineInfo(const char *engine_type, const char *device_type) override;
+ void getNumberOfEngines(unsigned int *number_of_engines) override;
+ void getEngineType(unsigned int engine_index, char **engine_type) override;
+ void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices) override;
+ void getDeviceType(const char *engine_type, unsigned int device_index, char **device_type) override;
void configure() override;
void prepare() override;
void setInput(T &t) override;
}
}
+// NOTE(review): model and engine selection are not supported yet for the
+// object detection 3d task; the stubs only satisfy the overridden interface.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::setModelInfo(const char *model_file, const char *meta_file, const char *label_file,
+						  const char *model_name)
+{}
+
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::setEngineInfo(const char *engine_type, const char *device_type)
+{}
+
+
 template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::configure()
 {
+	// Parse the meta file first, then configure the underlying 3d task.
 	_object_detection_3d->parseMetaFile();
 	_object_detection_3d->configure();
 }
+// NOTE(review): the engine/device query interface below is not implemented
+// yet for the object detection 3d task; these stubs only satisfy the
+// overridden interface and leave the output parameters untouched.
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
+{}
+
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getEngineType(unsigned int engine_index, char **engine_type)
+{}
+
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices)
+{}
+
+template<typename T, typename V>
+void ObjectDetection3dAdapter<T, V>::getDeviceType(const char *engine_type, unsigned int device_index,
+						   char **device_type)
+{}
+
+
template<typename T, typename V> void ObjectDetection3dAdapter<T, V>::prepare()
{
_object_detection_3d->prepare();
string label_file;
};
+// Enumerate every available inference engine and, for each engine, every
+// supported device type; all enumeration calls must succeed.
+TEST(ImageClassificationTest, GettingAvailableInferenceEnginesInfoShouldBeOk)
+{
+	mv_image_classification_h handle;
+
+	int ret = mv_image_classification_create(&handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+	unsigned int engine_count = 0;
+
+	ret = mv_image_classification_get_engine_count(handle, &engine_count);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+	cout << "Engine count = " << engine_count << endl;
+	ASSERT_GE(engine_count, 1);
+
+	for (unsigned int engine_idx = 0; engine_idx < engine_count; ++engine_idx) {
+		char *engine_type = nullptr;
+
+		ret = mv_image_classification_get_engine_type(handle, engine_idx, &engine_type);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+		cout << "Engine type : " << engine_type << endl;
+
+		unsigned int device_count = 0;
+
+		ret = mv_image_classification_get_device_count(handle, engine_type, &device_count);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+		cout << "Device count = " << device_count << endl;
+
+		// Each engine must expose at least one device. (Fixed a copy-paste
+		// bug which re-checked engine_count here, leaving device_count
+		// unverified.)
+		ASSERT_GE(device_count, 1);
+
+		for (unsigned int device_idx = 0; device_idx < device_count; ++device_idx) {
+			char *device_type = nullptr;
+
+			ret = mv_image_classification_get_device_type(handle, engine_type, device_idx, &device_type);
+			ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+			cout << "Device type : " << device_type << endl;
+		}
+	}
+
+	ret = mv_image_classification_destroy(handle);
+	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+}
+
TEST(ImageClassificationTest, InferenceShouldBeOk)
{
mv_image_classification_h handle;
mv_image_classification_set_model(handle, model.model_file.c_str(), model.meta_file.c_str(),
model.label_file.c_str());
+ mv_image_classification_set_engine(handle, "tflite", "cpu");
ret = mv_image_classification_configure(handle);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);