int mMaxOutputNumbers;
- std::string mInputNodeName; /**< The input node name */
- std::vector<std::string> mOutputNodeNames; /**< The output node names */
+ std::vector<std::string> mInputLayerNames; /**< The input layer names */
+ std::vector<std::string> mOutputLayerNames; /**< The output layer names */
};
* @brief Configure input tensor information
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by ConfigureInputInfo
*/
void ConfigureTensorInfo(int width,
int height,
double stdValue,
double meanValue);
+ /**
+ * @brief Configure input information
+ *
+ * @since_tizen 6.0
+ */
+ void ConfigureInputInfo(int width,
+ int height,
+ int dim,
+ int ch,
+ double stdValue,
+ double meanValue,
+ const std::vector<std::string> names);
+
+ void ConfigureOutputInfo(std::vector<std::string> names);
+
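+ /*
+ * Illustrative call sequence on an existing Inference object (a sketch;
+ * the 224x224 single-batch 3-channel geometry, the 127.5 normalization
+ * values, and the "input"/"output" layer names are assumptions, not
+ * defaults of this class):
+ *
+ *   inference.ConfigureInputInfo(224, 224, 1, 3, 127.5, 127.5,
+ *                                std::vector<std::string>(1, "input"));
+ *   inference.ConfigureOutputInfo(std::vector<std::string>(1, "output"));
+ */
+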
/**
* @brief Configure inference backend type.
*
* @brief Configure the input node name
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by ConfigureInputInfo
*/
void ConfigureInputNodeName(const std::string nodeName);
* @brief Configure the output node names
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by ConfigureOutputInfo
*/
void ConfigureOutputNodeNames(const std::vector<std::string> nodeNames);
* @brief Configure the tensor information to the inference handle
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_input_info_open
*
* @param [in] infer The handle to the inference
* @param [in] engine_config The handle to the configuration of
* @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
* @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
*/
int mv_inference_configure_tensor_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
+/**
+ * @brief Configure the input information to the inference handle
+ *
+ * @since_tizen 6.0
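+ * @remarks Reads #MV_INFERENCE_INPUT_TENSOR_WIDTH, #MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ *          #MV_INFERENCE_INPUT_TENSOR_CHANNELS, #MV_INFERENCE_MODEL_MEAN_VALUE,
+ *          #MV_INFERENCE_MODEL_STD_VALUE, and #MV_INFERENCE_INPUT_NODE_NAME
+ *          from @a engine_config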
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
/**
* @brief Configure the backend to the inference handle
* @brief Configure the number of output to the inference handle
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_post_process_info_open
*
* @param [in] infer The handle to the inference
* @param [in] engine_config The handle to the configuration of
* @brief Configure the confidence threshold value to the inference handle
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_post_process_info_open
*
* @param [in] infer The handle to the inference
* @param [in] engine_config The handle to the configuration of
*/
int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config);
+/**
+ * @brief Configure the post process information to the inference handle
+ *
+ * @since_tizen 6.0
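+ * @remarks Reads #MV_INFERENCE_OUTPUT_MAX_NUMBER and
+ *          #MV_INFERENCE_CONFIDENCE_THRESHOLD from @a engine_config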
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_post_process_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
/**
* @brief Configure the input node name to the inference handle
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_input_info_open
*
* @param [in] infer The handle to the inference
* @param [in] engine_config The handle to the configuration of
* @brief Configure the set of output node names to the inference handle
*
* @since_tizen 5.5
+ * @remarks deprecated Replaced by mv_inference_configure_output_info_open
*
* @param [in] infer The handle to the inference
* @param [in] engine_config The handle to the configuration of
*/
int mv_inference_configure_output_node_names_open(mv_inference_h infer, mv_engine_config_h engine_config);
+/**
+ * @brief Configure the output information to the inference handle
+ *
+ * @since_tizen 6.0
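+ * @remarks Reads #MV_INFERENCE_OUTPUT_NODE_NAMES from @a engine_config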
+ *
+ * @param [in] infer The handle to the inference
+ * @param [in] engine_config The handle to the configuration of
+ * engine.
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
+ * in @a engine_config
+ * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
+ * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
+ * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
+ */
+int mv_inference_configure_output_info_open(mv_inference_h infer, mv_engine_config_h engine_config);
+
/**
* @brief Prepare inference.
* @details Use this function to prepare inference based on
const std::string modelWeightFilePath,
const std::string modelUserFilePath)
{
+ LOGI("ENTER");
+
mConfig.mConfigFilePath = modelConfigFilePath;
mConfig.mWeightFilePath = modelWeightFilePath;
mConfig.mUserFilePath = modelUserFilePath;
+
+ LOGI("LEAVE");
}
void Inference::ConfigureTensorInfo(int width,
double stdValue,
double meanValue)
{
+ LOGI("ENTER");
+
+ // This deprecated API has no dim/ch parameters; assume a single
+ // batch (1) with 3 channels.
+ mConfig.mTensorInfo = {width, height, 1, 3};
+ mConfig.mStdValue = stdValue;
+ mConfig.mMeanValue = meanValue;
+
+ LOGI("LEAVE");
+}
+
+void Inference::ConfigureInputInfo(int width,
+ int height,
+ int dim,
+ int ch,
+ double stdValue,
+ double meanValue,
+ const std::vector<std::string> names)
+{
+ LOGI("ENTER");
+
mConfig.mTensorInfo = {width, height, dim, ch};
mConfig.mStdValue = stdValue;
mConfig.mMeanValue = meanValue;
+ mConfig.mInputLayerNames = names;
+
+ inference_engine_layer_property property;
+ // If the inference plugin doesn't support getting properties,
+ // the tensor info given by the user will be used.
+ // If the plugin supports it, the given info will be ignored.
+ inference_engine_tensor_info tensor_info;
+
+ tensor_info.data_type = TENSOR_DATA_TYPE_FLOAT32;
+ // OpenCV supports only NCHW
+ tensor_info.shape_type = TENSOR_SHAPE_NCHW;
+ // TODO: extend this to handle multiple tensor infos
+ tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
+ tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
+ tensor_info.shape.push_back(mConfig.mTensorInfo.height);
+ tensor_info.shape.push_back(mConfig.mTensorInfo.width);
+
+ tensor_info.size = 1;
+ for (std::vector<int>::iterator iter = tensor_info.shape.begin();
+ iter != tensor_info.shape.end(); ++iter) {
+ tensor_info.size *= (*iter);
+ }
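+ // e.g. an assumed 224x224 RGB input with a single batch gives
+ // shape {1, 3, 224, 224} and size 1 * 3 * 224 * 224 = 150528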
+
+ property.layer_names = mConfig.mInputLayerNames;
+ property.tensor_infos.push_back(tensor_info);
+
+ int ret = mBackend->SetInputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to set input layer property");
+ }
+
+ LOGI("LEAVE");
+}
+
+void Inference::ConfigureOutputInfo(const std::vector<std::string> names)
+{
+ LOGI("ENTER");
+
+ mConfig.mOutputLayerNames = names;
+
+ inference_engine_layer_property property;
+
+ property.layer_names = names;
+ int ret = mBackend->SetOutputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Fail to set output layer property");
+ }
+
+ LOGI("LEAVE");
}
int Inference::ConfigureBackendType(const mv_inference_backend_type_e backendType)
void Inference::ConfigureInputNodeName(const std::string nodeName)
{
- mConfig.mInputNodeName = nodeName;
+ mConfig.mInputLayerNames.push_back(nodeName);
inference_engine_layer_property property;
void Inference::ConfigureOutputNodeNames(const std::vector<std::string> nodeNames)
{
- mConfig.mOutputNodeNames = nodeNames;
+ mConfig.mOutputLayerNames = nodeNames;
inference_engine_layer_property property;
return ret;
}
- ret = mv_inference_configure_tensor_info_open(infer, engine_config);
+ // input tensor, input layer
+ ret = mv_inference_configure_input_info_open(infer, engine_config);
if (ret != MEDIA_VISION_ERROR_NONE){
- LOGE("Fail to configure tensor information");
+ LOGE("Fail to configure input info");
return ret;
}
- ret = mv_inference_configure_output_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure output");
- return ret;
- }
-
- ret = mv_inference_configure_confidence_threshold_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure confidence threshold");
- return ret;
- }
-
- ret = mv_inference_configure_input_node_name_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure input node name");
+ // output layer
+ ret = mv_inference_configure_output_info_open(infer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure output info");
return ret;
}
- ret = mv_inference_configure_output_node_names_open(infer, engine_config);
+ // maximum candidates, threshold
+ ret = mv_inference_configure_post_process_info_open(infer, engine_config);
if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure output node names");
+ LOGE("Fail to configure post process info");
return ret;
}
int mv_inference_configure_tensor_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int ret = MEDIA_VISION_ERROR_NONE;
_ERROR_ :
+ LOGI("LEAVE");
+
+ return ret;
+}
+
+int mv_inference_configure_input_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ LOGI("ENTER");
+
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ int tensorWidth, tensorHeight, tensorDim, tensorCh;
+ double meanValue, stdValue;
+ char *node_name = NULL;
+
+ // This should be 1; only a single batch is supported
+ tensorDim = 1;
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_TENSOR_WIDTH,
+ &tensorWidth);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get tensor width");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_TENSOR_HEIGHT,
+ &tensorHeight);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get tensor height");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_INPUT_TENSOR_CHANNELS,
+ &tensorCh);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get tensor channels");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_MODEL_MEAN_VALUE,
+ &meanValue);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get meanValue");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_MODEL_STD_VALUE,
+ &stdValue);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get stdValue");
+ goto _ERROR_;
+ }
+
+ ret = mv_engine_config_get_string_attribute(engine_config,
+ MV_INFERENCE_INPUT_NODE_NAME,
+ &node_name);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get input node name");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureInputInfo(tensorWidth,
+ tensorHeight,
+ tensorDim,
+ tensorCh,
+ stdValue,
+ meanValue,
+ std::vector<std::string>(1, std::string(node_name)));
+
+_ERROR_ :
+
+ if (node_name) {
+ free(node_name);
+ node_name = NULL;
+ }
+
+ LOGI("LEAVE");
+
return ret;
}
int mv_inference_configure_engine_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int backendType = 0;
int ret = MEDIA_VISION_ERROR_NONE;
LOGE("Fail to bind a backend engine.");
}
+ LOGI("LEAVE");
_ERROR_:
return ret;
}
int mv_inference_configure_output_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int maxOutput = 0;
pInfer->ConfigureOutput(maxOutput);
+ LOGI("LEAVE");
_ERROR_:
return ret;
}
int mv_inference_configure_confidence_threshold_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
double threshold = 0;
pInfer->ConfigureThreshold(threshold);
+ LOGI("LEAVE");
+_ERROR_:
+ return ret;
+}
+
+int mv_inference_configure_post_process_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ LOGI("ENTER");
+
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int maxOutput = 0;
+ double threshold = 0;
+ int ret = MEDIA_VISION_ERROR_NONE;
+
+ ret = mv_engine_config_get_int_attribute(engine_config,
+ MV_INFERENCE_OUTPUT_MAX_NUMBER,
+ &maxOutput);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference output maximum numbers");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureOutput(maxOutput);
+
+ ret = mv_engine_config_get_double_attribute(engine_config,
+ MV_INFERENCE_CONFIDENCE_THRESHOLD,
+ &threshold);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get inference confidence threshold value");
+ goto _ERROR_;
+ }
+
+ pInfer->ConfigureThreshold(threshold);
+
+ LOGI("LEAVE");
_ERROR_:
return ret;
}
int mv_inference_configure_input_node_name_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int ret = MEDIA_VISION_ERROR_NONE;
node_name = NULL;
}
+ LOGI("LEAVE");
+
return ret;
}
int mv_inference_configure_output_node_names_open(mv_inference_h infer, mv_engine_config_h engine_config)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int ret = MEDIA_VISION_ERROR_NONE;
node_names = NULL;
}
+ LOGI("LEAVE");
+
+ return ret;
+}
+
+int mv_inference_configure_output_info_open(mv_inference_h infer, mv_engine_config_h engine_config)
+{
+ LOGI("ENTER");
+
+ Inference *pInfer = static_cast<Inference *>(infer);
+
+ int ret = MEDIA_VISION_ERROR_NONE;
+ int idx = 0;
+ char **node_names = NULL;
+ int size = 0;
+ std::vector<std::string> names;
+ ret = mv_engine_config_get_array_string_attribute(engine_config,
+ MV_INFERENCE_OUTPUT_NODE_NAMES,
+ &node_names,
+ &size);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to get output node names");
+ goto _ERROR_;
+ }
+
+ for (idx = 0 ; idx < size; ++idx)
+ names.push_back(std::string(node_names[idx]));
+
+ pInfer->ConfigureOutputInfo(names);
+
+_ERROR_:
+
+ if (node_names) {
+ for (idx = 0; idx < size; ++idx) {
+ free(node_names[idx]);
+ }
+ free(node_names);
+ node_names = NULL;
+ }
+
+ LOGI("LEAVE");
+
return ret;
}
int mv_inference_prepare_open(mv_inference_h infer)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int ret = MEDIA_VISION_ERROR_NONE;
if (ret != MEDIA_VISION_ERROR_NONE)
LOGE("Fail to load model files.");
+ LOGI("LEAVE");
+
return ret;
}
mv_inference_supported_engine_cb callback,
void *user_data)
{
+ LOGI("ENTER");
+
Inference *pInfer = static_cast<Inference *>(infer);
int ret = MEDIA_VISION_ERROR_NONE;
callback((backend.first).c_str(), backend.second, user_data);
}
+ LOGI("LEAVE");
+
return ret;
}