/**
 * @brief Returns the engine configuration handle associated with an inference handle.
 *
 * @param[in] infer  Inference handle; must be a valid handle previously created
 *                   by the inference-open path (internally an @c Inference*).
 * @return The @c mv_engine_config_h stored on the inference object via
 *         @c Inference::GetEngineConfig(). No ownership is transferred.
 *
 * NOTE(review): no NULL check on @p infer here — callers are assumed to have
 * validated the handle (consistent with the other open-layer functions).
 */
mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer)
{
	Inference *pInfer = static_cast<Inference *>(infer);

	return pInfer->GetEngineConfig();
}
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
-
char *modelConfigFilePath = NULL;
char *modelWeightFilePath = NULL;
char *modelUserFilePath = NULL;
int backendType = 0;
size_t userFileLength = 0;
- ret = mv_engine_config_get_string_attribute(
+ int ret = mv_engine_config_get_string_attribute(
engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
&modelConfigFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model configuration file path");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_string_attribute(
&modelWeightFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model weight file path");
- goto _ERROR_;
+ goto release_model_config_file_path;
}
ret = mv_engine_config_get_string_attribute(
&modelUserFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model user file path");
- goto _ERROR_;
+ goto release_model_weight_file_path;
}
ret = mv_engine_config_get_string_attribute(
&modelMetaFilePath);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model meta file path");
- goto _ERROR_;
+ goto release_model_user_file_path;
}
ret = mv_engine_config_get_double_attribute(
engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &modelMeanValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get model mean value");
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference backend type");
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
if (access(modelWeightFilePath, F_OK)) {
LOGE("weightFilePath in [%s] ", modelWeightFilePath);
ret = MEDIA_VISION_ERROR_INVALID_PATH;
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
if ((backendType > MV_INFERENCE_BACKEND_NONE &&
if (access(modelConfigFilePath, F_OK)) {
LOGE("modelConfigFilePath in [%s] ", modelConfigFilePath);
ret = MEDIA_VISION_ERROR_INVALID_PATH;
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
}
userFileLength = strlen(modelUserFilePath);
+
if (userFileLength > 0 && access(modelUserFilePath, F_OK)) {
LOGE("categoryFilePath in [%s] ", modelUserFilePath);
ret = MEDIA_VISION_ERROR_INVALID_PATH;
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
pInfer->ConfigureModelFiles(std::string(modelConfigFilePath),
if (std::string(modelMetaFilePath).empty()) {
LOGW("Skip ParseMetadata and run without Metadata");
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
if (!IsJsonFile(std::string(modelMetaFilePath))) {
ret = MEDIA_VISION_ERROR_INVALID_PATH;
LOGE("Model meta file should be json");
- goto _ERROR_;
+ goto release_model_meta_file_path;
}
ret = pInfer->ParseMetadata(std::string(modelMetaFilePath));
LOGE("Fail to ParseMetadata");
}
-_ERROR_:
- if (modelConfigFilePath)
- free(modelConfigFilePath);
-
- if (modelWeightFilePath)
- free(modelWeightFilePath);
+release_model_meta_file_path:
+ if (modelMetaFilePath)
+ free(modelMetaFilePath);
+release_model_user_file_path:
if (modelUserFilePath)
free(modelUserFilePath);
- if (modelMetaFilePath)
- free(modelMetaFilePath);
+release_model_weight_file_path:
+ if (modelWeightFilePath)
+ free(modelWeightFilePath);
+
+release_model_config_file_path:
+ if (modelConfigFilePath)
+ free(modelConfigFilePath);
+out_of_function:
LOGI("LEAVE");
return ret;
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
-
- int tensorWidth, tensorHeight, tensorDim, tensorCh;
+ int tensorWidth, tensorHeight, tensorCh;
double meanValue, stdValue;
- // This should be one. only one batch is supported
- tensorDim = 1;
- ret = mv_engine_config_get_int_attribute(
+ int ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor width");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor height");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor channels");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_double_attribute(
engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get meanValue");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_double_attribute(
engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get stdValue");
- goto _ERROR_;
+ goto out_of_function;
}
- pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, tensorDim, tensorCh,
+ pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, 1, tensorCh,
stdValue, meanValue);
-
-_ERROR_:
-
+out_of_function:
LOGI("LEAVE");
return ret;
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
-
- int tensorWidth, tensorHeight, tensorDim, tensorCh;
+ int tensorWidth, tensorHeight, tensorCh;
double meanValue, stdValue;
char *node_name = NULL;
int dataType = 0;
- // This should be one. only one batch is supported
- tensorDim = 1;
- ret = mv_engine_config_get_int_attribute(
+ int ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor width");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor height");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor channels");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_double_attribute(
engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get meanValue");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_double_attribute(
engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get stdValue");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_INPUT_DATA_TYPE, &dataType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get a input tensor data type");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_string_attribute(
engine_config, MV_INFERENCE_INPUT_NODE_NAME, &node_name);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get tensor width");
- goto _ERROR_;
+ goto out_of_function;
}
pInfer->ConfigureInputInfo(
- tensorWidth, tensorHeight, tensorDim, tensorCh, stdValue, meanValue,
+ tensorWidth, tensorHeight, 1, tensorCh, stdValue, meanValue,
dataType, std::vector<std::string>(1, std::string(node_name)));
-_ERROR_:
-
if (node_name) {
free(node_name);
node_name = NULL;
}
+out_of_function:
LOGI("LEAVE");
return ret;
Inference *pInfer = static_cast<Inference *>(infer);
int backendType = 0;
int targetTypes = 0;
- int ret = MEDIA_VISION_ERROR_NONE;
pInfer->SetEngineConfig(engine_config);
- ret = mv_engine_config_get_int_attribute(
+ int ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference backend type");
- goto _ERROR_;
+ goto out_of_function;
}
ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference target type");
- goto _ERROR_;
+ goto out_of_function;
}
ret = pInfer->ConfigureBackendType(
(mv_inference_backend_type_e) backendType);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to configure a backend type.");
- goto _ERROR_;
+ goto out_of_function;
}
bool is_new_version;
// Ps. this function will be dropped with deprecated code version-after-next of Tizen.
ret = check_mv_inference_engine_version(engine_config, &is_new_version);
if (ret != MEDIA_VISION_ERROR_NONE)
- goto _ERROR_;
+ goto out_of_function;
// Convert old type to new one and then use it if is_new_version is false
if (pInfer->ConfigureTargetTypes(targetTypes, is_new_version) !=
MEDIA_VISION_ERROR_NONE) {
LOGE("Tried to configure invalid target types.");
- goto _ERROR_;
+ goto out_of_function;
}
// Create a inference-engine-common class object and load its corresponding library.
ret = pInfer->Bind();
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to bind a backend engine.");
- goto _ERROR_;
+ goto out_of_function;
}
if (!pInfer->IsTargetDeviceSupported(targetTypes)) {
LOGE("Tried to configure invalid target types.");
}
-_ERROR_:
+out_of_function:
LOGI("LEAVE");
+
return ret;
}
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
int maxOutput = 0;
- int ret = MEDIA_VISION_ERROR_NONE;
- ret = mv_engine_config_get_int_attribute(
+ int ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference output maximum numbers");
- goto _ERROR_;
+ goto out_of_function;
}
pInfer->ConfigureOutput(maxOutput);
+out_of_function:
LOGI("LEAVE");
-_ERROR_:
+
return ret;
}
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
double threshold = 0;
- int ret = MEDIA_VISION_ERROR_NONE;
- ret = mv_engine_config_get_double_attribute(
+ int ret = mv_engine_config_get_double_attribute(
engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference confidence threshold value");
- goto _ERROR_;
+ goto out_of_function;
}
pInfer->ConfigureThreshold(threshold);
+out_of_function:
LOGI("LEAVE");
-_ERROR_:
+
return ret;
}
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
int maxOutput = 0;
double threshold = 0;
- int ret = MEDIA_VISION_ERROR_NONE;
- ret = mv_engine_config_get_int_attribute(
+ int ret = mv_engine_config_get_int_attribute(
engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference output maximum numbers");
- goto _ERROR_;
+ goto out_of_function;
}
pInfer->ConfigureOutput(maxOutput);
engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference confidence threshold value");
- goto _ERROR_;
+ goto out_of_function;
}
pInfer->ConfigureThreshold(threshold);
+out_of_function:
LOGI("LEAVE");
-_ERROR_:
+
return ret;
}
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
int idx = 0;
char **node_names = NULL;
int size = 0;
std::vector<std::string> names;
- ret = mv_engine_config_get_array_string_attribute(
+
+ int ret = mv_engine_config_get_array_string_attribute(
engine_config, MV_INFERENCE_OUTPUT_NODE_NAMES, &node_names, &size);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get _output_node_names");
- goto _ERROR_;
+ goto out_of_function;
}
for (idx = 0; idx < size; ++idx)
pInfer->ConfigureOutputInfo(names);
-_ERROR_:
-
if (node_names) {
- for (idx = 0; idx < size; ++idx) {
+ for (idx = 0; idx < size; ++idx)
free(node_names[idx]);
- }
+
free(node_names);
node_names = NULL;
}
+out_of_function:
LOGI("LEAVE");
return ret;
Inference *pInfer = static_cast<Inference *>(infer);
- int ret = MEDIA_VISION_ERROR_NONE;
-
// Pass parameters needed to load model files to a backend engine.
- ret = pInfer->Prepare();
+ int ret = pInfer->Prepare();
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to prepare inference");
- return ret;
+ goto out_of_function;
}
// Request to load model files to a backend engine.
if (ret != MEDIA_VISION_ERROR_NONE)
LOGE("Fail to load model files.");
+out_of_function:
LOGI("LEAVE");
return ret;
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
-
- //bool isSupported = false;
- //char str[1024] = {'\0'};
std::pair<std::string, bool> backend;
+
for (int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
backend = pInfer->GetSupportedInferenceBackend(i);
callback((backend.first).c_str(), backend.second, user_data);
LOGI("LEAVE");
- return ret;
+ return MEDIA_VISION_ERROR_NONE;
}
int mv_inference_image_classify_open(
mv_inference_image_classified_cb classified_cb, void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
- int numberOfOutputs = 0;
std::vector<mv_source_h> sources;
std::vector<mv_rectangle_s> rects;
if (roi != NULL)
rects.push_back(*roi);
- ret = pInfer->Run(sources, rects);
+ int ret = pInfer->Run(sources, rects);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to run inference");
return ret;
return ret;
}
- numberOfOutputs = classificationResults.number_of_classes;
+ int numberOfOutputs = classificationResults.number_of_classes;
- int *indices = classificationResults.indices.data();
- float *confidences = classificationResults.confidences.data();
static const int START_CLASS_NUMBER = 10;
static std::vector<const char *> names(START_CLASS_NUMBER);
names[n] = classificationResults.names[n].c_str();
}
+ int *indices = classificationResults.indices.data();
+ float *confidences = classificationResults.confidences.data();
+
classified_cb(source, numberOfOutputs, indices, names.data(), confidences,
user_data);
void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
- int numberOfOutputs = 0;
std::vector<mv_source_h> sources;
std::vector<mv_rectangle_s> rects;
sources.push_back(source);
-
- ret = pInfer->Run(sources, rects);
+ int ret = pInfer->Run(sources, rects);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to run inference");
return ret;
}
ObjectDetectionResults objectDetectionResults;
+
ret = pInfer->GetObjectDetectionResults(&objectDetectionResults);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference results");
return ret;
}
- numberOfOutputs = objectDetectionResults.number_of_objects;
-
- int *indices = objectDetectionResults.indices.data();
- float *confidences = objectDetectionResults.confidences.data();
+ int numberOfOutputs = objectDetectionResults.number_of_objects;
static const int START_OBJECT_NUMBER = 20;
static std::vector<const char *> names(START_OBJECT_NUMBER);
static std::vector<mv_rectangle_s> locations(START_OBJECT_NUMBER);
locations[n].height = objectDetectionResults.locations[n].height;
}
+ int *indices = objectDetectionResults.indices.data();
+ float *confidences = objectDetectionResults.confidences.data();
+
detected_cb(source, numberOfOutputs, indices, names.data(), confidences,
locations.data(), user_data);
void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
- int numberOfOutputs = 0;
std::vector<mv_source_h> sources;
std::vector<mv_rectangle_s> rects;
sources.push_back(source);
- ret = pInfer->Run(sources, rects);
+ int ret = pInfer->Run(sources, rects);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to run inference");
return ret;
}
FaceDetectionResults faceDetectionResults;
+
ret = pInfer->GetFaceDetectionResults(&faceDetectionResults);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get inference results");
return ret;
}
- numberOfOutputs = faceDetectionResults.number_of_faces;
-
- float *confidences = faceDetectionResults.confidences.data();
+ int numberOfOutputs = faceDetectionResults.number_of_faces;
std::vector<mv_rectangle_s> locations(numberOfOutputs);
for (int n = 0; n < numberOfOutputs; ++n) {
locations[n].height = faceDetectionResults.locations[n].height;
}
+ float *confidences = faceDetectionResults.confidences.data();
+
detected_cb(source, numberOfOutputs, confidences, locations.data(),
user_data);
mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
- int numberOfLandmarks = 0;
- std::vector<mv_source_h> sources;
- std::vector<mv_rectangle_s> rects;
-
unsigned int width, height;
- ret = mv_source_get_width(source, &width);
+
+ int ret = mv_source_get_width(source, &width);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get width");
return ret;
return ret;
}
+ std::vector<mv_source_h> sources;
+ std::vector<mv_rectangle_s> rects;
+
sources.push_back(source);
if (roi != NULL)
}
FacialLandMarkDetectionResults facialLandMarkDetectionResults;
+
ret = pInfer->GetFacialLandMarkDetectionResults(
&facialLandMarkDetectionResults, width, height);
if (ret != MEDIA_VISION_ERROR_NONE) {
return ret;
}
- numberOfLandmarks = facialLandMarkDetectionResults.number_of_landmarks;
-
+ int numberOfLandmarks = facialLandMarkDetectionResults.number_of_landmarks;
std::vector<mv_point_s> locations(numberOfLandmarks);
for (int n = 0; n < numberOfLandmarks; ++n) {
mv_inference_pose_landmark_detected_cb detected_cb, void *user_data)
{
Inference *pInfer = static_cast<Inference *>(infer);
-
- int ret = MEDIA_VISION_ERROR_NONE;
- std::vector<mv_source_h> sources;
- std::vector<mv_rectangle_s> rects;
-
unsigned int width, height;
- ret = mv_source_get_width(source, &width);
+
+ int ret = mv_source_get_width(source, &width);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to get width");
return ret;
return ret;
}
+ std::vector<mv_source_h> sources;
+ std::vector<mv_rectangle_s> rects;
+
sources.push_back(source);
if (roi != NULL)
}
mv_inference_pose_result_h result = NULL;
+
ret = pInfer->GetPoseLandmarkDetectionResults(
&result, width, height);
if (ret != MEDIA_VISION_ERROR_NONE) {
return ret;
}
- mv_inference_pose_s *tmp = static_cast<mv_inference_pose_s *>(result);
- for (int pose = 0; pose < tmp->number_of_poses; ++pose) {
- for (int index = 0; index < tmp->number_of_landmarks_per_pose; ++index) {
+ mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
+
+ for (int pose = 0; pose < pose_obj->number_of_poses; ++pose) {
+ for (int index = 0; index < pose_obj->number_of_landmarks_per_pose; ++index) {
LOGI("PoseIdx[%2d]: x[%d], y[%d], score[%.3f]", index,
- tmp->landmarks[pose][index].point.x,
- tmp->landmarks[pose][index].point.y,
- tmp->landmarks[pose][index].score);
+ pose_obj->landmarks[pose][index].point.x,
+ pose_obj->landmarks[pose][index].point.y,
+ pose_obj->landmarks[pose][index].score);
}
}
int *number_of_poses)
{
mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
-
*number_of_poses = handle->number_of_poses;
-
LOGI("%d", *number_of_poses);
return MEDIA_VISION_ERROR_NONE;
int *number_of_landmarks)
{
mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
-
*number_of_landmarks = handle->number_of_landmarks_per_pose;
-
LOGI("%d", *number_of_landmarks);
return MEDIA_VISION_ERROR_NONE;
mv_point_s *location,
float *score)
{
- mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
+ mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
- if (pose_index < 0 || pose_index >= handle->number_of_poses)
+ if (pose_index < 0 || pose_index >= pose_obj->number_of_poses)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- if (part_index < 0 || part_index >= handle->number_of_landmarks_per_pose)
+ if (part_index < 0 || part_index >= pose_obj->number_of_landmarks_per_pose)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- *location = handle->landmarks[pose_index][part_index].point;
-
- *score = handle->landmarks[pose_index][part_index].score;
-
+ *location = pose_obj->landmarks[pose_index][part_index].point;
+ *score = pose_obj->landmarks[pose_index][part_index].score;
LOGI("[%d]:(%dx%d) - %.4f", pose_index, location->x, location->y, *score);
return MEDIA_VISION_ERROR_NONE;
int pose_index,
int *label)
{
- mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
+ mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
- if (pose_index < 0 || pose_index >= handle->number_of_poses)
+ if (pose_index < 0 || pose_index >= pose_obj->number_of_poses)
return MEDIA_VISION_ERROR_INVALID_PARAMETER;
- *label = handle->landmarks[pose_index][0].label;
-
+ *label = pose_obj->landmarks[pose_index][0].label;
LOGI("[%d]: label(%d)", pose_index, *label);
return MEDIA_VISION_ERROR_NONE;
{
Posture *pPose = static_cast<Posture *>(pose);
- int ret = MEDIA_VISION_ERROR_NONE;
-
// check file
if (access(motionCaptureFilePath, F_OK) || access(motionMappingFilePath, F_OK)) {
LOGE("Invalid Motion Capture file path [%s]", motionCaptureFilePath);
return MEDIA_VISION_ERROR_INVALID_PATH;
}
- ret = pPose->setPoseFromFile(std::string(motionCaptureFilePath),
+ int ret = pPose->setPoseFromFile(std::string(motionCaptureFilePath),
std::string(motionMappingFilePath));
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to setPoseFromFile");
int mv_pose_compare_open(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score)
{
Posture *pPose = static_cast<Posture *>(pose);
-
- int ret = MEDIA_VISION_ERROR_NONE;
-
std::vector<std::pair<bool, cv::Point>> actionParts;
-
mv_inference_pose_s *pAction = static_cast<mv_inference_pose_s *>(action);
for (int k = 0; k < HUMAN_POSE_MAX_LANDMARKS; ++k) {
}
- ret = pPose->compare(parts, actionParts, score);
+ int ret = pPose->compare(parts, actionParts, score);
if (ret != MEDIA_VISION_ERROR_NONE) {
LOGE("Fail to compare");
return ret;