/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <mutex>

#include "mv_private.h"
#include "mv_face_detection_internal.h"
#include "face_detection_adapter.h"
#include "machine_learning_exception.h"
#include "object_detection_type.h"
// This translation unit is the thin C-ABI layer over the C++ machine-learning
// tasks, so the usual "no using-directive at file scope" rule is relaxed here.
using namespace mediavision::inference;
using namespace mediavision::common;
using namespace mediavision::machine_learning;
using namespace MediaVision::Common;
using namespace mediavision::machine_learning::exception;

// Face detection reuses the generic object-detection task interface:
// same input/result payloads, different underlying adapter.
using FaceDetectionTask = ITask<ObjectDetectionInput, ObjectDetectionResult>;

// Serializes the public entry points that acquire it below. NOTE(review):
// destroy deliberately does NOT take this lock — see the deadlock comment in
// mv_face_detection_destroy.
static mutex g_face_detection_mutex;
42 int mv_face_detection_create(mv_face_detection_h *handle)
44 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
45 MEDIA_VISION_NULL_ARG_CHECK(handle);
47 MEDIA_VISION_FUNCTION_ENTER();
49 Context *context = nullptr;
50 FaceDetectionTask *task = nullptr;
53 context = new Context();
54 task = new FaceDetectionAdapter<ObjectDetectionInput, ObjectDetectionResult>();
55 context->__tasks.insert(make_pair("face_detection", task));
56 *handle = static_cast<mv_face_detection_h>(context);
57 } catch (const BaseException &e) {
63 MEDIA_VISION_FUNCTION_LEAVE();
65 return MEDIA_VISION_ERROR_NONE;
68 int mv_face_detection_destroy(mv_face_detection_h handle)
70 // TODO. find proper solution later.
71 // For thread safety, lock is needed here but if async API is used then dead lock occurs
72 // because mv_face_detection_destroy_open function acquires a lock and,
73 // while waiting for the thread loop to finish, the same lock is also acquired
74 // within functions - mv_face_detection_get_result_open and mv_face_detection_get_label_open
75 // - called to obtain results from the thread loop.
77 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
78 MEDIA_VISION_INSTANCE_CHECK(handle);
80 MEDIA_VISION_FUNCTION_ENTER();
82 auto context = static_cast<Context *>(handle);
84 for (auto &m : context->__tasks)
85 delete static_cast<FaceDetectionTask *>(m.second);
89 MEDIA_VISION_FUNCTION_LEAVE();
91 return MEDIA_VISION_ERROR_NONE;
94 int mv_face_detection_set_model(mv_face_detection_h handle, const char *model_name, const char *model_file,
95 const char *meta_file, const char *label_file)
97 lock_guard<mutex> lock(g_face_detection_mutex);
99 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
101 MEDIA_VISION_INSTANCE_CHECK(handle);
102 MEDIA_VISION_INSTANCE_CHECK(model_name);
103 MEDIA_VISION_NULL_ARG_CHECK(model_file);
104 MEDIA_VISION_NULL_ARG_CHECK(meta_file);
105 MEDIA_VISION_NULL_ARG_CHECK(label_file);
107 MEDIA_VISION_FUNCTION_ENTER();
110 auto context = static_cast<Context *>(handle);
111 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
113 task->setModelInfo(model_file, meta_file, label_file, model_name);
114 } catch (const BaseException &e) {
115 LOGE("%s", e.what());
119 MEDIA_VISION_FUNCTION_LEAVE();
121 return MEDIA_VISION_ERROR_NONE;
124 int mv_face_detection_set_engine(mv_face_detection_h handle, const char *backend_type, const char *device_type)
126 lock_guard<mutex> lock(g_face_detection_mutex);
128 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
130 MEDIA_VISION_INSTANCE_CHECK(handle);
131 MEDIA_VISION_NULL_ARG_CHECK(backend_type);
132 MEDIA_VISION_NULL_ARG_CHECK(device_type);
134 MEDIA_VISION_FUNCTION_ENTER();
137 auto context = static_cast<Context *>(handle);
138 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
140 task->setEngineInfo(backend_type, device_type);
141 } catch (const BaseException &e) {
142 LOGE("%s", e.what());
146 MEDIA_VISION_FUNCTION_LEAVE();
148 return MEDIA_VISION_ERROR_NONE;
151 int mv_face_detection_get_engine_count(mv_face_detection_h handle, unsigned int *engine_count)
153 lock_guard<mutex> lock(g_face_detection_mutex);
155 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
157 MEDIA_VISION_INSTANCE_CHECK(handle);
158 MEDIA_VISION_NULL_ARG_CHECK(engine_count);
160 MEDIA_VISION_FUNCTION_ENTER();
163 auto context = static_cast<Context *>(handle);
164 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
166 task->getNumberOfEngines(engine_count);
168 } catch (const BaseException &e) {
169 LOGE("%s", e.what());
173 MEDIA_VISION_FUNCTION_LEAVE();
175 return MEDIA_VISION_ERROR_NONE;
178 int mv_face_detection_get_engine_type(mv_face_detection_h handle, const unsigned int engine_index, char **engine_type)
180 lock_guard<mutex> lock(g_face_detection_mutex);
182 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
184 MEDIA_VISION_INSTANCE_CHECK(handle);
185 MEDIA_VISION_NULL_ARG_CHECK(engine_type);
187 MEDIA_VISION_FUNCTION_ENTER();
190 auto context = static_cast<Context *>(handle);
191 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
193 task->getEngineType(engine_index, engine_type);
195 } catch (const BaseException &e) {
196 LOGE("%s", e.what());
200 MEDIA_VISION_FUNCTION_LEAVE();
202 return MEDIA_VISION_ERROR_NONE;
205 int mv_face_detection_get_device_count(mv_face_detection_h handle, const char *engine_type, unsigned int *device_count)
207 lock_guard<mutex> lock(g_face_detection_mutex);
209 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
211 MEDIA_VISION_INSTANCE_CHECK(handle);
212 MEDIA_VISION_NULL_ARG_CHECK(device_count);
214 MEDIA_VISION_FUNCTION_ENTER();
217 auto context = static_cast<Context *>(handle);
218 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
220 task->getNumberOfDevices(engine_type, device_count);
222 } catch (const BaseException &e) {
223 LOGE("%s", e.what());
227 MEDIA_VISION_FUNCTION_LEAVE();
229 return MEDIA_VISION_ERROR_NONE;
232 int mv_face_detection_get_device_type(mv_face_detection_h handle, const char *engine_type,
233 const unsigned int device_index, char **device_type)
235 lock_guard<mutex> lock(g_face_detection_mutex);
237 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
239 MEDIA_VISION_INSTANCE_CHECK(handle);
240 MEDIA_VISION_NULL_ARG_CHECK(engine_type);
241 MEDIA_VISION_NULL_ARG_CHECK(device_type);
243 MEDIA_VISION_FUNCTION_ENTER();
246 auto context = static_cast<Context *>(handle);
247 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
249 task->getDeviceType(engine_type, device_index, device_type);
251 } catch (const BaseException &e) {
252 LOGE("%s", e.what());
256 MEDIA_VISION_FUNCTION_LEAVE();
258 return MEDIA_VISION_ERROR_NONE;
261 int mv_face_detection_configure(mv_face_detection_h handle)
263 lock_guard<mutex> lock(g_face_detection_mutex);
265 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
266 MEDIA_VISION_INSTANCE_CHECK(handle);
268 MEDIA_VISION_FUNCTION_ENTER();
271 auto context = static_cast<Context *>(handle);
272 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
275 } catch (const BaseException &e) {
276 LOGE("%s", e.what());
280 MEDIA_VISION_FUNCTION_LEAVE();
282 return MEDIA_VISION_ERROR_NONE;
285 int mv_face_detection_prepare(mv_face_detection_h handle)
287 lock_guard<mutex> lock(g_face_detection_mutex);
289 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
290 MEDIA_VISION_INSTANCE_CHECK(handle);
292 MEDIA_VISION_FUNCTION_ENTER();
295 auto context = static_cast<Context *>(handle);
296 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
299 } catch (const BaseException &e) {
300 LOGE("%s", e.what());
304 MEDIA_VISION_FUNCTION_LEAVE();
306 return MEDIA_VISION_ERROR_NONE;
309 int mv_face_detection_inference(mv_face_detection_h handle, mv_source_h source)
311 lock_guard<mutex> lock(g_face_detection_mutex);
313 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
314 MEDIA_VISION_INSTANCE_CHECK(source);
315 MEDIA_VISION_INSTANCE_CHECK(handle);
317 MEDIA_VISION_FUNCTION_ENTER();
320 auto context = static_cast<Context *>(handle);
321 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
323 ObjectDetectionInput input = { .inference_src = source };
325 task->setInput(input);
327 } catch (const BaseException &e) {
328 LOGE("%s", e.what());
332 MEDIA_VISION_FUNCTION_LEAVE();
334 return MEDIA_VISION_ERROR_NONE;
337 int mv_face_detection_inference_async(mv_face_detection_h handle, mv_source_h source, mv_completion_cb completion_cb,
342 lock_guard<mutex> lock(g_face_detection_mutex);
345 LOGE("Handle is NULL.");
346 return MEDIA_VISION_ERROR_INVALID_PARAMETER;
350 auto context = static_cast<Context *>(handle);
351 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
353 ObjectDetectionInput input = { handle, source, completion_cb, user_data };
355 task->performAsync(input);
356 } catch (const BaseException &e) {
357 LOGE("%s", e.what());
363 return MEDIA_VISION_ERROR_NONE;
366 int mv_face_detection_get_result(mv_face_detection_h handle, unsigned int *number_of_objects,
367 const unsigned int **indices, const float **confidences, const int **left,
368 const int **top, const int **right, const int **bottom)
370 lock_guard<mutex> lock(g_face_detection_mutex);
372 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
373 MEDIA_VISION_INSTANCE_CHECK(handle);
374 MEDIA_VISION_INSTANCE_CHECK(number_of_objects);
375 MEDIA_VISION_INSTANCE_CHECK(indices);
376 MEDIA_VISION_INSTANCE_CHECK(confidences);
377 MEDIA_VISION_INSTANCE_CHECK(left);
378 MEDIA_VISION_INSTANCE_CHECK(top);
379 MEDIA_VISION_INSTANCE_CHECK(right);
380 MEDIA_VISION_INSTANCE_CHECK(bottom);
382 MEDIA_VISION_FUNCTION_ENTER();
385 auto context = static_cast<Context *>(handle);
386 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
388 ObjectDetectionResult &result = task->getOutput();
389 *number_of_objects = result.number_of_objects;
390 *indices = result.indices.data();
391 *confidences = result.confidences.data();
392 *left = result.left.data();
393 *top = result.top.data();
394 *right = result.right.data();
395 *bottom = result.bottom.data();
396 } catch (const BaseException &e) {
397 LOGE("%s", e.what());
401 MEDIA_VISION_FUNCTION_LEAVE();
403 return MEDIA_VISION_ERROR_NONE;
406 int mv_face_detection_get_label(mv_face_detection_h handle, const unsigned int index, const char **out_label)
408 lock_guard<mutex> lock(g_face_detection_mutex);
410 MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
411 MEDIA_VISION_INSTANCE_CHECK(handle);
412 MEDIA_VISION_INSTANCE_CHECK(out_label);
414 MEDIA_VISION_FUNCTION_ENTER();
417 auto context = static_cast<Context *>(handle);
418 auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
420 ObjectDetectionResult &result = task->getOutput();
422 if (result.number_of_objects <= index)
423 throw InvalidParameter("Invalid index range.");
425 *out_label = result.names[index].c_str();
426 } catch (const BaseException &e) {
427 LOGE("%s", e.what());
431 MEDIA_VISION_FUNCTION_LEAVE();
433 return MEDIA_VISION_ERROR_NONE;