* @internal
* @brief Performs face detection inference asynchronously on the @a source.
*
- * @since_tizen 7.5
+ * @since_tizen 8.0
* @remarks This function operates asynchronously, so it returns immediately upon invocation.
- * Therefore, user needs to receive the result though a given callback function.
+ * The inference results are inserted into the outgoing queue within the framework
+ * in the order of processing, and the results can be obtained through mv_face_detection_get_result()
+ * and mv_face_detection_get_label().
*
* @param[in] handle The handle to the inference
* @param[in] source The handle to the source of the media
- * @param[in] completion_cb A callback which is called internally by the framework
- * once the given inference request is completed.
- * @param[in] user_data A pointer to user data object.
*
* @return @c 0 on success, otherwise a negative error value
* @retval #MEDIA_VISION_ERROR_NONE Successful
* @pre Prepare an inference by calling mv_face_detection_configure()
* @pre Prepare an inference by calling mv_face_detection_prepare()
*/
-int mv_face_detection_inference_async(mv_face_detection_h handle, mv_source_h source, mv_completion_cb completion_cb,
- void *user_data);
+int mv_face_detection_inference_async(mv_face_detection_h handle, mv_source_h source);
/**
* @internal
*
* @since_tizen 8.0
*
- * @param[in] handle The handle to the inference
+ * @param[in] handle The handle to the inference
* @param[out] number_of_objects A number of objects detected.
- * @param[out] indices Label indices to detected objects.
+ * @param[out] frame_number The frame number of the inferenced frame.
* @param[out] confidences An array of probabilities of the detected objects.
* @param[out] left An array of left positions of the bounding boxes.
* @param[out] top An array of top positions of the bounding boxes.
* @pre Request an inference by calling mv_face_detection_inference_async()
*/
int mv_face_detection_get_result(mv_face_detection_h handle, unsigned int *number_of_objects,
- const unsigned int **indices, const float **confidences, const int **left,
+ unsigned long *frame_number, const float **confidences, const int **left,
const int **top, const int **right, const int **bottom);
/**
* @internal
* @brief Performs object detection inference asynchronously on the @a source.
*
- * @since_tizen 7.5
+ * @since_tizen 8.0
* @remarks This function operates asynchronously, so it returns immediately upon invocation.
- * Therefore, user needs to receive the result though a given callback function.
+ * The inference results are inserted into the outgoing queue within the framework
+ * in the order of processing, and the results can be obtained through mv_object_detection_get_result()
+ * and mv_object_detection_get_label().
*
* @param[in] handle The handle to the inference
* @param[in] source The handle to the source of the media
- * @param[in] completion_cb A callback which is called internally by the framework
- * once the given inference request is completed.
- * @param[in] user_data A pointer to user data object.
*
* @return @c 0 on success, otherwise a negative error value
* @retval #MEDIA_VISION_ERROR_NONE Successful
* @pre Prepare an inference by calling mv_object_detection_configure()
* @pre Prepare an inference by calling mv_object_detection_prepare()
*/
-int mv_object_detection_inference_async(mv_object_detection_h handle, mv_source_h source,
- mv_completion_cb completion_cb, void *user_data);
+int mv_object_detection_inference_async(mv_object_detection_h handle, mv_source_h source);
/**
* @internal
*
* @param[in] infer The handle to the inference
* @param[out] number_of_objects A number of objects detected.
- * @param[out] indices Label indices to detected objects.
+ * @param[out] frame_number The frame number of the inferenced frame.
* @param[out] confidences An array of probabilities of the detected objects.
* @param[out] left An array of left positions of the bounding boxes.
* @param[out] top An array of top positions of the bounding boxes.
* @pre Request an inference by calling mv_object_detection_inference_async()
*/
int mv_object_detection_get_result(mv_object_detection_h infer, unsigned int *number_of_objects,
- const unsigned int **indices, const float **confidences, const int **left,
+ unsigned long *frame_number, const float **confidences, const int **left,
const int **top, const int **right, const int **bottom);
/**
#include <queue>
#include <thread>
#include <mutex>
+#include <condition_variable>
+#include <atomic>
+
#include <mv_common.h>
#include <mv_inference_type.h>
#include "mv_private.h"
{
private:
ObjectDetectionTaskType _task_type;
- template<typename T> std::queue<ObjectDetectionQueue<T> > static _incoming_queue;
+ std::queue<ObjectDetectionQueue<unsigned char> > _incoming_queue;
std::queue<ObjectDetectionResult> _outgoing_queue;
std::mutex _incoming_queue_mutex;
std::mutex _outgoing_queue_mutex;
- int _input_data_type {};
std::unique_ptr<std::thread> _thread_handle;
- bool _exit_thread {};
+ std::atomic<bool> _exit_thread { false };
ObjectDetectionResult _current_result {};
- unsigned long _input_index {};
+ unsigned long _input_frame_number {};
+ std::condition_variable _cv_event;
void loadLabel();
void getEngineList();
void getDeviceList(const char *engine_type);
template<typename T>
void preprocess(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo, std::vector<T> &inputVector);
- template<typename T> void pushToInput(ObjectDetectionQueue<T> &input);
+ template<typename T> void pushToInput(ObjectDetectionQueue<T> &inputQueue);
ObjectDetectionResult popFromOutput();
bool isOutputQueueEmpty();
+ void waitforOutputQueue();
template<typename T> ObjectDetectionQueue<T> popFromInput();
template<typename T> bool isInputQueueEmpty();
void pushToOutput(ObjectDetectionResult &output);
std::shared_ptr<MetaInfo> getInputMetaInfo();
template<typename T> void perform(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo);
template<typename T> void performAsync(ObjectDetectionInput &input, std::shared_ptr<MetaInfo> metaInfo);
- bool exitThread();
protected:
std::unique_ptr<mediavision::inference::Inference> _inference;
struct ObjectDetectionInput {
void *handle {};
mv_source_h inference_src;
- mv_completion_cb completion_cb;
- void *user_data {};
// TODO.
};
template<typename T> struct ObjectDetectionQueue {
- unsigned long index {};
+ unsigned long frame_number {};
void *handle {};
mv_source_h inference_src;
- mv_completion_cb completion_cb;
std::vector<std::vector<T> > inputs;
void *user_data {};
};
* @details Contains object detection result.
*/
struct ObjectDetectionResult {
+ unsigned long frame_number;
unsigned int number_of_objects {};
std::vector<unsigned int> indices;
std::vector<std::string> names;
using namespace mediavision::machine_learning::exception;
using FaceDetectionTask = ITask<ObjectDetectionInput, ObjectDetectionResult>;
-static mutex g_face_detection_mutex;
-
int mv_face_detection_create(mv_face_detection_h *handle)
{
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
int mv_face_detection_destroy(mv_face_detection_h handle)
{
- // TODO. find proper solution later.
- // For thread safety, lock is needed here but if async API is used then dead lock occurs
- // because mv_face_detection_destroy_open function acquires a lock and,
- // while waiting for the thread loop to finish, the same lock is also acquired
- // within functions - mv_face_detection_get_result_open and mv_face_detection_get_label_open
- // - called to obtain results from the thread loop.
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_set_model(mv_face_detection_h handle, const char *model_name, const char *model_file,
const char *meta_file, const char *label_file)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_set_engine(mv_face_detection_h handle, const char *backend_type, const char *device_type)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_get_engine_count(mv_face_detection_h handle, unsigned int *engine_count)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_get_engine_type(mv_face_detection_h handle, const unsigned int engine_index, char **engine_type)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_get_device_count(mv_face_detection_h handle, const char *engine_type, unsigned int *device_count)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_get_device_type(mv_face_detection_h handle, const char *engine_type,
const unsigned int device_index, char **device_type)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_configure(mv_face_detection_h handle)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_prepare(mv_face_detection_h handle)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_face_detection_inference(mv_face_detection_h handle, mv_source_h source)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(source);
MEDIA_VISION_INSTANCE_CHECK(handle);
return MEDIA_VISION_ERROR_NONE;
}
-int mv_face_detection_inference_async(mv_face_detection_h handle, mv_source_h source, mv_completion_cb completion_cb,
- void *user_data)
+int mv_face_detection_inference_async(mv_face_detection_h handle, mv_source_h source)
{
- LOGD("ENTER");
+ MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
+ MEDIA_VISION_INSTANCE_CHECK(handle);
+ MEDIA_VISION_INSTANCE_CHECK(source);
- lock_guard<mutex> lock(g_face_detection_mutex);
+ MEDIA_VISION_FUNCTION_ENTER();
if (!handle) {
LOGE("Handle is NULL.");
auto context = static_cast<Context *>(handle);
auto task = static_cast<FaceDetectionTask *>(context->__tasks.at("face_detection"));
- ObjectDetectionInput input = { handle, source, completion_cb, user_data };
+ ObjectDetectionInput input = { handle, source };
task->performAsync(input);
} catch (const BaseException &e) {
}
int mv_face_detection_get_result(mv_face_detection_h handle, unsigned int *number_of_objects,
- const unsigned int **indices, const float **confidences, const int **left,
+ unsigned long *frame_number, const float **confidences, const int **left,
const int **top, const int **right, const int **bottom)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
MEDIA_VISION_INSTANCE_CHECK(number_of_objects);
- MEDIA_VISION_INSTANCE_CHECK(indices);
+ MEDIA_VISION_INSTANCE_CHECK(frame_number);
MEDIA_VISION_INSTANCE_CHECK(confidences);
MEDIA_VISION_INSTANCE_CHECK(left);
MEDIA_VISION_INSTANCE_CHECK(top);
ObjectDetectionResult &result = task->getOutput();
*number_of_objects = result.number_of_objects;
- *indices = result.indices.data();
+ *frame_number = result.frame_number;
*confidences = result.confidences.data();
*left = result.left.data();
*top = result.top.data();
int mv_face_detection_get_label(mv_face_detection_h handle, const unsigned int index, const char **out_label)
{
- lock_guard<mutex> lock(g_face_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
MEDIA_VISION_INSTANCE_CHECK(out_label);
using namespace mediavision::machine_learning::exception;
using ObjectDetectionTask = ITask<ObjectDetectionInput, ObjectDetectionResult>;
-static mutex g_object_detection_mutex;
-
int mv_object_detection_create(mv_object_detection_h *handle)
{
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
MEDIA_VISION_FUNCTION_ENTER();
- // TODO. find proper solution later.
- // For thread safety, lock is needed here but if async API is used then dead lock occurs
- // because mv_object_detection_destroy_open function acquires a lock and,
- // while waiting for the thread loop to finish, the same lock is also acquired
- // within functions - mv_object_detection_get_result_open and mv_object_detection_get_label_open
- // - called to obtain results from the thread loop.
-
auto context = static_cast<Context *>(handle);
for (auto &m : context->__tasks)
int mv_object_detection_set_model(mv_object_detection_h handle, const char *model_name, const char *model_file,
const char *meta_file, const char *label_file)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_set_engine(mv_object_detection_h handle, const char *backend_type, const char *device_type)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_get_engine_count(mv_object_detection_h handle, unsigned int *engine_count)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_get_engine_type(mv_object_detection_h handle, const unsigned int engine_index,
char **engine_type)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_get_device_count(mv_object_detection_h handle, const char *engine_type,
unsigned int *device_count)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_get_device_type(mv_object_detection_h handle, const char *engine_type,
const unsigned int device_index, char **device_type)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_face_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_configure(mv_object_detection_h handle)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_prepare(mv_object_detection_h handle)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
int mv_object_detection_inference(mv_object_detection_h handle, mv_source_h source)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
MEDIA_VISION_INSTANCE_CHECK(source);
return MEDIA_VISION_ERROR_NONE;
}
-int mv_object_detection_inference_async(mv_object_detection_h handle, mv_source_h source,
- mv_completion_cb completion_cb, void *user_data)
+int mv_object_detection_inference_async(mv_object_detection_h handle, mv_source_h source)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
MEDIA_VISION_INSTANCE_CHECK(source);
- MEDIA_VISION_INSTANCE_CHECK(completion_cb);
MEDIA_VISION_FUNCTION_ENTER();
auto context = static_cast<Context *>(handle);
auto task = static_cast<ObjectDetectionTask *>(context->__tasks.at("object_detection"));
- ObjectDetectionInput input = { handle, source, completion_cb, user_data };
+ ObjectDetectionInput input = { handle, source };
task->performAsync(input);
} catch (const BaseException &e) {
}
int mv_object_detection_get_result(mv_object_detection_h handle, unsigned int *number_of_objects,
- const unsigned int **indices, const float **confidences, const int **left,
+ unsigned long *frame_number, const float **confidences, const int **left,
const int **top, const int **right, const int **bottom)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
MEDIA_VISION_INSTANCE_CHECK(number_of_objects);
- MEDIA_VISION_INSTANCE_CHECK(indices);
+ MEDIA_VISION_INSTANCE_CHECK(frame_number);
MEDIA_VISION_INSTANCE_CHECK(confidences);
MEDIA_VISION_INSTANCE_CHECK(left);
MEDIA_VISION_INSTANCE_CHECK(top);
ObjectDetectionResult &result = task->getOutput();
*number_of_objects = result.number_of_objects;
- *indices = result.indices.data();
+ *frame_number = result.frame_number;
*confidences = result.confidences.data();
*left = result.left.data();
*top = result.top.data();
int mv_object_detection_get_label(mv_object_detection_h handle, const unsigned int index, const char **label)
{
- lock_guard<mutex> lock(g_object_detection_mutex);
-
MEDIA_VISION_SUPPORT_CHECK(_mv_inference_image_check_system_info_feature_supported());
MEDIA_VISION_INSTANCE_CHECK(handle);
MEDIA_VISION_INSTANCE_CHECK(label);
#include "object_detection.h"
using namespace std;
+using namespace std::chrono_literals;
using namespace mediavision::inference;
using namespace MediaVision::Common;
using namespace mediavision::common;
void ObjectDetection::preDestroy()
{
- if (_thread_handle) {
- _exit_thread = true;
- _thread_handle->join();
- }
-}
+ if (!_thread_handle)
+ return;
-bool ObjectDetection::exitThread()
-{
- return _exit_thread;
+ // NOTE(review): the comment below does not match the code — the thread loop exits as soon as
+ // _exit_thread is set, so inference requests still pending in the incoming queue are dropped,
+ // not completed. Confirm whether pending requests should be drained before exiting.
+ _exit_thread = true;
+
+ _thread_handle->join();
+ _thread_handle = nullptr;
+
+ lock_guard<mutex> lock(_outgoing_queue_mutex);
+ queue<ObjectDetectionResult> empty;
+
+ swap(_outgoing_queue, empty);
}
ObjectDetectionTaskType ObjectDetection::getTaskType()
vector<vector<T> > inputVectors = { inputVector };
inference<T>(inputVectors);
+
+ // TODO. Update operation status here.
}
void ObjectDetection::perform(mv_source_h &mv_src)
template<typename T> void inferenceThreadLoop(ObjectDetection *object)
{
// If the user calls the destroy API, this thread loop will terminate.
- while (!object->exitThread() || !object->isInputQueueEmpty<T>()) {
+ while (!object->_exit_thread) {
// If the input queue is empty, skip the inference request.
// NOTE(review): this busy-spins at full CPU while the queue is empty — consider waiting on a
// condition variable (or sleeping briefly) instead of continuing immediately.
if (object->isInputQueueEmpty<T>())
continue;
ObjectDetectionQueue<T> input = object->popFromInput<T>();
- LOGD("Popped : input index = %lu", input.index);
+ LOGD("Popped : input frame number = %lu", input.frame_number);
object->inference<T>(input.inputs);
ObjectDetectionResult &result = object->result();
+ result.frame_number = input.frame_number;
object->pushToOutput(result);
-
- input.completion_cb(input.handle, input.user_data);
}
+
+ // waitforOutputQueue() may still be blocked waiting for a notify event after the loop above
+ // has exited, so make sure to call notify_one() here to wake any remaining waiter.
+ object->_cv_event.notify_one();
}
template<typename T> void ObjectDetection::performAsync(ObjectDetectionInput &input, shared_ptr<MetaInfo> metaInfo)
{
- _input_index++;
+ _input_frame_number++;
if (!isInputQueueEmpty<T>())
return;
preprocess<T>(input.inference_src, metaInfo, inputVector);
vector<vector<T> > inputVectors = { inputVector };
- ObjectDetectionQueue<T> in_queue = { _input_index, input.handle, input.inference_src,
- input.completion_cb, inputVectors, input.user_data };
+ ObjectDetectionQueue<T> in_queue = { _input_frame_number, input.handle, input.inference_src, inputVectors };
pushToInput<T>(in_queue);
- LOGD("Pushed : input index = %lu", in_queue.index);
+ LOGD("Pushed : input frame number = %lu", in_queue.frame_number);
if (!_thread_handle)
_thread_handle = make_unique<thread>(&inferenceThreadLoop<T>, this);
ObjectDetectionResult &ObjectDetection::getOutput()
{
if (_thread_handle) {
- if (isOutputQueueEmpty())
- throw InvalidOperation("No inference result.");
+ if (_exit_thread)
+ throw InvalidOperation("Object detection is already destroyed so invalid operation.");
+ waitforOutputQueue();
_current_result = popFromOutput();
} else {
+ // TODO. Check if inference request is completed or not here.
+ // If not then throw an exception.
_current_result = result();
}
copy(&raw_buffer[0], &raw_buffer[tensor_buffer->size / sizeof(float)], back_inserter(tensor));
}
-template<typename T> void ObjectDetection::pushToInput(ObjectDetectionQueue<T> &input)
+template<typename T> void ObjectDetection::pushToInput(ObjectDetectionQueue<T> &inputQueue)
{
lock_guard<mutex> lock(_incoming_queue_mutex);
- _incoming_queue<T>.push(input);
+ ObjectDetectionQueue<unsigned char> dstQueue;
+
+ dstQueue.frame_number = inputQueue.frame_number;
+ dstQueue.handle = inputQueue.handle;
+ dstQueue.inference_src = inputQueue.inference_src;
+ dstQueue.user_data = inputQueue.user_data;
+
+ for (auto &elms : inputQueue.inputs) {
+ vector<unsigned char> dst_vector;
+
+ for (auto &elm : elms) {
+ unsigned char *bytes = reinterpret_cast<unsigned char *>(&elm);
+
+ copy_n(bytes, sizeof(T), back_inserter(dst_vector));
+ }
+
+ dstQueue.inputs.push_back(dst_vector);
+ }
+
+ _incoming_queue.push(dstQueue);
}
template<typename T> ObjectDetectionQueue<T> ObjectDetection::popFromInput()
{
lock_guard<mutex> lock(_incoming_queue_mutex);
- ObjectDetectionQueue<T> input = _incoming_queue<T>.front();
- _incoming_queue<T>.pop();
+ ObjectDetectionQueue<unsigned char> inputQueue = _incoming_queue.front();
+
+ _incoming_queue.pop();
+ ObjectDetectionQueue<T> dstQueue;
+
+ dstQueue.frame_number = inputQueue.frame_number;
+ dstQueue.handle = inputQueue.handle;
+ dstQueue.inference_src = inputQueue.inference_src;
+ dstQueue.user_data = inputQueue.user_data;
- return input;
+ for (auto &elms : inputQueue.inputs) {
+ vector<T> dst_vector;
+
+ for (size_t idx = 0; idx < elms.size(); idx += sizeof(T)) {
+ T dst_data;
+
+ copy_n(elms.begin() + idx, sizeof(T), reinterpret_cast<unsigned char *>(&dst_data));
+ dst_vector.push_back(dst_data);
+ }
+
+ dstQueue.inputs.push_back(dst_vector);
+ }
+
+ return dstQueue;
}
template<typename T> bool ObjectDetection::isInputQueueEmpty()
{
lock_guard<mutex> lock(_incoming_queue_mutex);
- return _incoming_queue<T>.empty();
+
+ return _incoming_queue.empty();
}
void ObjectDetection::pushToOutput(ObjectDetectionResult &output)
{
lock_guard<mutex> lock(_outgoing_queue_mutex);
+
_outgoing_queue.push(output);
+ _cv_event.notify_one();
}
ObjectDetectionResult ObjectDetection::popFromOutput()
return _outgoing_queue.empty();
}
-template<typename T> queue<ObjectDetectionQueue<T> > ObjectDetection::_incoming_queue;
+void ObjectDetection::waitforOutputQueue()
+{
+ unique_lock<mutex> lock(_outgoing_queue_mutex);
+
+ if (!_cv_event.wait_for(lock, 10s, [this] {
+ if (_exit_thread)
+ throw InvalidOperation("already thread exit");
+
+ return !_outgoing_queue.empty();
+ })) {
+ throw InvalidOperation("Waiting for output queue has been timed out.");
+ }
+}
template void ObjectDetection::preprocess<float>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo,
vector<float> &inputVector);
template void ObjectDetection::inference<float>(vector<vector<float> > &inputVectors);
template void ObjectDetection::perform<float>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo);
-template void ObjectDetection::pushToInput<float>(ObjectDetectionQueue<float> &input);
+template void ObjectDetection::pushToInput<float>(ObjectDetectionQueue<float> &inputQueue);
template ObjectDetectionQueue<float> ObjectDetection::popFromInput();
template bool ObjectDetection::isInputQueueEmpty<float>();
template void ObjectDetection::performAsync<float>(ObjectDetectionInput &input, shared_ptr<MetaInfo> metaInfo);
vector<unsigned char> &inputVector);
template void ObjectDetection::inference<unsigned char>(vector<vector<unsigned char> > &inputVectors);
template void ObjectDetection::perform<unsigned char>(mv_source_h &mv_src, shared_ptr<MetaInfo> metaInfo);
-template void ObjectDetection::pushToInput<unsigned char>(ObjectDetectionQueue<unsigned char> &input);
+template void ObjectDetection::pushToInput<unsigned char>(ObjectDetectionQueue<unsigned char> &inputQueue);
template ObjectDetectionQueue<unsigned char> ObjectDetection::popFromInput();
template bool ObjectDetection::isInputQueueEmpty<unsigned char>();
return ObjectDetectionTaskType::OD_PLUGIN;
else if (model_name == "FD_PLUGIN")
return ObjectDetectionTaskType::FD_PLUGIN;
- else if (model_name == string("MOBILENET_V1_SSD"))
+ else if (model_name == "MOBILENET_V1_SSD")
return ObjectDetectionTaskType::MOBILENET_V1_SSD;
- else if (model_name == string("MOBILENET_V2_SSD"))
+ else if (model_name == "MOBILENET_V2_SSD")
return ObjectDetectionTaskType::MOBILENET_V2_SSD;
// TODO.
)
target_compile_definitions(${TEST_OBJECT_DETECTION_ASYNC} PRIVATE -DTEST_RES_PATH="${TEST_RES_PATH}")
-target_link_libraries(${TEST_OBJECT_DETECTION_ASYNC} gtest gtest_main
+target_link_libraries(${TEST_OBJECT_DETECTION_ASYNC} gtest gtest_main pthread
mv_inference
mv_object_detection
mv_image_helper
unsigned int number_of_objects;
const int *left, *top, *right, *bottom;
- const unsigned int *indices;
+ unsigned long frame_number;
const float *confidences;
- ret = mv_object_detection_get_result(handle, &number_of_objects, &indices, &confidences, &left, &top, &right,
- &bottom);
+ ret = mv_object_detection_get_result(handle, &number_of_objects, &frame_number, &confidences, &left, &top,
+ &right, &bottom);
ASSERT_EQ(ret, 0);
for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
- cout << "index = " << indices[idx] << " probability = " << confidences[idx] << " " << left[idx] << " x "
- << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
+ cout << "Frame number = " << frame_number << " probability = " << confidences[idx] << " " << left[idx]
+ << " x " << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
}
for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
const char *label;
- ret = mv_object_detection_get_label(handle, indices[idx], &label);
+ ret = mv_object_detection_get_label(handle, idx, &label);
ASSERT_EQ(ret, 0);
- cout << "index = " << indices[idx] << " label = " << label << endl;
+ cout << "index = " << idx << " label = " << label << endl;
string label_str(label);
unsigned int number_of_objects;
const int *left, *top, *right, *bottom;
- const unsigned int *indices;
+ unsigned long frame_number;
const float *confidences;
- ret = mv_face_detection_get_result(handle, &number_of_objects, &indices, &confidences, &left, &top, &right,
+ ret = mv_face_detection_get_result(handle, &number_of_objects, &frame_number, &confidences, &left, &top, &right,
&bottom);
ASSERT_EQ(ret, 0);
for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
- cout << "index = " << indices[idx] << " probability = " << confidences[idx] << " " << left[idx] << " x "
- << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
+ cout << "Frame number = " << frame_number << " probability = " << confidences[idx] << " " << left[idx]
+ << " x " << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
}
for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
const char *label;
- ret = mv_face_detection_get_label(handle, indices[idx], &label);
+ ret = mv_face_detection_get_label(handle, idx, &label);
ASSERT_EQ(ret, 0);
- cout << "index = " << indices[idx] << " label = " << label << endl;
+ cout << "index = " << idx << " label = " << label << endl;
string label_str(label);
#include <iostream>
#include <algorithm>
#include <string.h>
+#include <thread>
#include "gtest/gtest.h"
#define IMG_DOG TEST_RES_PATH "/res/inference/images/dog2.jpg"
#define IMG_FACE TEST_RES_PATH "/res/inference/images/faceDetection.jpg"
-#define MAX_INFERENCE_ITERATION 20
+#define MAX_INFERENCE_ITERATION 50
using namespace testing;
using namespace std;
mv_source_h source;
};
-void object_detection_completion_callback(mv_object_detection_h handle, void *user_data)
+void object_detection_callback(void *user_data)
{
unsigned int number_of_objects;
const int *left, *top, *right, *bottom;
- const unsigned int *indices;
+ unsigned long frame_number = 0;
const float *confidences;
- model_info *test_model(static_cast<model_info *>(user_data));
+ mv_object_detection_h handle = static_cast<mv_object_detection_h>(user_data);
- int ret = mv_object_detection_get_result(handle, &number_of_objects, &indices, &confidences, &left, &top, &right,
- &bottom);
- ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+ while (frame_number < MAX_INFERENCE_ITERATION - 10) {
+ int ret = mv_object_detection_get_result(handle, &number_of_objects, &frame_number, &confidences, &left, &top,
+ &right, &bottom);
+ if (ret == MEDIA_VISION_ERROR_INVALID_OPERATION)
+ break;
- for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
- cout << "index = " << indices[idx] << " probability = " << confidences[idx] << " " << left[idx] << " x "
- << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+ for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+ cout << "frame number = " << frame_number << " probability = " << confidences[idx] << " " << left[idx]
+ << " x " << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
+ }
+
+ for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+ const char *label;
+
+ ret = mv_object_detection_get_label(handle, idx, &label);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+ cout << "index = " << idx << " label = " << label << endl;
+
+ string label_str(label);
+
+ transform(label_str.begin(), label_str.end(), label_str.begin(), ::toupper);
+
+ ASSERT_EQ(label_str, "DOG");
+ }
}
+}
+
+TEST(ObjectDetectionAsyncTest, InferenceShouldBeOk)
+{
+ mv_object_detection_h handle;
+ vector<model_info> test_models {
+ { "", "", "", "", "DOG" }, // If empty then default model will be used.
+ { "mobilenet_v2_ssd", "od_mobilenet_v2_ssd_320x320.tflite", "od_mobilenet_v2_ssd_320x320.json",
+ "od_mobilenet_v2_ssd_label.txt", "DOG" }
+ // TODO.
+ };
+
+ for (auto &model : test_models) {
+ int ret = mv_object_detection_create(&handle);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+ cout << "model name : " << model.model_file << endl;
+
+ mv_object_detection_set_model(handle, model.model_name.c_str(), model.model_file.c_str(),
+ model.meta_file.c_str(), model.label_file.c_str());
+ mv_object_detection_set_engine(handle, "tflite", "cpu");
- for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
- const char *label;
+ ret = mv_object_detection_configure(handle);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
- ret = mv_object_detection_get_label(handle, indices[idx], &label);
+ ret = mv_object_detection_prepare(handle);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
- cout << "index = " << indices[idx] << " label = " << label << endl;
- string label_str(label);
+ unique_ptr<thread> thread_handle;
+
+ for (unsigned int iter = 0; iter < MAX_INFERENCE_ITERATION; ++iter) {
+ mv_source_h mv_source = NULL;
+ ret = mv_create_source(&mv_source);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+ ret = ImageHelper::loadImageToSource(IMG_DOG, mv_source);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+ model.source = mv_source;
+
+ ret = mv_object_detection_inference_async(handle, mv_source);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+ if (iter == 0)
+ thread_handle = make_unique<thread>(&object_detection_callback, static_cast<void *>(handle));
+
+ ret = mv_destroy_source(mv_source);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+ }
- transform(label_str.begin(), label_str.end(), label_str.begin(), ::toupper);
+ thread_handle->join();
- ASSERT_EQ(label_str, test_model->answer);
+ ret = mv_object_detection_destroy(handle);
+ ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
}
}
-TEST(ObjectDetectionAsyncTest, InferenceShouldBeOk)
+TEST(ObjectDetectionAsyncTest, InferenceShouldBeOkWithDestroyFirst)
{
mv_object_detection_h handle;
vector<model_info> test_models {
ret = mv_object_detection_prepare(handle);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+ unique_ptr<thread> thread_handle;
+
for (unsigned int iter = 0; iter < MAX_INFERENCE_ITERATION; ++iter) {
mv_source_h mv_source = NULL;
ret = mv_create_source(&mv_source);
model.source = mv_source;
- ret = mv_object_detection_inference_async(handle, mv_source, object_detection_completion_callback,
- reinterpret_cast<void *>(&model));
+ ret = mv_object_detection_inference_async(handle, mv_source);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+ if (iter == 0)
+ thread_handle = make_unique<thread>(&object_detection_callback, static_cast<void *>(handle));
+
ret = mv_destroy_source(mv_source);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
}
ret = mv_object_detection_destroy(handle);
ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+ thread_handle->join();
}
}
-void face_detection_completion_callback(mv_object_detection_h handle, void *user_data)
+// Drains face-detection results from the framework's outgoing queue on a worker
+// thread. user_data carries the mv_face_detection_h inference handle. Exits when
+// the handle is torn down (get_result returns INVALID_OPERATION) or once frames
+// close to MAX_INFERENCE_ITERATION have been consumed.
+void face_detection_callback(void *user_data)
{
	unsigned int number_of_objects;
	const int *left, *top, *right, *bottom;
-	const unsigned int *indices;
+	unsigned long frame_number = 0;
	const float *confidences;
-	model_info *test_model(static_cast<model_info *>(user_data));
+	// Fix: this is the FACE detection callback; cast to mv_face_detection_h, not
+	// mv_object_detection_h, to match mv_face_detection_get_result()/get_label().
+	mv_face_detection_h handle = static_cast<mv_face_detection_h>(user_data);
+
+	// Stop a few frames early: the last requests may still be in flight when the
+	// producer loop finishes, so do not insist on draining every single frame.
+	while (frame_number < MAX_INFERENCE_ITERATION - 10) {
+		int ret = mv_face_detection_get_result(handle, &number_of_objects, &frame_number, &confidences, &left, &top,
+											   &right, &bottom);
+		// INVALID_OPERATION signals the handle/queue was destroyed; exit quietly.
+		if (ret == MEDIA_VISION_ERROR_INVALID_OPERATION)
+			break;
+
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+		for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+			cout << "Frame number = " << frame_number << " probability = " << confidences[idx] << " " << left[idx]
+				 << " x " << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
+		}
+
+		for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
+			const char *label;
-	int ret = mv_face_detection_get_result(handle, &number_of_objects, &indices, &confidences, &left, &top, &right,
-										   &bottom);
-	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+			ret = mv_face_detection_get_label(handle, idx, &label);
+			ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+			cout << "index = " << idx << " label = " << label << endl;
+
+			string label_str(label);
-	for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
-		cout << "index = " << indices[idx] << " probability = " << confidences[idx] << " " << left[idx] << " x "
-			 << top[idx] << " ~ " << right[idx] << " x " << bottom[idx] << endl;
+			transform(label_str.begin(), label_str.end(), label_str.begin(), ::toupper);
+
+			// NOTE(review): gtest ASSERT_* in a non-main thread aborts only this
+			// function, not the test — failures surface via the recorded result.
+			ASSERT_EQ(label_str, "FACE");
+		}
	}
+}
+
+// Steady-state async test: queue MAX_INFERENCE_ITERATION face-detection requests
+// while a poller thread validates every detected label, then join and destroy.
+TEST(FaceDetectionAsyncTest, InferenceShouldBeOk)
+{
+	mv_face_detection_h handle;
+	vector<model_info> test_models {
+		{ "", "", "", "", "FACE" } // If empty then default model will be used.
+		// TODO.
+	};
+
+	for (auto &model : test_models) {
+		int ret = mv_face_detection_create(&handle);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+		cout << "model name : " << model.model_file << endl;
+
+		mv_face_detection_set_model(handle, model.model_name.c_str(), model.model_file.c_str(), model.meta_file.c_str(),
+									model.label_file.c_str());
+		mv_face_detection_set_engine(handle, "tflite", "cpu");
-		for (unsigned int idx = 0; idx < number_of_objects; ++idx) {
-			const char *label;
+		ret = mv_face_detection_configure(handle);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
-			ret = mv_face_detection_get_label(handle, indices[idx], &label);
+		ret = mv_face_detection_prepare(handle);
		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
-			cout << "index = " << indices[idx] << " label = " << label << endl;
-			string label_str(label);
+		// Poller thread; created after the first request so the queue exists.
+		unique_ptr<thread> thread_handle;
+
+		for (unsigned int iter = 0; iter < MAX_INFERENCE_ITERATION; ++iter) {
+			mv_source_h mv_source = NULL;
+			ret = mv_create_source(&mv_source);
+			ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+			ret = ImageHelper::loadImageToSource(IMG_FACE, mv_source);
+			ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+			model.source = mv_source;
+
+			ret = mv_face_detection_inference_async(handle, mv_source);
+			ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+			if (iter == 0)
+				thread_handle = make_unique<thread>(&face_detection_callback, static_cast<void *>(handle));
+
+			ret = mv_destroy_source(mv_source);
+			ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+		}
-		transform(label_str.begin(), label_str.end(), label_str.begin(), ::toupper);
+		thread_handle->join();
-		ASSERT_EQ(label_str, test_model->answer);
+		ret = mv_face_detection_destroy(handle);
+		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
	}
}
-TEST(FaceDetectionAsyncTest, InferenceShouldBeOk)
+TEST(FaceDetectionAsyncTest, InferenceShouldBeOkWithDestroyFirst)
{
-	mv_object_detection_h handle;
+	mv_face_detection_h handle;
	vector<model_info> test_models {
	ret = mv_face_detection_prepare(handle);
	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+	// Poller thread; started lazily after the first async request is queued.
+	unique_ptr<thread> thread_handle;
+
	for (unsigned int iter = 0; iter < MAX_INFERENCE_ITERATION; ++iter) {
		mv_source_h mv_source = NULL;
		ret = mv_create_source(&mv_source);
		model.source = mv_source;
-		ret = mv_face_detection_inference_async(handle, mv_source, face_detection_completion_callback,
-												reinterpret_cast<void *>(&model));
+		ret = mv_face_detection_inference_async(handle, mv_source);
		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+		if (iter == 0)
+			thread_handle = make_unique<thread>(&face_detection_callback, static_cast<void *>(handle));
+
		ret = mv_destroy_source(mv_source);
		ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
	}
	ret = mv_face_detection_destroy(handle);
	ASSERT_EQ(ret, MEDIA_VISION_ERROR_NONE);
+
+	// "DestroyFirst": destroy precedes join; the poller is expected to bail out on
+	// MEDIA_VISION_ERROR_INVALID_OPERATION. NOTE(review): confirm get_result is
+	// safe against a destroyed handle (potential use-after-free race).
+	thread_handle->join();
}
}
\ No newline at end of file