// Review note: this hunk reshapes the ObjectDetection class API. It (a) moves
// the internal helpers below the data members and pulls the queue/typed
// entry points out of the public section into private, (b) promotes
// parseMetaFile/inference/result to protected for derived classes, and
// (c) leaves only the new untyped perform/performAsync and
// configure(configFile) public, so callers no longer deal with MetaInfo or
// template element types directly.
class ObjectDetection
{
private:
// Removed here: these helper declarations are re-added below, after the data
// members (pure reordering within the private section).
- void loadLabel();
- void getEngineList();
- void getDeviceList(const char *engine_type);
- void updateResult(ObjectDetectionResult &result);
- template<typename T>
- void preprocess(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo, std::vector<T> &inputVector);
- template<typename T> void pushToInput(ObjectDetectionQueue<T> &input);
- ObjectDetectionResult popFromOutput();
- bool isOutputQueueEmpty();
-
ObjectDetectionTaskType _task_type;
// Variable-template member: 'static' is mandatory for a member variable
// template, so there is one queue per element type T shared by ALL
// ObjectDetection instances. NOTE(review): cross-instance sharing may be
// intentional (single inference thread) -- confirm with the thread-loop code.
template<typename T> std::queue<ObjectDetectionQueue<T> > static _incoming_queue;
std::queue<ObjectDetectionResult> _outgoing_queue;
ObjectDetectionResult _current_result {};
unsigned long _input_index {};
// Re-added helpers (reordered from above) plus members demoted from public:
// queue plumbing, meta-info lookup, the typed perform/performAsync entry
// points, and the thread-exit check.
+ void loadLabel();
+ void getEngineList();
+ void getDeviceList(const char *engine_type);
+ void updateResult(ObjectDetectionResult &result);
+ template<typename T>
+ void preprocess(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo, std::vector<T> &inputVector);
+ template<typename T> void pushToInput(ObjectDetectionQueue<T> &input);
+ ObjectDetectionResult popFromOutput();
+ bool isOutputQueueEmpty();
+ template<typename T> ObjectDetectionQueue<T> popFromInput();
+ template<typename T> bool isInputQueueEmpty();
+ void pushToOutput(ObjectDetectionResult &output);
+ std::shared_ptr<MetaInfo> getInputMetaInfo();
+ template<typename T> void perform(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo);
+ template<typename T> void performAsync(ObjectDetectionInput &input, std::shared_ptr<MetaInfo> metaInfo);
+ bool exitThread();
+
protected:
std::unique_ptr<mediavision::inference::Inference> _inference;
std::unique_ptr<MediaVision::Common::EngineConfig> _config;
void getOutputNames(std::vector<std::string> &names);
void getOutputTensor(std::string target_name, std::vector<float> &tensor);
// Promoted to protected: meta-file parsing (now std::string), the typed
// inference entry, and the pure-virtual result() hook; the free-function
// thread loop is a friend so it can reach the private queues.
+ void parseMetaFile(std::string meta_file_name);
+ template<typename T> void inference(std::vector<std::vector<T> > &inputVectors);
+ virtual ObjectDetectionResult &result() = 0;
+ template<typename T> friend void inferenceThreadLoop(ObjectDetection *object);
public:
ObjectDetection(ObjectDetectionTaskType task_type);
void getEngineType(unsigned int engine_index, char **engine_type);
void getNumberOfDevices(const char *engine_type, unsigned int *number_of_devices);
void getDeviceType(const char *engine_type, const unsigned int device_index, char **device_type);
// Dropped from the public surface (moved private/protected above); callers
// now pass the meta file name straight to configure().
- std::shared_ptr<MetaInfo> getInputMetaInfo();
- bool exitThread();
- void parseMetaFile(const char *meta_file_name);
- void configure();
+ void configure(std::string configFile);
void prepare();
// New untyped entry points that dispatch on the model input data type
// internally (definitions appear later in this patch).
+ void perform(mv_source_h &mv_src);
+ void performAsync(ObjectDetectionInput &input);
template<typename V> V &getOutput();
- template<typename T> void perform(mv_source_h &mv_src, std::shared_ptr<MetaInfo> metaInfo);
- template<typename T> void performAsync(ObjectDetectionInput &input, std::shared_ptr<MetaInfo> metaInfo);
- template<typename T> void inference(std::vector<std::vector<T> > &inputVectors);
- template<typename T> ObjectDetectionQueue<T> popFromInput();
- template<typename T> bool isInputQueueEmpty();
- void pushToOutput(ObjectDetectionResult &output);
- virtual ObjectDetectionResult &result() = 0;
};
} // machine_learning
// Review note: the adapter's two-step parseMetaFile()+configure() sequence is
// collapsed into a single configure(meta-file) call; the parse now happens
// inside ObjectDetection::configure().
template<typename T, typename V> void FaceDetectionAdapter<T, V>::configure()
{
- _object_detection->parseMetaFile("face_detection.json");
- _object_detection->configure();
+ _object_detection->configure("face_detection.json");
}
template<typename T, typename V> void FaceDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
template<typename T, typename V> void FaceDetectionAdapter<T, V>::perform()
{
// The UINT8/FLOAT32 data-type dispatch moved down into
// ObjectDetection::perform(mv_source_h&) (added later in this patch), so the
// adapter is now a thin forwarder and no longer touches MetaInfo.
- shared_ptr<MetaInfo> metaInfo = _object_detection->getInputMetaInfo();
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
- _object_detection->perform<unsigned char>(_source.inference_src, metaInfo);
- else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
- _object_detection->perform<float>(_source.inference_src, metaInfo);
- else
- throw InvalidOperation("Invalid model data type.");
+ _object_detection->perform(_source.inference_src);
}
template<typename T, typename V> void FaceDetectionAdapter<T, V>::performAsync(T &t)
{
// Same refactor as perform(): the data-type dispatch moved into
// ObjectDetection::performAsync(ObjectDetectionInput&); the adapter just
// forwards the input.
- shared_ptr<MetaInfo> metaInfo = _object_detection->getInputMetaInfo();
-
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8) {
- _object_detection->performAsync<unsigned char>(t, metaInfo);
- } else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32) {
- _object_detection->performAsync<float>(t, metaInfo);
- // TODO
- } else {
- throw InvalidOperation("Invalid model data type.");
- }
+ _object_detection->performAsync(t);
}
template<typename T, typename V> V &FaceDetectionAdapter<T, V>::getOutput()
readFile.close();
}
// Review note: the parameter changes from 'const char *' to std::string,
// removing the string(meta_file_name) conversion at the concatenation site.
// NOTE(review): passing by value copies the string; consider
// 'const std::string &' unless the copy is intended.
-void ObjectDetection::parseMetaFile(const char *meta_file_name)
+void ObjectDetection::parseMetaFile(string meta_file_name)
{
- _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + string(meta_file_name));
+ _config = make_unique<EngineConfig>(string(MV_CONFIG_PATH) + meta_file_name);
int ret = _config->getIntegerAttribute(string(MV_OBJECT_DETECTION_BACKEND_TYPE), &_backendType);
// NOTE(review): the hunk appears to elide lines here -- an 'if' directly
// followed by loadLabel() is unlikely to be the full body; the error branch
// presumably throws. Verify against the complete file.
if (ret != MEDIA_VISION_ERROR_NONE)
loadLabel();
}
// Review note: configure() now takes the meta-file name and parses it itself,
// so adapters make one call instead of parseMetaFile()+configure().
// NOTE(review): consider 'const string &configFile' to avoid a copy.
-void ObjectDetection::configure()
+void ObjectDetection::configure(string configFile)
{
+ parseMetaFile(configFile);
+
int ret = _inference->bind(_backendType, _targetDeviceType);
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to bind a backend engine.");
// NOTE(review): the two lines below belong to a different, elided function
// in this hunk (a template using T/inputVectors), not to configure().
inference<T>(inputVectors);
}
+// Synchronous untyped entry point: looks up the model's input data type from
+// the meta info and dispatches to the matching typed perform<T>() overload.
+// Throws InvalidOperation for any data type other than UINT8/FLOAT32.
+// (Replaces the dispatch previously duplicated in each adapter.)
+void ObjectDetection::perform(mv_source_h &mv_src)
+{
+ shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
+ if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
+ perform<unsigned char>(mv_src, metaInfo);
+ else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
+ perform<float>(mv_src, metaInfo);
+ else
+ throw InvalidOperation("Invalid model data type.");
+}
+
+
template<typename T> void inferenceThreadLoop(ObjectDetection *object)
{
// If user called destroy API then this thread loop will be terminated.
// NOTE(review): this hunk elides the loop body; the _thread_handle line
// below appears to belong to a different (elided) member function that
// spawns this loop with 'this', not to the free-function loop itself.
_thread_handle = make_unique<thread>(&inferenceThreadLoop<T>, this);
}
+// Asynchronous untyped entry point: mirrors perform(mv_source_h&) -- reads
+// the model's input data type and forwards to the typed performAsync<T>()
+// overload. Throws InvalidOperation for unsupported data types.
+void ObjectDetection::performAsync(ObjectDetectionInput &input)
+{
+ shared_ptr<MetaInfo> metaInfo = getInputMetaInfo();
+
+ if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8) {
+ performAsync<unsigned char>(input, metaInfo);
+ } else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32) {
+ performAsync<float>(input, metaInfo);
+ // TODO
+ } else {
+ throw InvalidOperation("Invalid model data type.");
+ }
+}
+
+
void ObjectDetection::updateResult(ObjectDetectionResult &result)
{
_current_result = result;
// Review note: same consolidation as in FaceDetectionAdapter -- the meta
// file is now passed to ObjectDetection::configure(), which parses it.
template<typename T, typename V> void ObjectDetectionAdapter<T, V>::configure()
{
- _object_detection->parseMetaFile("object_detection.json");
- _object_detection->configure();
+ _object_detection->configure("object_detection.json");
}
template<typename T, typename V> void ObjectDetectionAdapter<T, V>::getNumberOfEngines(unsigned int *number_of_engines)
template<typename T, typename V> void ObjectDetectionAdapter<T, V>::perform()
{
// Data-type dispatch removed here; ObjectDetection::perform(mv_source_h&)
// (added in this patch) performs it internally.
- shared_ptr<MetaInfo> metaInfo = _object_detection->getInputMetaInfo();
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8)
- _object_detection->perform<unsigned char>(_source.inference_src, metaInfo);
- else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32)
- _object_detection->perform<float>(_source.inference_src, metaInfo);
- else
- throw InvalidOperation("Invalid model data type.");
+ _object_detection->perform(_source.inference_src);
}
template<typename T, typename V> V &ObjectDetectionAdapter<T, V>::getOutput()
template<typename T, typename V> void ObjectDetectionAdapter<T, V>::performAsync(T &t)
{
// Data-type dispatch removed here; ObjectDetection::performAsync() (added in
// this patch) performs it internally.
- shared_ptr<MetaInfo> metaInfo = _object_detection->getInputMetaInfo();
-
- if (metaInfo->dataType == MV_INFERENCE_DATA_UINT8) {
- _object_detection->performAsync<unsigned char>(t, metaInfo);
- } else if (metaInfo->dataType == MV_INFERENCE_DATA_FLOAT32) {
- _object_detection->performAsync<float>(t, metaInfo);
- // TODO
- } else {
- throw InvalidOperation("Invalid model data type.");
- }
+ _object_detection->performAsync(t);
}
template class ObjectDetectionAdapter<ObjectDetectionInput, ObjectDetectionResult>;