{
namespace machine_learning
{
+namespace image_segmentation
+{
+enum class WorkingStatus { NONE, CONFIGURED, PREPARED, INFERENCED, RESULT_FETCHED };
+}
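Note: the relational guards added below (for example _status < WorkingStatus::PREPARED) rely on the declaration order of these enumerators. A minimal standalone sketch of that property, not part of the patch:

enum class WorkingStatus { NONE, CONFIGURED, PREPARED, INFERENCED, RESULT_FETCHED };
// Scoped enums compare by their underlying values, so declaration order
// defines the lifecycle order (message-less static_assert needs C++17).
static_assert(WorkingStatus::NONE < WorkingStatus::CONFIGURED);
static_assert(WorkingStatus::CONFIGURED < WorkingStatus::PREPARED);
static_assert(WorkingStatus::INFERENCED < WorkingStatus::RESULT_FETCHED);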
template<typename T> class ImageSegmentation : public IImageSegmentation
{
private:
std::unique_ptr<AsyncManager<T, ImageSegmentationResult> > _async_manager;
ImageSegmentationResult _current_result {};
+ image_segmentation::WorkingStatus _status { image_segmentation::WorkingStatus::NONE };
void loadLabel();
void getEngineList();
using namespace MediaVision::Common;
using namespace mediavision::common;
using namespace mediavision::machine_learning::exception;
+using namespace mediavision::machine_learning::image_segmentation;
namespace mediavision
{
int ret = _inference->bind(_config->getBackendType(), _config->getTargetDeviceType());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to bind a backend engine.");
+
+ _status = WorkingStatus::CONFIGURED;
}
template<typename T> void ImageSegmentation<T>::prepare()
{
+ if (_status < WorkingStatus::CONFIGURED)
+ throw InvalidOperation("Model is not configured yet.");
+
int ret = _inference->configureInputMetaInfo(_config->getInputMetaMap());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to configure input tensor info from meta file.");
throw InvalidOperation("Fail to load model files.");
configurePreprocess();
+
+ _status = WorkingStatus::PREPARED;
}
template<typename T> shared_ptr<MetaInfo> ImageSegmentation<T>::getInputMetaInfo()
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to run inference");
+ // The status can already be beyond PREPARED here when the async manager is in use.
+ if (_status == WorkingStatus::PREPARED)
+ _status = WorkingStatus::INFERENCED;
+
LOGI("LEAVE");
}
template<typename T> void ImageSegmentation<T>::perform(mv_source_h &mv_src)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
vector<vector<T> > inputVectors(1);
_preprocess.run<T>(mv_src, inputVectors[0]);
template<typename T> void ImageSegmentation<T>::performAsync(ImageSegmentationInput &input)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
if (!_async_manager) {
_async_manager = make_unique<AsyncManager<T, ImageSegmentationResult> >([this]() {
AsyncInputQueue<T> inputQueue = _async_manager->popFromInput();
throw InvalidOperation("Object detection has been already destroyed so invalid operation.");
_current_result = _async_manager->pop();
+ _status = WorkingStatus::RESULT_FETCHED;
} else {
- // TODO. Check if inference request is completed or not here.
- // If not then throw an exception.
+ if (_status < WorkingStatus::INFERENCED)
+ throw InvalidOperation("Inference not called or failed.");
+
_current_result = result();
+ _status = WorkingStatus::RESULT_FETCHED;
}
return _current_result;
template<typename T> ImageSegmentationResult &ImageSegmentation<T>::getOutputCache()
{
+ if (_status < WorkingStatus::RESULT_FETCHED)
+ throw InvalidOperation("Result not fetched.");
+
return _current_result;
}
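Taken together, the guards enforce a forward-only lifecycle. A minimal usage sketch, assuming the enclosing methods elided by this excerpt are configure() and getOutput() (getOutput() is named explicitly only in the ObjectDetection3d file below), that task is a constructed ImageSegmentation<float>, and that mv_src is an mv_source_h prepared by the caller:

task.configure();     // NONE       -> CONFIGURED
task.prepare();       // CONFIGURED -> PREPARED
task.perform(mv_src); // PREPARED   -> INFERENCED
ImageSegmentationResult &res = task.getOutput();         // -> RESULT_FETCHED
ImageSegmentationResult &cached = task.getOutputCache(); // requires RESULT_FETCHED
// Any call made out of order throws InvalidOperation with the messages above.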
{
namespace machine_learning
{
+namespace landmark_detection
+{
+enum class WorkingStatus { NONE, CONFIGURED, PREPARED, INFERENCED, RESULT_FETCHED };
+}
template<typename T> class LandmarkDetection : public ILandmarkDetection
{
private:
std::unique_ptr<AsyncManager<T, LandmarkDetectionResult> > _async_manager;
LandmarkDetectionResult _current_result {};
LandmarkDetectionTaskType _task_type;
+ landmark_detection::WorkingStatus _status { landmark_detection::WorkingStatus::NONE };
void loadLabel();
void getEngineList();
using namespace MediaVision::Common;
using namespace mediavision::common;
using namespace mediavision::machine_learning::exception;
+using namespace mediavision::machine_learning::landmark_detection;
namespace mediavision
{
int ret = _inference->bind(_config->getBackendType(), _config->getTargetDeviceType());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to bind a backend engine.");
+
+ _status = WorkingStatus::CONFIGURED;
}
template<typename T> void LandmarkDetection<T>::prepare()
{
+ if (_status < WorkingStatus::CONFIGURED)
+ throw InvalidOperation("Model is not configured yet.");
+
int ret = _inference->configureInputMetaInfo(_config->getInputMetaMap());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to configure input tensor info from meta file.");
throw InvalidOperation("Fail to load model files.");
configurePreprocess();
+
+ _status = WorkingStatus::PREPARED;
}
template<typename T> shared_ptr<MetaInfo> LandmarkDetection<T>::getInputMetaInfo()
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to run inference");
+ // The status can already be beyond PREPARED here when the async manager is in use.
+ if (_status == WorkingStatus::PREPARED)
+ _status = WorkingStatus::INFERENCED;
+
LOGI("LEAVE");
}
template<typename T> void LandmarkDetection<T>::perform(mv_source_h &mv_src)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
vector<vector<T> > inputVectors(1);
_preprocess.run<T>(mv_src, inputVectors[0]);
throw InvalidOperation("landmark detection has been already destroyed so invalid operation.");
_current_result = _async_manager->pop();
+ _status = WorkingStatus::RESULT_FETCHED;
} else {
- // TODO. Check if inference request is completed or not here.
- // If not then throw an exception.
+ if (_status < WorkingStatus::INFERENCED)
+ throw InvalidOperation("Inference not called or failed.");
+
_current_result = result();
+ _status = WorkingStatus::RESULT_FETCHED;
}
return _current_result;
template<typename T> LandmarkDetectionResult &LandmarkDetection<T>::getOutputCache()
{
+ if (_status < WorkingStatus::RESULT_FETCHED)
+ throw InvalidOperation("Result not fetched.");
+
return _current_result;
}
template<typename T> void LandmarkDetection<T>::performAsync(LandmarkDetectionInput &input)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
if (!_async_manager) {
_async_manager = make_unique<AsyncManager<T, LandmarkDetectionResult> >([this]() {
AsyncInputQueue<T> inputQueue = _async_manager->popFromInput();
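On the asynchronous path only the PREPARED guard applies up front; the AsyncManager worker thread drives inference afterwards, which is why the inference code above allows the status to already be beyond PREPARED. A hedged sketch of that flow, assuming getOutput() blocks in _async_manager->pop() until a result is available:

task.prepare();           // status: PREPARED
task.performAsync(input); // enqueue the input; a worker thread runs inference
LandmarkDetectionResult &res = task.getOutput(); // waits on pop(); status becomes
                                                 // RESULT_FETCHED on return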
{
namespace machine_learning
{
+namespace object_detection
+{
+enum class WorkingStatus { NONE, CONFIGURED, PREPARED, INFERENCED, RESULT_FETCHED };
+}
template<typename T> class ObjectDetection : public IObjectDetection
{
private:
ObjectDetectionTaskType _task_type { ObjectDetectionTaskType::OBJECT_DETECTION_TASK_NONE };
std::unique_ptr<AsyncManager<T, ObjectDetectionResult> > _async_manager;
ObjectDetectionResult _current_result;
+ object_detection::WorkingStatus _status { object_detection::WorkingStatus::NONE };
void loadLabel();
void getEngineList();
using namespace MediaVision::Common;
using namespace mediavision::common;
using namespace mediavision::machine_learning::exception;
+using namespace mediavision::machine_learning::object_detection;
namespace mediavision
{
int ret = _inference->bind(_config->getBackendType(), _config->getTargetDeviceType());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to bind a backend engine.");
+
+ _status = WorkingStatus::CONFIGURED;
}
template<typename T> void ObjectDetection<T>::prepare()
{
+ if (_status < WorkingStatus::CONFIGURED)
+ throw InvalidOperation("Model is not configured yet.");
+
int ret = _inference->configureInputMetaInfo(_config->getInputMetaMap());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to configure input tensor info from meta file.");
throw InvalidOperation("Fail to load model files.");
configurePreprocess();
+
+ _status = WorkingStatus::PREPARED;
}
template<typename T> shared_ptr<MetaInfo> ObjectDetection<T>::getInputMetaInfo()
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to run inference");
+ // The status can already be beyond PREPARED here when the async manager is in use.
+ if (_status == WorkingStatus::PREPARED)
+ _status = WorkingStatus::INFERENCED;
+
LOGI("LEAVE");
}
template<typename T> void ObjectDetection<T>::perform(mv_source_h &mv_src)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
vector<vector<T> > inputVectors(1);
_preprocess.run<T>(mv_src, inputVectors[0]);
template<typename T> void ObjectDetection<T>::performAsync(ObjectDetectionInput &input)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
if (!_async_manager) {
_async_manager = make_unique<AsyncManager<T, ObjectDetectionResult> >([this]() {
AsyncInputQueue<T> inputQueue = _async_manager->popFromInput();
throw InvalidOperation("Object detection has been already destroyed so invalid operation.");
_current_result = _async_manager->pop();
+ _status = WorkingStatus::RESULT_FETCHED;
} else {
- // TODO. Check if inference request is completed or not here.
- // If not then throw an exception.
+ if (_status < WorkingStatus::INFERENCED)
+ throw InvalidOperation("Inference not called or failed.");
+
_current_result = result();
+ _status = WorkingStatus::RESULT_FETCHED;
}
return _current_result;
template<typename T> ObjectDetectionResult &ObjectDetection<T>::getOutputCache()
{
+ if (_status < WorkingStatus::RESULT_FETCHED)
+ throw InvalidOperation("Result not fetched.");
+
return _current_result;
}
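The RESULT_FETCHED guard turns a previously silent misuse, reading the cache before any result has been produced, into an explicit error. A hypothetical misuse, assuming InvalidOperation derives from std::exception and that LOGE is available alongside the LOGI used above:

try {
	// `task` was prepared, but getOutput() was never called.
	ObjectDetectionResult &cached = task.getOutputCache();
} catch (const InvalidOperation &e) {
	// Throws "Result not fetched." instead of returning a
	// default-constructed ObjectDetectionResult.
	LOGE("%s", e.what());
}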
{
namespace machine_learning
{
+namespace object_detection_3d
+{
+enum class WorkingStatus { NONE, CONFIGURED, PREPARED, INFERENCED, RESULT_FETCHED };
+}
template<typename T> class ObjectDetection3d : public IObjectDetection3d
{
private:
ObjectDetection3dTaskType _task_type;
ObjectDetection3dResult _current_result;
+ object_detection_3d::WorkingStatus _status { object_detection_3d::WorkingStatus::NONE };
void loadLabel();
void getEngineList();
using namespace MediaVision::Common;
using namespace mediavision::common;
using namespace mediavision::machine_learning::exception;
+using namespace mediavision::machine_learning::object_detection_3d;
namespace mediavision
{
int ret = _inference->bind(_config->getBackendType(), _config->getTargetDeviceType());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to bind a backend engine.");
+
+ _status = WorkingStatus::CONFIGURED;
}
template<typename T> void ObjectDetection3d<T>::prepare()
{
+ if (_status < WorkingStatus::CONFIGURED)
+ throw InvalidOperation("Model is not configured yet.");
+
int ret = _inference->configureInputMetaInfo(_config->getInputMetaMap());
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to configure input tensor info from meta file.");
throw InvalidOperation("Fail to load model files.");
configurePreprocess();
+
+ _status = WorkingStatus::PREPARED;
}
template<typename T> shared_ptr<MetaInfo> ObjectDetection3d<T>::getInputMetaInfo()
if (ret != MEDIA_VISION_ERROR_NONE)
throw InvalidOperation("Fail to run inference");
+ // The status can already be beyond PREPARED here when the async manager is in use.
+ if (_status == WorkingStatus::PREPARED)
+ _status = WorkingStatus::INFERENCED;
+
LOGI("LEAVE");
}
template<typename T> void ObjectDetection3d<T>::perform(mv_source_h &mv_src)
{
+ if (_status < WorkingStatus::PREPARED)
+ throw InvalidOperation("The model is not prepared");
+
vector<vector<T> > inputVectors(1);
_preprocess.run<T>(mv_src, inputVectors[0]);
template<typename T> ObjectDetection3dResult &ObjectDetection3d<T>::getOutput()
{
// TODO: consider support for the async API later.
+
+ if (_status < WorkingStatus::INFERENCED)
+ throw InvalidOperation("Inference not called or failed.");
+
_current_result = result();
+ _status = WorkingStatus::RESULT_FETCHED;
return _current_result;
}
template<typename T> ObjectDetection3dResult &ObjectDetection3d<T>::getOutputCache()
{
+ if (_status < WorkingStatus::RESULT_FETCHED)
+ throw InvalidOperation("Result not fetched.");
+
return _current_result;
}
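One subtlety: RESULT_FETCHED compares greater than INFERENCED, so the guard in getOutput() keeps passing after the first fetch; repeated calls simply re-run result(), while getOutputCache() returns the stored copy without re-decoding. A sketch, assuming task3d is an ObjectDetection3d<float> that has already performed inference:

ObjectDetection3dResult &first = task3d.getOutput();       // INFERENCED -> RESULT_FETCHED
ObjectDetection3dResult &again = task3d.getOutput();       // still allowed; runs result() again
ObjectDetection3dResult &cached = task3d.getOutputCache(); // cached copy, no re-decode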