namespace fs = std::experimental::filesystem;
namespace InferenceEngineInterface {
namespace Common {
+
InferenceEngineCommon::InferenceEngineCommon() :
mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
mBackendModule(nullptr),
mBackendHandle(nullptr)
{
LOGW("ENTER");
LOGW("LEAVE");
}
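+
+// Verify that the given tensor buffers are usable: the vector must not be empty,
+// and each buffer needs a non-null pointer, a non-zero size and a supported data type.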
+int InferenceEngineCommon::CheckTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
+{
+ if (buffers.size() == 0) {
+ LOGE("tensor buffer vector is empty.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ for (std::vector<inference_engine_tensor_buffer>::const_iterator iter = buffers.begin(); iter != buffers.end(); ++iter) {
+ inference_engine_tensor_buffer tensor_buffer = *iter;
+ if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
+ LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ if (tensor_buffer.data_type < TENSOR_DATA_TYPE_FLOAT16 || tensor_buffer.data_type > TENSOR_DATA_TYPE_UINT32) {
+ LOGE("tensor data type is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
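+// Verify the given layer property: every tensor info needs a non-empty shape,
+// a non-zero size and a supported data type, and every layer name must be non-empty.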
+int InferenceEngineCommon::CheckLayerProperty(inference_engine_layer_property &property)
+{
+ // Verify tensor info values.
+ std::vector<inference_engine_tensor_info>::const_iterator info_iter;
+ for (info_iter = property.tensor_infos.begin(); info_iter != property.tensor_infos.end(); ++info_iter) {
+ inference_engine_tensor_info tensor_info = *info_iter;
+ if (tensor_info.shape.size() == 0 || tensor_info.size == 0) {
+ LOGE("shape size of tensor info or size of it is 0.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ if (tensor_info.data_type < TENSOR_DATA_TYPE_FLOAT16 || tensor_info.data_type > TENSOR_DATA_TYPE_UINT32) {
+ LOGE("tensor data type is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ // TODO: we may also need to check the shape type.
+ }
+
+ // Verify layer names.
+ std::vector<std::string>::const_iterator name_iter;
+ for (name_iter = property.layer_names.begin(); name_iter != property.layer_names.end(); ++name_iter) {
+ std::string name = *name_iter;
+
+ if (name.length() == 0) {
+ LOGE("layer name is invalid.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+ }
+
+ return INFERENCE_ENGINE_ERROR_NONE;
+}
+
int InferenceEngineCommon::EnableProfiler(bool enable)
{
if (enable != true && enable != false) {
int InferenceEngineCommon::GetInputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
{
- return mBackendHandle->GetInputTensorBuffers(buffers);
+ int ret = mBackendHandle->GetInputTensorBuffers(buffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get input tensor buffers.");
+ return ret;
+ }
+
+ // If the backend engine doesn't provide tensor buffers then just return.
+ // In this case, the InferenceEngineCommon framework will allocate the tensor buffers.
+ if (buffers.size() == 0) {
+ return ret;
+ }
+
+ return CheckTensorBuffers(buffers);
}
int InferenceEngineCommon::GetOutputTensorBuffers(std::vector<inference_engine_tensor_buffer> &buffers)
{
- return mBackendHandle->GetOutputTensorBuffers(buffers);
+ int ret = mBackendHandle->GetOutputTensorBuffers(buffers);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get output tensor buffers.");
+ return ret;
+ }
+
+ // If the backend engine doesn't provide tensor buffers then just return.
+ // In this case, the InferenceEngineCommon framework will allocate the tensor buffers.
+ if (buffers.size() == 0) {
+ return ret;
+ }
+
+ return CheckTensorBuffers(buffers);
}
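+
+// GetInputLayerProperty() below may legitimately return
+// INFERENCE_ENGINE_ERROR_INVALID_PARAMETER when the backend exposes no input layer
+// property. A minimal caller-side sketch of the expected fallback, assuming an
+// InferenceEngineCommon instance named engine and a caller-prepared input_info
+// (both names are illustrative only):
+//
+//   inference_engine_layer_property property;
+//   if (engine.GetInputLayerProperty(property) == INFERENCE_ENGINE_ERROR_INVALID_PARAMETER) {
+//       property.layer_names.push_back("input");      // model-specific layer name
+//       property.tensor_infos.push_back(input_info);  // tensor info filled by the caller
+//       engine.SetInputLayerProperty(property);
+//   }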
int InferenceEngineCommon::GetInputLayerProperty(inference_engine_layer_property &property)
{
- return mBackendHandle->GetInputLayerProperty(property);
+ int ret = mBackendHandle->GetInputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get input layer property.");
+ return ret;
+ }
+
+ // If the backend engine doesn't provide input layer property information then
+ // return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER so that the user provides
+ // the information manually via SetInputLayerProperty().
+ if (property.layer_names.size() == 0 && property.tensor_infos.size() == 0) {
+ LOGI("backend doesn't provide input layer property.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ return CheckLayerProperty(property);
}
int InferenceEngineCommon::GetOutputLayerProperty(inference_engine_layer_property &property)
{
- return mBackendHandle->GetOutputLayerProperty(property);
+ int ret = mBackendHandle->GetOutputLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Failed to get output layer property.");
+ return ret;
+ }
+
+ // If the backend engine doesn't provide output layer property information then
+ // return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER so that the user provides
+ // the information manually via SetOutputLayerProperty().
+ if (property.layer_names.size() == 0 && property.tensor_infos.size() == 0) {
+ LOGI("backend doesn't provide output layer property.");
+ return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
+ }
+
+ return CheckLayerProperty(property);
}
int InferenceEngineCommon::SetInputLayerProperty(inference_engine_layer_property &property)
{
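+ // Validate the property given by the caller before handing it over to the backend.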
+ int ret = CheckLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Given input layer property is invalid.");
+ return ret;
+ }
+
return mBackendHandle->SetInputLayerProperty(property);
}
int InferenceEngineCommon::SetOutputLayerProperty(inference_engine_layer_property &property)
{
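+ // Same validation for the output property before it reaches the backend.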
+ int ret = CheckLayerProperty(property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Given output layer property is invalid.");
+ return ret;
+ }
+
return mBackendHandle->SetOutputLayerProperty(property);
}