Check if model file path is valid or not (change 27/236327/3, tag submit/tizen/20200626.050805)
author: Inki Dae <inki.dae@samsung.com>
Tue, 16 Jun 2020 08:21:46 +0000 (17:21 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Thu, 18 Jun 2020 07:45:26 +0000 (16:45 +0900)
Change-Id: Id621bac742d9d2a5109462ffd284b956b0feae21
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_mlapi.cpp

index a00868a..ae8c740 100644 (file)
@@ -127,6 +127,14 @@ namespace MLAPIImpl
                                return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
                        }
 
+                       if (access(model_str.c_str(), R_OK) ||
+                                       access(model_paths[1].c_str(), R_OK)) {
+                               LOGE("model file path in [%s,%s], errno=%s", model_str.c_str(),
+                                                                                                  model_paths[1].c_str(),
+                                                                                                  strerror(errno));
+                               return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+                       }
+
                        // ML Single API of MLAPI requires model_paths rule like below,
                        // "so library file path,nb model file path" or vise versa.
                        model_str += "," + model_paths[1];
@@ -134,6 +142,12 @@ namespace MLAPIImpl
                case INFERENCE_BACKEND_ONE:
                        nnfw_type = ML_NNFW_TYPE_NNFW;
 
+                       if (access(model_str.c_str(), R_OK)) {
+                               LOGE("model file path in [%s], errno=%s", model_str.c_str(),
+                                                                                                                 strerror(errno));
+                               return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+                       }
+
                        if (mTargetDevice == INFERENCE_TARGET_CPU) {
                                nnfw_hw = ML_NNFW_HW_CPU_NEON;
                                LOGI("Target device is NEON.");
@@ -539,7 +553,7 @@ namespace MLAPIImpl
                        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
                }
 
-               // TODO. Consider mutiple output tensors.
+               // TODO. Consider multiple output tensors.
 
                err = ml_tensors_data_get_tensor_data(
                                output_data, 0, (void **) &output_buffers[0].buffer,