mv_inference: Use model_format instead of key->second
author: Inki Dae <inki.dae@samsung.com>
Wed, 12 Feb 2020 04:18:29 +0000 (13:18 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Tue, 14 Apr 2020 00:40:31 +0000 (09:40 +0900)
Change-Id: Ib306b0dddc945ad43effa3b7eef3eaebc3e2901c
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_inference/inference/src/Inference.cpp

index 95ce4370af88b61f6e9eb37826bf728c832eba70..a378dd895ef79b80e17cdb18bbe1102c0fe59cd4 100755 (executable)
@@ -458,8 +458,10 @@ int Inference::Load(void)
 
        std::vector<std::string> models;
 
+       inference_model_format_e model_format = (inference_model_format_e)key->second;
+
        // Push model file information to models vector properly according to detected model format.
-       switch (key->second) {
+       switch (model_format) {
        case INFERENCE_MODEL_CAFFE:
        case INFERENCE_MODEL_TF:
        case INFERENCE_MODEL_DARKNET:
@@ -477,7 +479,7 @@ int Inference::Load(void)
        }
 
     // Request model loading to backend engine.
-    ret = mBackend->Load(models, (inference_model_format_e)key->second);
+    ret = mBackend->Load(models, model_format);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                delete mBackend;
                LOGE("Fail to load model");