Receive model file format from inference engine interface
author    Inki Dae <inki.dae@samsung.com>
          Tue, 11 Feb 2020 06:29:51 +0000 (15:29 +0900)
committer Inki Dae <inki.dae@samsung.com>
          Tue, 11 Feb 2020 06:29:51 +0000 (15:29 +0900)
Change-Id: I9a63c2d214221eb56049a111d26e1123f7e7c58c
Signed-off-by: Inki Dae <inki.dae@samsung.com>
src/inference_engine_armnn.cpp
src/inference_engine_armnn_private.h

diff --git a/src/inference_engine_armnn.cpp b/src/inference_engine_armnn.cpp
index 464c45997fcc0ed59161945fe1c36f4c6aefac1f..ef5f316cfb4a51971c282fe5fe5435b27360c70b 100644
--- a/src/inference_engine_armnn.cpp
+++ b/src/inference_engine_armnn.cpp
@@ -197,7 +197,7 @@ int InferenceARMNN::CreateNetwork(std::string model_path)
     return CreateTfLiteNetwork(model_path);
 }
 
-int InferenceARMNN::Load(std::vector<std::string> model_paths, unsigned int num_of_models)
+int InferenceARMNN::Load(std::vector<std::string> model_paths, inference_model_format_e model_format)
 {
     LOGI("ENTER");
 
diff --git a/src/inference_engine_armnn_private.h b/src/inference_engine_armnn_private.h
index 06dbfa5c461616e070f04b9b5d89d271eca4c5fe..fae068f4498a568187b81a5233642a7f4f4ee53c 100644
--- a/src/inference_engine_armnn_private.h
+++ b/src/inference_engine_armnn_private.h
@@ -55,7 +55,7 @@ public:
 
     int SetTargetDevices(int types) override;
 
-    int Load(std::vector<std::string> model_paths, unsigned int num_of_models) override;
+    int Load(std::vector<std::string> model_paths, inference_model_format_e model_format) override;
 
     int CreateInputLayerPassage() override;
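
For illustration only, a minimal self-contained sketch of the updated contract: the caller now passes the model file format down to the backend instead of a model count, presumably so the backend can pick the matching parser (e.g. its TFLite path) without guessing from file names. The enum values and the interface class below are placeholders assumed for this sketch, not taken from the real inference-engine-interface headers.

    // Sketch of the changed Load() contract (names/values are assumptions).
    #include <string>
    #include <vector>

    enum inference_model_format_e {
        INFERENCE_MODEL_NONE = 0,
        INFERENCE_MODEL_TFLITE,
        INFERENCE_MODEL_CAFFE,
    };

    // Stand-in for the common backend interface that InferenceARMNN implements.
    class IInferenceEngineCommon {
    public:
        virtual ~IInferenceEngineCommon() = default;

        // Before this change the second parameter was `unsigned int num_of_models`;
        // now the caller states the model format explicitly.
        virtual int Load(std::vector<std::string> model_paths,
                         inference_model_format_e model_format) = 0;
    };

    // Caller side: the engine layer decides the format and hands it down.
    int LoadModel(IInferenceEngineCommon &backend, const std::string &path)
    {
        std::vector<std::string> model_paths = { path };
        return backend.Load(model_paths, INFERENCE_MODEL_TFLITE);
    }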