IVGCVSW-5114 Enable memory import in TfLiteYoloV3Big App
author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>
          Mon, 17 Aug 2020 16:02:12 +0000 (17:02 +0100)
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>
          Mon, 17 Aug 2020 18:46:28 +0000 (18:46 +0000)
 * Enable memory import in TfLiteYoloV3Big App
 * Add isMemoryManaged flag to Concat and Splitter layers

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I7e00f5da2a016c09d480b744fb17ea5611af8365

src/armnn/layers/ConcatLayer.cpp
src/armnn/layers/ConcatLayer.hpp
src/armnn/layers/SplitterLayer.cpp
src/armnn/layers/SplitterLayer.hpp
tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
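
Before the per-file diffs, a minimal sketch of what enabling memory import means at the application level. This assumes the two-argument INetworkProperties(importEnabled, exportEnabled) constructor used in the app diff below; the helper name and error handling are illustrative, not part of the commit.

    // Sketch: loading a network with memory import/export enabled, so the
    // runtime can wrap caller-owned buffers instead of copying them into
    // pool-managed memory. LoadWithImport is a hypothetical helper.
    #include <armnn/ArmNN.hpp>
    #include <stdexcept>
    #include <string>
    #include <utility>

    void LoadWithImport(armnn::IRuntime& runtime,
                        armnn::IOptimizedNetworkPtr optimizedModel,
                        armnn::NetworkId& networkId)
    {
        std::string errorMessage;
        armnn::INetworkProperties props(/*importEnabled=*/true,
                                        /*exportEnabled=*/true);
        armnn::Status status = runtime.LoadNetwork(networkId,
                                                   std::move(optimizedModel),
                                                   errorMessage,
                                                   props);
        if (status != armnn::Status::Success)
        {
            throw std::runtime_error(errorMessage);
        }
    }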

diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 5b6d252..0118426 100644
@@ -36,12 +36,14 @@ std::unique_ptr<IWorkload> ConcatLayer::CreateWorkload(const IWorkloadFactory& f
 }
 
 template<typename FactoryType>
-void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory)
+void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
+                                const FactoryType& factory,
+                                bool isMemoryManaged)
 {
     //If sub tensors are supported then the concat
     //just needs to make sure that the outputs of the prev layer
     //are made subtensors of the output of the concat layer.
-    m_OutputHandlers[0].CreateTensorHandles(factory);
+    m_OutputHandlers[0].CreateTensorHandles(factory, isMemoryManaged);
 
     if (factory.SupportsSubTensors())
     {
@@ -168,21 +170,20 @@ void ConcatLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, con
 
 void ConcatLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                       const IWorkloadFactory& workloadFactory,
-                                      const bool IsMemoryManaged)
+                                      const bool isMemoryManaged)
 {
-    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
     if (factoryId == ITensorHandleFactory::LegacyFactoryId)
     {
-        CreateTensors(registry, workloadFactory);
+        CreateTensors(registry, workloadFactory, isMemoryManaged);
     }
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
         ARMNN_ASSERT(handleFactory);
-        CreateTensors(registry, *handleFactory);
+        CreateTensors(registry, *handleFactory, isMemoryManaged);
     }
 }
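
The same isMemoryManaged plumbing is applied to SplitterLayer below. For context, a hedged sketch of where the flag ends up; the OutputHandler and ITensorHandleFactory signatures are assumed from this ArmNN release, not shown in this diff:

    // Assumed forwarding path: OutputHandler passes isMemoryManaged on to the
    // tensor handle factory. A memory-managed handle draws its storage from
    // the backend's internal memory manager; an unmanaged one is left
    // unallocated so imported (caller-owned) memory can back it.
    void OutputHandler::CreateTensorHandles(const ITensorHandleFactory& factory,
                                            const bool isMemoryManaged)
    {
        m_TensorHandle = factory.CreateTensorHandle(m_TensorInfo, isMemoryManaged);
    }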
 
diff --git a/src/armnn/layers/ConcatLayer.hpp b/src/armnn/layers/ConcatLayer.hpp
index eaa5c15..3d9ba18 100644
@@ -56,7 +56,7 @@ protected:
 
 private:
     template <typename FactoryType>
-    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory);
+    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
 
 };
 
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index 75fc537..e5c9903 100644
@@ -33,7 +33,9 @@ std::unique_ptr<IWorkload> SplitterLayer::CreateWorkload(const IWorkloadFactory&
 }
 
 template<typename FactoryType>
-void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory)
+void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry,
+                                  const FactoryType& factory,
+                                  bool isMemoryManaged)
 {
     //If sub tensors are supported then all the "splitter" needs to do is to
     //set the outputs to be appropriate sub tensors of the input.
@@ -166,28 +168,27 @@ void SplitterLayer::CreateTensors(const TensorHandleFactoryRegistry& registry, c
     {
         for (unsigned int i = 0; i < m_Param.GetNumViews(); ++i)
         {
-            m_OutputHandlers[i].CreateTensorHandles(factory);
+            m_OutputHandlers[i].CreateTensorHandles(factory, isMemoryManaged);
         }
     }
 }
 
 void SplitterLayer::CreateTensorHandles(const TensorHandleFactoryRegistry& registry,
                                         const IWorkloadFactory& workloadFactory,
-                                        const bool IsMemoryManaged)
+                                        const bool isMemoryManaged)
 {
-    IgnoreUnused(IsMemoryManaged);
     OutputSlot& slot = GetOutputSlot(0);
     ITensorHandleFactory::FactoryId factoryId = slot.GetTensorHandleFactoryId();
 
     if (factoryId == ITensorHandleFactory::LegacyFactoryId)
     {
-        CreateTensors(registry, workloadFactory);
+        CreateTensors(registry, workloadFactory, isMemoryManaged);
     }
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
         ARMNN_ASSERT(handleFactory);
-        CreateTensors(registry, *handleFactory);
+        CreateTensors(registry, *handleFactory, isMemoryManaged);
     }
 }
 
diff --git a/src/armnn/layers/SplitterLayer.hpp b/src/armnn/layers/SplitterLayer.hpp
index ae725b9..9999009 100644
@@ -57,7 +57,7 @@ protected:
 
 private:
     template <typename FactoryType>
-    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory);
+    void CreateTensors(const TensorHandleFactoryRegistry& registry, const FactoryType& factory, bool isMemoryManaged);
 };
 
 } // namespace
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index bf3578c..fcc2177 100644
@@ -101,7 +101,8 @@ int LoadModel(const char* filename,
               ITfLiteParser& parser,
               IRuntime& runtime,
               NetworkId& networkId,
-              const std::vector<BackendId>& backendPreferences)
+              const std::vector<BackendId>& backendPreferences,
+              bool enableImport = false)
 {
     std::ifstream stream(filename, std::ios::in | std::ios::binary);
     if (!stream.is_open())
@@ -125,10 +126,10 @@ int LoadModel(const char* filename,
         return OPTIMIZE_NETWORK_ERROR;
     }
 
-    // Load backbone model into runtime
+    // Load model into runtime
     {
         std::string errorMessage;
-        INetworkProperties modelProps;
+        INetworkProperties modelProps(enableImport, enableImport);
         Status status = runtime.LoadNetwork(networkId, std::move(optimizedModel), errorMessage, modelProps);
         if (status != Status::Success)
         {
@@ -346,7 +347,8 @@ int main(int argc, char* argv[])
     // Load detector model
     ARMNN_LOG(info) << "Loading detector...";
     NetworkId detectorId;
-    CHECK_OK(LoadModel(progArgs.detectorDir.c_str(), *parser, *runtime, detectorId, progArgs.prefBackendsDetector));
+    CHECK_OK(LoadModel(
+        progArgs.detectorDir.c_str(), *parser, *runtime, detectorId, progArgs.prefBackendsDetector, true));
     auto detectIn0Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_1");
     auto detectIn1Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_2");
     auto detectIn2Id = parser->GetNetworkInputBindingInfo(0, "input_to_detector_3");
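
A hedged note on the detector path above: with import enabled, the buffers later handed to EnqueueWorkload are wrapped rather than copied, so they must outlive the inference and satisfy the backend's alignment requirements. A sketch using the binding infos resolved above (the input buffer here is a hypothetical placeholder):

    // detectIn0Id etc. are BindingPointInfo pairs: {LayerBindingId, TensorInfo}.
    std::vector<float> input0(detectIn0Id.second.GetNumElements());
    armnn::InputTensors inputTensors
    {
        { detectIn0Id.first, armnn::ConstTensor(detectIn0Id.second, input0.data()) }
    };
    // Outputs are bound the same way with armnn::Tensor over caller-owned,
    // suitably aligned buffers (omitted here), then:
    // runtime->EnqueueWorkload(detectorId, inputTensors, outputTensors);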