IVGCVSW-5012 Add importEnabled option for OptimizerOptions
author    Narumol Prangnawarat <narumol.prangnawarat@arm.com>  Wed, 19 Aug 2020 13:39:07 +0000 (14:39 +0100)
committer Narumol Prangnawarat <narumol.prangnawarat@arm.com>  Wed, 19 Aug 2020 13:43:09 +0000 (14:43 +0100)
 * Default importEnabled to false
 * Improve error messages

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I17f78986aa1d23e48b0844297a52029b1a9bbe3e

include/armnn/INetwork.hpp
src/armnn/Network.cpp
src/armnn/Network.hpp
src/armnn/test/TensorHandleStrategyTest.cpp
src/backends/backendsCommon/test/CompatibilityTests.cpp
src/backends/neon/NeonTensorHandle.hpp
src/backends/neon/test/NeonFallbackTests.cpp
src/backends/neon/test/NeonTensorHandleTests.cpp
tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp

diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp
index 6a143b0..5e8a6f2 100644
@@ -613,14 +613,17 @@ struct OptimizerOptions
         , m_Debug(false)
         , m_ReduceFp32ToBf16(false)
         , m_shapeInferenceMethod(armnn::ShapeInferenceMethod::ValidateOnly)
+        , m_ImportEnabled(false)
     {}
 
     OptimizerOptions(bool reduceFp32ToFp16, bool debug, bool reduceFp32ToBf16 = false,
-                     ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly)
+                     ShapeInferenceMethod shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly,
+                     bool importEnabled = false)
         : m_ReduceFp32ToFp16(reduceFp32ToFp16)
         , m_Debug(debug)
         , m_ReduceFp32ToBf16(reduceFp32ToBf16)
         , m_shapeInferenceMethod(shapeInferenceMethod)
+        , m_ImportEnabled(importEnabled)
     {
         if (m_ReduceFp32ToFp16 && m_ReduceFp32ToBf16)
         {
@@ -639,6 +642,9 @@ struct OptimizerOptions
 
     // Infer output size when not available
     ShapeInferenceMethod m_shapeInferenceMethod;
+
+    // Enable memory import
+    bool m_ImportEnabled;
 };
 
 /// Create an optimized version of the network
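With the new option in place, callers opt in to zero-copy import at optimize time rather than only at load time. A minimal sketch of the intended usage (hypothetical wrapper; the network, backend list, and runtime are assumed to already exist, mirroring the test updates further down):

    #include <armnn/ArmNN.hpp>
    #include <vector>

    // Sketch only: enable memory import when optimizing. m_ImportEnabled
    // defaults to false, so existing callers keep the copy behaviour.
    armnn::IOptimizedNetworkPtr OptimizeWithImport(const armnn::INetwork& net,
                                                   const std::vector<armnn::BackendId>& backends,
                                                   armnn::IRuntime& runtime)
    {
        armnn::OptimizerOptions optOptions;
        optOptions.m_ImportEnabled = true;
        return armnn::Optimize(net, backends, runtime.GetDeviceSpec(), optOptions);
    }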
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 94a9961..dec9468 100644
@@ -861,7 +861,8 @@ EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
                                    ITensorHandleFactory::FactoryId srcFactoryId,
                                    const Layer& layer,
                                    const Layer& connectedLayer,
-                                   TensorHandleFactoryRegistry& registry)
+                                   TensorHandleFactoryRegistry& registry,
+                                   bool importEnabled)
 {
     auto toBackend = backends.find(connectedLayer.GetBackendId());
     ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
@@ -899,7 +900,7 @@ EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
 
     // Search for export/import options
     ITensorHandleFactory* srcFactory = registry.GetFactory(srcFactoryId);
-    if (srcFactory->GetExportFlags() != 0)
+    if (srcFactory->GetExportFlags() != 0 && importEnabled)
     {
         for (auto&& pref : dstPrefs)
         {
@@ -945,11 +946,12 @@ EdgeStrategy CalculateEdgeStrategy(BackendsMap& backends,
 OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
+                                              bool importEnabled,
                                               Optional<std::vector<std::string>&> errMessages)
 {
     OptimizationResult result;
 
-    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages](Layer* layer)
+    optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled](Layer* layer)
     {
         ARMNN_ASSERT(layer);
 
@@ -985,7 +987,8 @@ OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
             {
                 const Layer& connectedLayer = connection->GetOwningLayer();
 
-                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer, registry);
+                EdgeStrategy strategy = CalculateEdgeStrategy(backends, slotOption, *layer, connectedLayer,
+                                                              registry, importEnabled);
 
                 if (strategy == EdgeStrategy::Undefined)
                 {
@@ -1122,6 +1125,7 @@ IOptimizedNetworkPtr Optimize(const INetwork& inNetwork,
     OptimizationResult strategyResult = SelectTensorHandleStrategy(optGraph,
                                                                    backends,
                                                                    tensorHandleFactoryRegistry,
+                                                                   options.m_ImportEnabled,
                                                                    messages);
     if (strategyResult.m_Error)
     {
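The net effect of the new parameter on edge-strategy selection: direct compatibility is still preferred, the export/import path is now only considered when import is enabled, and everything else falls back to a copy. An illustrative condensation (not the actual implementation, which also walks the destination backend's factory preferences):

    // Illustrative only: a local enum mirroring armnn::EdgeStrategy, used to
    // show the fallback order the hunks above implement.
    enum class EdgeStrategy { Undefined, DirectCompatibility, ExportToTarget, CopyToTarget };

    EdgeStrategy ChooseStrategy(bool directCompatible, bool canExportImport, bool importEnabled)
    {
        if (directCompatible)
        {
            return EdgeStrategy::DirectCompatibility; // same handle factory on both ends
        }
        if (importEnabled && canExportImport)
        {
            return EdgeStrategy::ExportToTarget;      // zero-copy export/import
        }
        return EdgeStrategy::CopyToTarget;            // safe default: inter-backend copy
    }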
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index 77d6b04..7136ee4 100644
@@ -323,6 +323,7 @@ BackendsMap CreateSupportedBackends(TensorHandleFactoryRegistry& handleFactoryRe
 OptimizationResult SelectTensorHandleStrategy(Graph& optGraph,
                                               BackendsMap& backends,
                                               TensorHandleFactoryRegistry& registry,
+                                              bool importEnabled,
                                               Optional<std::vector<std::string>&> errMessages);
 
 OptimizationResult AssignBackends(OptimizedNetwork* optNetObjPtr,
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 976e58e..c7aa30f 100644
@@ -339,7 +339,7 @@ BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     BOOST_TEST(result.m_Error == false);
     BOOST_TEST(result.m_Warning == false);
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index 599c984..90aa76e 100644
@@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE(Neon_Cl_DirectCompatibility_Test)
     graph.TopologicalSort();
 
     std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, errors);
+    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
     BOOST_TEST(result.m_Error == false);
     BOOST_TEST(result.m_Warning == false);
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index 4cc610c..be5bd45 100644
@@ -159,6 +159,14 @@ public:
                     return m_Imported;
                 }
             }
+            else
+            {
+                throw MemoryImportException("NeonTensorHandle::Import is disabled");
+            }
+        }
+        else
+        {
+            throw MemoryImportException("NeonTensorHandle::Incorrect import flag");
         }
         return false;
     }
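With the improved error messages, a rejected import on a Neon tensor handle now raises MemoryImportException instead of silently returning false. A hedged caller-side sketch (`handle` and `buffer` are assumed to exist; they are not part of this diff):

    #include <armnn/Exceptions.hpp>
    #include <armnn/Types.hpp>
    #include <iostream>

    // Sketch only: the exception text now distinguishes "import disabled"
    // from "wrong memory source".
    try
    {
        handle->Import(static_cast<void*>(buffer), armnn::MemorySource::Malloc);
    }
    catch (const armnn::MemoryImportException& e)
    {
        // e.g. "NeonTensorHandle::Import is disabled" or
        //      "NeonTensorHandle::Incorrect import flag"
        std::cerr << e.what() << std::endl;
    }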
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index cf4d91b..9a07ed2 100644
@@ -60,7 +60,9 @@ BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -196,7 +198,9 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -325,7 +329,9 @@ BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -461,7 +467,9 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
 
     // optimize the network
     std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
-    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    OptimizerOptions optOptions;
+    optOptions.m_ImportEnabled = true;
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
 
     OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
     Graph& graph = optNetObjPtr->GetGraph();
@@ -544,4 +552,136 @@ BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
     BOOST_TEST(outputData == expectedOutput);
 }
 
+BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
+{
+    using namespace armnn;
+
+    // Create a mock backend object
+    MockImportBackendInitialiser initialiser; // Register the Mock Backend
+    auto backendObjPtr = CreateBackendObject(MockImportBackendId());
+    BOOST_TEST((backendObjPtr != nullptr));
+
+    BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
+    if (backendIds.find("MockRef") == backendIds.end())
+    {
+        std::string message = "Cannot load MockRef";
+        BOOST_FAIL(message);
+    }
+
+    // Create runtime in which the test will run, allowing fallback to CpuAcc.
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input0 = net->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = net->AddInputLayer(1, "input1");
+    IConnectableLayer* input2 = net->AddInputLayer(2, "input2");
+    IConnectableLayer* sub = net->AddSubtractionLayer("sub");
+    IConnectableLayer* add = net->AddAdditionLayer("add");
+    IConnectableLayer* output = net->AddOutputLayer(0, "output");
+
+    input0->GetOutputSlot(0).Connect(sub->GetInputSlot(0));
+    input1->GetOutputSlot(0).Connect(sub->GetInputSlot(1));
+    input2->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    sub->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    TensorInfo info = TensorInfo({ 1, 2, 3, 2 }, DataType::Float32);
+
+    input0->GetOutputSlot(0).SetTensorInfo(info);
+    input1->GetOutputSlot(0).SetTensorInfo(info);
+    input2->GetOutputSlot(0).SetTensorInfo(info);
+    sub->GetOutputSlot(0).SetTensorInfo(info);
+    add->GetOutputSlot(0).SetTensorInfo(info);
+
+    // optimize the network
+    std::vector<BackendId> backends = { "MockRef", Compute::CpuAcc };
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+
+    OptimizedNetwork* optNetObjPtr = PolymorphicDowncast<OptimizedNetwork*>(optNet.get());
+    Graph& graph = optNetObjPtr->GetGraph();
+
+    armnn::Layer* const layer0 = GetFirstLayerWithName(graph, "input0");
+    armnn::Layer* const layer1 = GetFirstLayerWithName(graph, "input1");
+    armnn::Layer* const layer2 = GetFirstLayerWithName(graph, "input2");
+    armnn::Layer* const layer3 = GetFirstLayerWithName(graph, "sub");
+    armnn::Layer* const layer4 = GetFirstLayerWithName(graph, "[ sub (0) -> add (1) ]");
+    armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "add");
+    armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
+
+    // Checks order is valid.
+    BOOST_TEST(CheckOrder(graph, layer0, layer1));
+    BOOST_TEST(CheckOrder(graph, layer1, layer2));
+    BOOST_TEST(CheckOrder(graph, layer2, layer3));
+    BOOST_TEST(CheckOrder(graph, layer3, layer4));
+    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+
+    // Load it into the runtime. It should pass.
+    NetworkId netId;
+    std::string ignoredErrorMessage;
+    INetworkProperties networkProperties(false, false);
+
+    runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties);
+
+    // Creates structures for input & output
+    std::vector<float> inputData0
+    {
+        1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f, 0.0f
+    };
+    std::vector<float> inputData1
+    {
+        0.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 5.0f, 5.0f, 6.0f
+    };
+    std::vector<float> inputData2
+    {
+        12.0f, 11.0f, 10.0f, 9.0f, 8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<float> outputData(12);
+
+    std::vector<float> expectedOutput
+    {
+        13.0f, 11.0f, 11.0f, 9.0f, 7.0f, 7.0f, 7.0f, 5.0f, 5.0f, 3.0f, 3.0f, -5.0f
+    };
+
+    InputTensors inputTensors
+    {
+        { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
+        { 1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), inputData1.data()) },
+        { 2, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 2), inputData2.data()) }
+    };
+    OutputTensors outputTensors
+    {
+        { 0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data()) }
+    };
+
+    runtime->GetProfiler(netId)->EnableProfiling(true);
+
+    // Do the inference
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Retrieve the Profiler.Print() output to get the workload execution
+    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+    std::stringstream ss;
+    profilerManager.GetProfiler()->Print(ss);
+    std::string dump = ss.str();
+
+    // Contains CopyMemGeneric between the backends
+    std::size_t found = dump.find("CopyMemGeneric");
+    BOOST_TEST(found != std::string::npos);
+
+    // Does not contain ImportMemGeneric
+    found = dump.find("ImportMemGeneric");
+    BOOST_TEST(found == std::string::npos);
+
+    // Uses a memory copy between backends, since import is disabled
+    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+
+    // Check output is as expected
+    BOOST_TEST(outputData == expectedOutput);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
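For contrast with FallbackDisableImportFromCpuAcc above, the import-enabled tests earlier in this file assert the inverse conditions. A sketch of those checks, reusing the `dump` and `layer4` names from the test above:

    // Sketch: with m_ImportEnabled = true, the profiler dump shows an import
    // workload and no copy, and the inter-backend layer is MemImport.
    std::size_t found = dump.find("ImportMemGeneric");
    BOOST_TEST(found != std::string::npos);
    found = dump.find("CopyMemGeneric");
    BOOST_TEST(found == std::string::npos);
    BOOST_TEST((layer4->GetType() == LayerType::MemImport));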
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index c881632..97c7dd3 100644
@@ -661,7 +661,7 @@ BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryMemoryManaged)
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Cannot import as import is disabled
-    BOOST_CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    BOOST_CHECK_THROW(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
 }
 
 BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryImport)
diff --git a/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp b/tests/TfLiteYoloV3Big-Armnn/TfLiteYoloV3Big-Armnn.cpp
index 93982fd..2d373cd 100644
@@ -119,7 +119,9 @@ int LoadModel(const char* filename,
     ARMNN_LOG(debug) << "Model loaded ok: " << filename;
 
     // Optimize backbone model
-    auto optimizedModel = Optimize(*model, backendPreferences, runtime.GetDeviceSpec());
+    OptimizerOptions options;
+    options.m_ImportEnabled = enableImport;
+    auto optimizedModel = Optimize(*model, backendPreferences, runtime.GetDeviceSpec(), options);
     if (!optimizedModel)
     {
         ARMNN_LOG(fatal) << "Could not optimize the model:" << filename;