IVGCVSW-3622 Add Neon TensorHandleFactory
author     Narumol Prangnawarat <narumol.prangnawarat@arm.com>
           Wed, 14 Aug 2019 11:25:50 +0000 (12:25 +0100)
committer  Narumol Prangnawarat <narumol.prangnawarat@arm.com>
           Wed, 14 Aug 2019 14:38:24 +0000 (14:38 +0000)
Signed-off-by: James Conroy <james.conroy@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I9f24f7d97c1e7d37ee7d58ff85c6c3f8496e52ec

src/armnn/test/TensorHandleStrategyTest.cpp
src/backends/backendsCommon/ITensorHandleFactory.hpp
src/backends/neon/CMakeLists.txt
src/backends/neon/NeonBackend.cpp
src/backends/neon/NeonBackend.hpp
src/backends/neon/NeonTensorHandleFactory.cpp [new file with mode: 0644]
src/backends/neon/NeonTensorHandleFactory.hpp [new file with mode: 0644]
src/backends/neon/backend.mk

diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index c391b04..2056b6f 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -50,6 +50,12 @@ public:
         return nullptr;
     }
 
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                      DataLayout dataLayout) const override
+    {
+        return nullptr;
+    }
+
     const FactoryId GetId() const override { return m_Id; }
 
     bool SupportsSubTensors() const override { return true; }
@@ -82,6 +88,12 @@ public:
         return nullptr;
     }
 
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                      DataLayout dataLayout) const override
+    {
+        return nullptr;
+    }
+
     const FactoryId GetId() const override { return m_Id; }
 
     bool SupportsSubTensors() const override { return true; }
diff --git a/src/backends/backendsCommon/ITensorHandleFactory.hpp b/src/backends/backendsCommon/ITensorHandleFactory.hpp
index 89a2a7f..9e61b5f 100644
--- a/src/backends/backendsCommon/ITensorHandleFactory.hpp
+++ b/src/backends/backendsCommon/ITensorHandleFactory.hpp
@@ -27,6 +27,9 @@ public:
 
     virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const = 0;
 
+    virtual std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                              DataLayout dataLayout) const = 0;
+
     virtual const FactoryId GetId() const = 0;
 
     virtual bool SupportsSubTensors() const = 0;
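
The new CreateTensorHandle overload is pure virtual, so every factory (including the mock factories in TensorHandleStrategyTest.cpp above) must now accept an explicit DataLayout instead of relying on the layout carried by the TensorInfo. A minimal caller-side sketch of the extended interface; the factory pointer and tensor dimensions here are placeholders, not part of this change:

    // Request an NHWC handle through the new overload.
    armnn::TensorInfo info({ 1, 16, 16, 3 }, armnn::DataType::Float32);
    std::unique_ptr<armnn::ITensorHandle> handle =
        factory->CreateTensorHandle(info, armnn::DataLayout::NHWC);
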
diff --git a/src/backends/neon/CMakeLists.txt b/src/backends/neon/CMakeLists.txt
index be318e9..7464a2e 100644
--- a/src/backends/neon/CMakeLists.txt
+++ b/src/backends/neon/CMakeLists.txt
@@ -15,6 +15,8 @@ if(ARMCOMPUTENEON)
         NeonWorkloadFactory.cpp
         NeonWorkloadFactory.hpp
         NeonTensorHandle.hpp
+        NeonTensorHandleFactory.cpp
+        NeonTensorHandleFactory.hpp
         NeonTimer.hpp
         NeonTimer.cpp
     )
diff --git a/src/backends/neon/NeonBackend.cpp b/src/backends/neon/NeonBackend.cpp
index d7be844..f86509c 100644
--- a/src/backends/neon/NeonBackend.cpp
+++ b/src/backends/neon/NeonBackend.cpp
@@ -7,6 +7,7 @@
 #include "NeonBackendId.hpp"
 #include "NeonWorkloadFactory.hpp"
 #include "NeonLayerSupport.hpp"
+#include "NeonTensorHandleFactory.hpp"
 
 #include <aclCommon/BaseMemoryManager.hpp>
 
@@ -58,6 +59,17 @@ IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
         boost::polymorphic_pointer_downcast<NeonMemoryManager>(memoryManager));
 }
 
+IBackendInternal::IWorkloadFactoryPtr NeonBackend::CreateWorkloadFactory(
+    class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const
+{
+    auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
+                                                             BaseMemoryManager::MemoryAffinity::Offset);
+
+    tensorHandleFactoryRegistry.RegisterMemoryManager(memoryManager);
+    return std::make_unique<NeonWorkloadFactory>(
+        boost::polymorphic_pointer_downcast<NeonMemoryManager>(memoryManager));
+}
+
 IBackendInternal::IBackendContextPtr NeonBackend::CreateBackendContext(const IRuntime::CreationOptions&) const
 {
     return IBackendContextPtr{};
@@ -83,4 +95,18 @@ OptimizationViews NeonBackend::OptimizeSubgraphView(const SubgraphView& subgraph
     return optimizationViews;
 }
 
+std::vector<ITensorHandleFactory::FactoryId> NeonBackend::GetHandleFactoryPreferences() const
+{
+    return std::vector<ITensorHandleFactory::FactoryId>{ "Arm/Neon/TensorHandleFactory" };
+}
+
+void NeonBackend::RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry)
+{
+    auto memoryManager = std::make_shared<NeonMemoryManager>(std::make_unique<arm_compute::Allocator>(),
+                                                             BaseMemoryManager::MemoryAffinity::Offset);
+
+    registry.RegisterMemoryManager(memoryManager);
+    registry.RegisterFactory(std::make_unique<NeonTensorHandleFactory>(memoryManager, "Arm/Neon/TensorHandleFactory"));
+}
+
 } // namespace armnn
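
RegisterTensorHandleFactories gives the registry a shared NeonMemoryManager and a NeonTensorHandleFactory keyed by the same id that GetHandleFactoryPreferences returns, so a caller can resolve the backend's preferred factory by id. A rough sketch of that flow, assuming the registry exposes a GetFactory(FactoryId) lookup (not part of this diff):

    // Hypothetical load-time flow: register the factories, then resolve by preference order.
    armnn::TensorHandleFactoryRegistry registry;
    armnn::NeonBackend backend;
    backend.RegisterTensorHandleFactories(registry);

    armnn::ITensorHandleFactory* chosen = nullptr;
    for (const auto& id : backend.GetHandleFactoryPreferences())
    {
        if (armnn::ITensorHandleFactory* factory = registry.GetFactory(id))
        {
            chosen = factory;  // "Arm/Neon/TensorHandleFactory" is the only preference here
            break;
        }
    }
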
diff --git a/src/backends/neon/NeonBackend.hpp b/src/backends/neon/NeonBackend.hpp
index 398337f..49b03b0 100644
--- a/src/backends/neon/NeonBackend.hpp
+++ b/src/backends/neon/NeonBackend.hpp
@@ -23,12 +23,19 @@ public:
     IWorkloadFactoryPtr CreateWorkloadFactory(
         const IBackendInternal::IMemoryManagerSharedPtr& memoryManager = nullptr) const override;
 
+    IWorkloadFactoryPtr CreateWorkloadFactory(
+        class TensorHandleFactoryRegistry& tensorHandleFactoryRegistry) const override;
+
     IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions&) const override;
 
     IBackendInternal::Optimizations GetOptimizations() const override;
     IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override;
 
     OptimizationViews OptimizeSubgraphView(const SubgraphView& subgraph) const override;
+
+    std::vector<ITensorHandleFactory::FactoryId> GetHandleFactoryPreferences() const override;
+
+    void RegisterTensorHandleFactories(class TensorHandleFactoryRegistry& registry) override;
 };
 
 } // namespace armnn
diff --git a/src/backends/neon/NeonTensorHandleFactory.cpp b/src/backends/neon/NeonTensorHandleFactory.cpp
new file mode 100644
index 0000000..ef34234
--- /dev/null
+++ b/src/backends/neon/NeonTensorHandleFactory.cpp
@@ -0,0 +1,79 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonTensorHandleFactory.hpp"
+#include "NeonTensorHandle.hpp"
+
+#include <boost/core/ignore_unused.hpp>
+
+namespace armnn
+{
+
+using FactoryId = std::string;
+
+std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateSubTensorHandle(ITensorHandle& parent,
+                                                                              TensorShape const& subTensorShape,
+                                                                              unsigned int const* subTensorOrigin)
+                                                                              const
+{
+    const arm_compute::TensorShape shape = armcomputetensorutils::BuildArmComputeTensorShape(subTensorShape);
+
+    arm_compute::Coordinates coords;
+    coords.set_num_dimensions(subTensorShape.GetNumDimensions());
+    for (unsigned int i = 0; i < subTensorShape.GetNumDimensions(); i++)
+    {
+        // Arm compute indexes tensor coords in reverse order.
+        unsigned int revertedIndex = subTensorShape.GetNumDimensions() - i - 1;
+        coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
+    }
+
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
+    return std::make_unique<NeonSubTensorHandle>(
+            boost::polymorphic_downcast<IAclTensorHandle*>(&parent), shape, coords);
+}
+
+std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
+{
+    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo);
+    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
+
+    return tensorHandle;
+}
+
+std::unique_ptr<ITensorHandle> NeonTensorHandleFactory::CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                                           DataLayout dataLayout) const
+{
+    auto tensorHandle = std::make_unique<NeonTensorHandle>(tensorInfo, dataLayout);
+    tensorHandle->SetMemoryGroup(m_MemoryManager->GetInterLayerMemoryGroup());
+
+    return tensorHandle;
+}
+
+const FactoryId NeonTensorHandleFactory::GetId() const
+{
+    return m_Id;
+}
+
+bool NeonTensorHandleFactory::SupportsSubTensors() const
+{
+    return true;
+}
+
+MemorySourceFlags NeonTensorHandleFactory::GetExportFlags() const
+{
+    return m_ExportFlags;
+}
+
+MemorySourceFlags NeonTensorHandleFactory::GetImportFlags() const
+{
+    return m_ImportFlags;
+}
+
+} // namespace armnn
\ No newline at end of file
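
The loop in CreateSubTensorHandle writes the sub-tensor origin into arm_compute::Coordinates in reverse order, because the Compute Library indexes dimension 0 as the fastest-varying dimension while Arm NN's TensorShape puts the slowest-varying dimension first. A standalone illustration with plain arrays; the values are just an example:

    // For a 4-D NCHW origin {N, C, H, W} = {0, 1, 2, 3} ...
    const unsigned int subTensorOrigin[] = { 0, 1, 2, 3 };
    const unsigned int numDims = 4;
    int coords[4];
    for (unsigned int i = 0; i < numDims; ++i)
    {
        // ... ACL coordinate i takes the value of the mirrored Arm NN dimension.
        coords[i] = static_cast<int>(subTensorOrigin[numDims - i - 1]);
    }
    // coords is now { 3, 2, 1, 0 }: W first, N last.
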
diff --git a/src/backends/neon/NeonTensorHandleFactory.hpp b/src/backends/neon/NeonTensorHandleFactory.hpp
new file mode 100644
index 0000000..a2e46e2
--- /dev/null
+++ b/src/backends/neon/NeonTensorHandleFactory.hpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <aclCommon/BaseMemoryManager.hpp>
+#include <backendsCommon/ITensorHandleFactory.hpp>
+
+namespace armnn
+{
+
+class NeonTensorHandleFactory : public ITensorHandleFactory
+{
+public:
+    NeonTensorHandleFactory(std::weak_ptr<NeonMemoryManager> mgr, ITensorHandleFactory::FactoryId id)
+        : m_Id(id)
+        , m_MemoryManager(mgr)
+    {}
+
+    std::unique_ptr<ITensorHandle> CreateSubTensorHandle(ITensorHandle& parent,
+                                                         TensorShape const& subTensorShape,
+                                                         unsigned int const* subTensorOrigin) const override;
+
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo) const override;
+
+    std::unique_ptr<ITensorHandle> CreateTensorHandle(const TensorInfo& tensorInfo,
+                                                      DataLayout dataLayout) const override;
+
+    const FactoryId GetId() const override;
+
+    bool SupportsSubTensors() const override;
+
+    MemorySourceFlags GetExportFlags() const override;
+
+    MemorySourceFlags GetImportFlags() const override;
+
+private:
+    FactoryId m_Id = "Arm/Neon/TensorHandleFactory";
+    MemorySourceFlags m_ImportFlags{};  // value-initialised: no import sources advertised yet
+    MemorySourceFlags m_ExportFlags{};  // value-initialised: no export sources advertised yet
+    mutable std::shared_ptr<NeonMemoryManager> m_MemoryManager;
+};
+
+} // namespace armnn
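
A minimal construction sketch, mirroring what NeonBackend::RegisterTensorHandleFactories does above; the tensor dimensions are placeholders:

    // Build the memory manager the factory depends on, then ask the factory for a handle.
    auto memoryManager = std::make_shared<armnn::NeonMemoryManager>(
        std::make_unique<arm_compute::Allocator>(),
        armnn::BaseMemoryManager::MemoryAffinity::Offset);

    armnn::NeonTensorHandleFactory factory(memoryManager, "Arm/Neon/TensorHandleFactory");

    armnn::TensorInfo info({ 1, 8, 8, 16 }, armnn::DataType::Float32);
    auto handle = factory.CreateTensorHandle(info, armnn::DataLayout::NHWC);
    // The handle is attached to the memory manager's inter-layer memory group.
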
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 7fc0c1c..4a0d05e 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -17,6 +17,7 @@ BACKEND_SOURCES := \
         NeonBackend.cpp \
         NeonInterceptorScheduler.cpp \
         NeonLayerSupport.cpp \
+        NeonTensorHandleFactory.cpp \
         NeonTimer.cpp \
         NeonWorkloadFactory.cpp \
         workloads/NeonActivationWorkload.cpp \