IVGCVSW-5363 Add Unmap layer and Unmap workload
authorJim Flynn <jim.flynn@arm.com>
Thu, 8 Oct 2020 10:42:30 +0000 (11:42 +0100)
committerJim Flynn <jim.flynn@arm.com>
Thu, 8 Oct 2020 14:24:58 +0000 (14:24 +0000)
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Change-Id: Ie5ecfa67e4763d0c058905592fe2e2fd7315f85c

13 files changed:
Android.mk
CMakeLists.txt
src/armnn/InternalTypes.hpp
src/armnn/LayersFwd.hpp
src/armnn/layers/UnmapLayer.cpp [new file with mode: 0644]
src/armnn/layers/UnmapLayer.hpp [new file with mode: 0644]
src/backends/backendsCommon/CMakeLists.txt
src/backends/backendsCommon/UnmapWorkload.cpp [new file with mode: 0644]
src/backends/backendsCommon/UnmapWorkload.hpp [new file with mode: 0644]
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/WorkloadData.hpp
src/backends/backendsCommon/common.mk
src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp

index 9be1b59..d0b284e 100644 (file)
@@ -206,6 +206,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/SwitchLayer.cpp \
         src/armnn/layers/TransposeConvolution2dLayer.cpp \
         src/armnn/layers/TransposeLayer.cpp \
+        src/armnn/layers/UnmapLayer.cpp \
         src/profiling/ActivateTimelineReportingCommandHandler.cpp \
         src/profiling/BufferManager.cpp \
         src/profiling/CommandHandler.cpp \
index c1c3af8..b7c8cb5 100644 (file)
@@ -382,6 +382,8 @@ list(APPEND armnn_sources
     src/armnn/layers/TransposeConvolution2dLayer.hpp
     src/armnn/layers/TransposeLayer.hpp
     src/armnn/layers/TransposeLayer.cpp
+    src/armnn/layers/UnmapLayer.cpp
+    src/armnn/layers/UnmapLayer.hpp
     src/armnn/BackendRegistry.cpp
     src/armnn/BackendSettings.hpp
     src/armnn/BackendHelper.cpp
index e95a63a..778408a 100644 (file)
@@ -74,7 +74,8 @@
     X(Subtraction) \
     X(Switch) \
     X(Transpose) \
-    X(TransposeConvolution2d)
+    X(TransposeConvolution2d) \
+    X(Unmap)
 
 /// When adding a new layer, adapt also the LastLayer enum value in the
 /// enum class LayerType below
@@ -87,7 +88,7 @@ enum class LayerType
   LIST_OF_LAYER_TYPE
 #undef X
   FirstLayer = Activation,
-  LastLayer = TransposeConvolution2d
+  LastLayer = Unmap
 };
 
 const char* GetLayerTypeAsCString(LayerType type);
index f22110d..ccc5ef2 100644 (file)
@@ -69,6 +69,7 @@
 #include "layers/SwitchLayer.hpp"
 #include "layers/TransposeConvolution2dLayer.hpp"
 #include "layers/TransposeLayer.hpp"
+#include "layers/UnmapLayer.hpp"
 
 namespace armnn
 {
@@ -160,5 +161,6 @@ DECLARE_LAYER(Subtraction)
 DECLARE_LAYER(Switch)
 DECLARE_LAYER(Transpose)
 DECLARE_LAYER(TransposeConvolution2d)
+DECLARE_LAYER(Unmap)
 
 }
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
new file mode 100644 (file)
index 0000000..d2df9c1
--- /dev/null
@@ -0,0 +1,49 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "UnmapLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/UnmapWorkload.hpp>
+
+namespace armnn
+{
+
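+// An UnmapLayer has a single input slot and no output slots.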
+UnmapLayer::UnmapLayer(const char* name)
+    : Layer(1, 0, LayerType::Unmap, name)
+{
+}
+
+UnmapLayer* UnmapLayer::Clone(Graph& graph) const
+{
+    return CloneBase<UnmapLayer>(graph, GetName());
+}
+
+std::unique_ptr<IWorkload> UnmapLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    IgnoreUnused(factory);
+    UnmapQueueDescriptor descriptor;
+
+    // Unlike other workloads, this one is created directly by the layer rather than by the workload factory.
+    return std::make_unique<UnmapWorkload>(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+void UnmapLayer::ValidateTensorShapesFromInputs()
+{
+    // Validates that the single input is connected; an Unmap layer has no outputs.
+    VerifyLayerConnections(1, CHECK_LOCATION());
+    ARMNN_ASSERT(GetNumOutputSlots() == 0);
+}
+
+void UnmapLayer::Accept(ILayerVisitor& visitor) const
+{
+    IgnoreUnused(visitor);
+    throw armnn::Exception("UnmapLayer should not appear in an input graph");
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/UnmapLayer.hpp b/src/armnn/layers/UnmapLayer.hpp
new file mode 100644 (file)
index 0000000..12d4342
--- /dev/null
@@ -0,0 +1,42 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+/// This layer represents an unmap operation on the memory of its input tensor handle.
+class UnmapLayer : public Layer
+{
+public:
+    /// Makes a workload for the Unmap type.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    UnmapLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref UnmapLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create an UnmapLayer.
+    /// @param [in] name Optional name for the layer.
+    UnmapLayer(const char* name);
+
+    /// Default destructor
+    ~UnmapLayer() = default;
+};
+
+} // namespace armnn
index 28b3088..cf6da80 100644 (file)
@@ -34,6 +34,8 @@ list(APPEND armnnBackendsCommon_sources
     OptimizationViews.hpp
     TensorHandleFactoryRegistry.cpp
     TensorHandleFactoryRegistry.hpp
+    UnmapWorkload.cpp
+    UnmapWorkload.hpp
     WorkloadDataCollector.hpp
     Workload.hpp
     WorkloadData.cpp
diff --git a/src/backends/backendsCommon/UnmapWorkload.cpp b/src/backends/backendsCommon/UnmapWorkload.cpp
new file mode 100644 (file)
index 0000000..b22158c
--- /dev/null
@@ -0,0 +1,22 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <backendsCommon/UnmapWorkload.hpp>
+
+namespace armnn
+{
+
+UnmapWorkload::UnmapWorkload(const UnmapQueueDescriptor& descriptor,
+                             const WorkloadInfo& info)
+    : BaseWorkload<UnmapQueueDescriptor>(descriptor, info)
+{
+}
+
+void UnmapWorkload::Execute() const
+{
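+    // Unmap the single input tensor handle, releasing the mapping that was
+    // typically established by a preceding Map workload.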
+    m_Data.m_Inputs[0]->Unmap();
+}
+
+} //namespace armnn
diff --git a/src/backends/backendsCommon/UnmapWorkload.hpp b/src/backends/backendsCommon/UnmapWorkload.hpp
new file mode 100644 (file)
index 0000000..7f13f0d
--- /dev/null
@@ -0,0 +1,19 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Workload.hpp"
+
+namespace armnn
+{
+
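+// Workload that unmaps the memory of its single input tensor handle.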
+class UnmapWorkload : public BaseWorkload<UnmapQueueDescriptor>
+{
+public:
+    UnmapWorkload(const UnmapQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+};
+
+} //namespace armnn
index 1344959..6d88664 100644 (file)
@@ -469,7 +469,25 @@ void MapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     const std::string descriptorName{"MapQueueDescriptor"};
 
     ValidateNumInputs(workloadInfo,  descriptorName, 1);
-    ValidateNumOutputs(workloadInfo, descriptorName , 0);
+    ValidateNumOutputs(workloadInfo, descriptorName, 0);
+
+    for (unsigned int i = 0; i < m_Inputs.size(); ++i)
+    {
+        if (!m_Inputs[i])
+        {
+            throw InvalidArgumentException(
+                fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
+        }
+    }
+}
+
+//---------------------------------------------------------------
+void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"UnmapQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 0);
 
     for (unsigned int i = 0; i < m_Inputs.size(); ++i)
     {
index be0a67e..c563626 100644 (file)
@@ -60,6 +60,11 @@ struct MapQueueDescriptor : QueueDescriptor
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct UnmapQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct MemCopyQueueDescriptor : QueueDescriptor
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
index ceec2ac..dd47d0a 100644 (file)
@@ -20,6 +20,7 @@ COMMON_SOURCES := \
     MemSyncWorkload.cpp \
     OptimizationViews.cpp \
     TensorHandleFactoryRegistry.cpp \
+    UnmapWorkload.cpp \
     WorkloadData.cpp \
     WorkloadFactory.cpp \
     WorkloadUtils.cpp
index 1078c2a..a8465b4 100644 (file)
@@ -7,6 +7,7 @@
 #include <Graph.hpp>
 
 #include <backendsCommon/MapWorkload.hpp>
+#include <backendsCommon/UnmapWorkload.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
 #include <armnn/utility/IgnoreUnused.hpp>
@@ -214,6 +215,22 @@ struct DummyLayer<armnn::SplitterLayer>
     armnn::SplitterLayer* m_Layer;
 };
 
+template<>
+struct DummyLayer<armnn::UnmapLayer, void>
+{
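+    // Unmap takes no descriptor, so the dummy layer is simply added to and
+    // erased from the test graph.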
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
+    }
+
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+
+    armnn::UnmapLayer* m_Layer;
+};
+
 template <typename ConvolutionLayerType>
 struct DummyConvolutionLayer
 {
@@ -628,7 +645,6 @@ DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)
 DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)
 
 DECLARE_LAYER_POLICY_1_PARAM(Prelu)
-
 DECLARE_LAYER_POLICY_2_PARAM(QLstm)
 
 DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
@@ -665,6 +681,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Transpose)
 
 DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
 
+DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)
+
 
 // Generic implementation to get the number of input slots for a given layer type;
 template<armnn::LayerType Type>
@@ -798,6 +816,13 @@ bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Map>)
     return true;
 }
 
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
+{
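+    // Unmap, like Map above, is created directly by its layer rather than through
+    // the workload factory, so it is reported as supported unconditionally.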
+    IgnoreUnused(factory);
+    return true;
+}
+
 // Helper function to compute the next type in the LayerType enum.
 constexpr armnn::LayerType NextType(armnn::LayerType type)
 {