src/armnn/layers/SwitchLayer.cpp \
src/armnn/layers/TransposeConvolution2dLayer.cpp \
src/armnn/layers/TransposeLayer.cpp \
+ src/armnn/layers/UnmapLayer.cpp \
src/profiling/ActivateTimelineReportingCommandHandler.cpp \
src/profiling/BufferManager.cpp \
src/profiling/CommandHandler.cpp \
src/armnn/layers/TransposeConvolution2dLayer.hpp
src/armnn/layers/TransposeLayer.hpp
src/armnn/layers/TransposeLayer.cpp
+ src/armnn/layers/UnmapLayer.cpp
+ src/armnn/layers/UnmapLayer.hpp
src/armnn/BackendRegistry.cpp
src/armnn/BackendSettings.hpp
src/armnn/BackendHelper.cpp
X(Subtraction) \
X(Switch) \
X(Transpose) \
- X(TransposeConvolution2d)
+ X(TransposeConvolution2d) \
+ X(Unmap)
/// When adding a new layer, adapt also the LastLayer enum value in the
/// enum class LayerType below
LIST_OF_LAYER_TYPE
#undef X
FirstLayer = Activation,
- LastLayer = TransposeConvolution2d
+ LastLayer = Unmap
};
const char* GetLayerTypeAsCString(LayerType type);
#include "layers/SwitchLayer.hpp"
#include "layers/TransposeConvolution2dLayer.hpp"
#include "layers/TransposeLayer.hpp"
+#include "layers/UnmapLayer.hpp"
namespace armnn
{
DECLARE_LAYER(Switch)
DECLARE_LAYER(Transpose)
DECLARE_LAYER(TransposeConvolution2d)
+DECLARE_LAYER(Unmap)
}
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "UnmapLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/UnmapWorkload.hpp>
+
+namespace armnn
+{
+
+/// Constructs an UnmapLayer with 1 input slot and 0 output slots
+/// (see the Layer base-constructor arguments below).
+/// @param [in] name Optional name for the layer.
+UnmapLayer::UnmapLayer(const char* name)
+    : Layer(1, 0, LayerType::Unmap, name)
+{
+}
+
+/// Creates a copy of this layer in the given graph via CloneBase,
+/// carrying over the layer name.
+UnmapLayer* UnmapLayer::Clone(Graph& graph) const
+{
+    return CloneBase<UnmapLayer>(graph, GetName());
+}
+
+/// Creates the UnmapWorkload directly; the factory parameter is deliberately
+/// unused (see the comment inside the body).
+/// @return The newly created UnmapWorkload.
+std::unique_ptr<IWorkload> UnmapLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    IgnoreUnused(factory);
+    UnmapQueueDescriptor descriptor;
+
+    // This is different from other workloads. Does not get created by the workload factory.
+    return std::make_unique<UnmapWorkload>(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+/// No output shapes to infer: the layer has no output slots (asserted below),
+/// so only the input connection is checked.
+void UnmapLayer::ValidateTensorShapesFromInputs()
+{
+    // Validates that the single input is connected.
+    VerifyLayerConnections(1, CHECK_LOCATION());
+    ARMNN_ASSERT(GetNumOutputSlots() == 0);
+}
+
+/// Visiting an UnmapLayer is always an error: the exception message states
+/// that this layer should not appear in an input graph.
+void UnmapLayer::Accept(ILayerVisitor& visitor) const
+{
+    IgnoreUnused(visitor);
+    throw armnn::Exception("UnmapLayer should not appear in an input graph");
+}
+
+} // namespace armnn
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+/// This layer represents a memory copy operation.
+class UnmapLayer : public Layer
+{
+public:
+ /// Makes a workload for the Unmap type.
+ /// @param [in] graph The graph where this layer can be found.
+ /// @param [in] factory The workload factory which will create the workload.
+ /// @return A pointer to the created workload, or nullptr if not created.
+ virtual std::unique_ptr<IWorkload>CreateWorkload(const IWorkloadFactory& factory) const override;
+
+ /// Creates a dynamically-allocated copy of this layer.
+ /// @param [in] graph The graph into which this layer is being cloned.
+ UnmapLayer* Clone(Graph& graph) const override;
+
+ /// Check if the input tensor shape(s)
+ /// will lead to a valid configuration of @ref UnmapLayer.
+ /// @param [in] shapeInferenceMethod Indicates if output shape shall be overwritten or just validated.
+ void ValidateTensorShapesFromInputs() override;
+
+ void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+ /// Constructor to create a UnmapLayer.
+ /// @param [in] name Optional name for the layer.
+ UnmapLayer(const char* name);
+
+ /// Default destructor
+ ~UnmapLayer() = default;
+};
+
+} // namespace
OptimizationViews.hpp
TensorHandleFactoryRegistry.cpp
TensorHandleFactoryRegistry.hpp
+ UnmapWorkload.cpp
+ UnmapWorkload.hpp
WorkloadDataCollector.hpp
Workload.hpp
WorkloadData.cpp
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <backendsCommon/UnmapWorkload.hpp>
+
+namespace armnn
+{
+
+/// Forwards the descriptor and workload info to BaseWorkload; no extra
+/// state is kept by this workload.
+UnmapWorkload::UnmapWorkload(const UnmapQueueDescriptor& descriptor,
+                             const WorkloadInfo& info)
+    : BaseWorkload<UnmapQueueDescriptor>(descriptor, info)
+{
+}
+
+/// Unmaps the single input tensor handle of this workload.
+void UnmapWorkload::Execute() const
+{
+    m_Data.m_Inputs[0]->Unmap();
+}
+
+} //namespace armnn
--- /dev/null
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "Workload.hpp"
+
+namespace armnn
+{
+
+/// Workload that, when executed, calls Unmap() on its input tensor handle.
+class UnmapWorkload : public BaseWorkload<UnmapQueueDescriptor>
+{
+public:
+    /// @param [in] descriptor Queue descriptor holding the input tensor handle.
+    /// @param [in] info       Workload info forwarded to BaseWorkload.
+    UnmapWorkload(const UnmapQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+};
+
+} //namespace armnn
const std::string descriptorName{"MapQueueDescriptor"};
ValidateNumInputs(workloadInfo, descriptorName, 1);
- ValidateNumOutputs(workloadInfo, descriptorName , 0);
+ ValidateNumOutputs(workloadInfo, descriptorName, 0);
+
+ for (unsigned int i = 0; i < m_Inputs.size(); ++i)
+ {
+ if (!m_Inputs[i])
+ {
+ throw InvalidArgumentException(
+ fmt::format("{}: Invalid NULL input {}.", descriptorName, static_cast<int>(i)));
+ }
+ }
+}
+
+//---------------------------------------------------------------
+void UnmapQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+ const std::string descriptorName{"UnmapQueueDescriptor"};
+
+ ValidateNumInputs(workloadInfo, descriptorName, 1);
+ ValidateNumOutputs(workloadInfo, descriptorName, 0);
for (unsigned int i = 0; i < m_Inputs.size(); ++i)
{
void Validate(const WorkloadInfo& workloadInfo) const;
};
+/// Queue descriptor for the Unmap workload; Validate() checks for exactly
+/// one input and no outputs.
+struct UnmapQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
struct MemCopyQueueDescriptor : QueueDescriptor
{
void Validate(const WorkloadInfo& workloadInfo) const;
MemSyncWorkload.cpp \
OptimizationViews.cpp \
TensorHandleFactoryRegistry.cpp \
+ UnmapWorkload.cpp \
WorkloadData.cpp \
WorkloadFactory.cpp \
WorkloadUtils.cpp
#include <Graph.hpp>
#include <backendsCommon/MapWorkload.hpp>
+#include <backendsCommon/UnmapWorkload.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <armnn/utility/IgnoreUnused.hpp>
armnn::SplitterLayer* m_Layer;
};
+// Specialization for UnmapLayer: adds the layer to dummyGraph on construction
+// and erases it on destruction, with no further wiring.
+template<>
+struct DummyLayer<armnn::UnmapLayer, void>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::UnmapLayer>("");
+    }
+
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+
+    armnn::UnmapLayer* m_Layer;
+};
+
template <typename ConvolutionLayerType>
struct DummyConvolutionLayer
{
DECLARE_LAYER_POLICY_2_PARAM(PreCompiled)
DECLARE_LAYER_POLICY_1_PARAM(Prelu)
-
DECLARE_LAYER_POLICY_2_PARAM(QLstm)
DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
DECLARE_LAYER_POLICY_2_PARAM(TransposeConvolution2d)
+DECLARE_LAYER_POLICY_MAP_PARAM(Unmap, void)
+
// Generic implementation to get the number of input slots for a given layer type;
template<armnn::LayerType Type>
return true;
}
+// Overload for the Unmap layer type: reports supported unconditionally,
+// without consulting the workload factory.
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTest(FactoryType *factory, Tag<armnn::LayerType::Unmap>)
+{
+    IgnoreUnused(factory);
+    return true;
+}
+
// Helper function to compute the next type in the LayerType enum.
constexpr armnn::LayerType NextType(armnn::LayerType type)
{