From 32b9046ea74d2387a08819cf5e67c183e03f6d3f Mon Sep 17 00:00:00 2001 From: narpra01 Date: Thu, 13 Sep 2018 11:07:48 +0100 Subject: [PATCH] IVGCVSW-1813 - Add MeanLayer * add MeanLayer functionalities * modify MeanQueueDescriptor to use parameter * add IsMeanSupported placeholder for all backends Change-Id: Ic69a34a61df667849977aad9b38f9a01eef565b5 --- Android.mk | 1 + CMakeLists.txt | 2 + include/armnn/Descriptors.hpp | 15 +++ include/armnn/DescriptorsFwd.hpp | 1 + include/armnn/INetwork.hpp | 6 ++ include/armnn/LayerSupport.hpp | 7 ++ src/armnn/InternalTypes.cpp | 1 + src/armnn/InternalTypes.hpp | 1 + src/armnn/LayerSupport.cpp | 10 ++ src/armnn/LayersFwd.hpp | 2 + src/armnn/Network.cpp | 5 + src/armnn/Network.hpp | 2 + src/armnn/backends/ClLayerSupport.cpp | 8 ++ src/armnn/backends/ClLayerSupport.hpp | 5 + src/armnn/backends/NeonLayerSupport.cpp | 8 ++ src/armnn/backends/NeonLayerSupport.hpp | 5 + src/armnn/backends/RefLayerSupport.cpp | 8 ++ src/armnn/backends/RefLayerSupport.hpp | 5 + src/armnn/backends/WorkloadData.cpp | 21 +---- src/armnn/backends/WorkloadData.hpp | 11 +-- src/armnn/backends/WorkloadFactory.cpp | 13 +++ .../backends/test/IsLayerSupportedTestImpl.hpp | 2 + src/armnn/layers/MeanLayer.cpp | 105 +++++++++++++++++++++ src/armnn/layers/MeanLayer.hpp | 29 ++++++ 24 files changed, 245 insertions(+), 28 deletions(-) create mode 100644 src/armnn/layers/MeanLayer.cpp create mode 100644 src/armnn/layers/MeanLayer.hpp diff --git a/Android.mk b/Android.mk index 9272aef..db3c6b3 100644 --- a/Android.mk +++ b/Android.mk @@ -177,6 +177,7 @@ LOCAL_SRC_FILES := \ src/armnn/layers/InputLayer.cpp \ src/armnn/layers/L2NormalizationLayer.cpp \ src/armnn/layers/LstmLayer.cpp \ + src/armnn/layers/MeanLayer.cpp \ src/armnn/layers/MemCopyLayer.cpp \ src/armnn/layers/MergerLayer.cpp \ src/armnn/layers/MultiplicationLayer.cpp \ diff --git a/CMakeLists.txt b/CMakeLists.txt index 4453a85..7656c5d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -305,6 +305,8 @@ 
list(APPEND armnn_sources src/armnn/layers/L2NormalizationLayer.cpp src/armnn/layers/LstmLayer.cpp src/armnn/layers/LstmLayer.hpp + src/armnn/layers/MeanLayer.hpp + src/armnn/layers/MeanLayer.cpp src/armnn/layers/MemCopyLayer.hpp src/armnn/layers/MemCopyLayer.cpp src/armnn/layers/MergerLayer.hpp diff --git a/include/armnn/Descriptors.hpp b/include/armnn/Descriptors.hpp index decbf99..5f9df6b 100644 --- a/include/armnn/Descriptors.hpp +++ b/include/armnn/Descriptors.hpp @@ -332,4 +332,19 @@ struct LstmDescriptor bool m_ProjectionEnabled; }; +struct MeanDescriptor +{ + MeanDescriptor() + : m_KeepDims(false) + {} + + MeanDescriptor(const std::vector& axis, bool keepDims) + : m_Axis(axis) + , m_KeepDims(keepDims) + {} + + std::vector m_Axis; + bool m_KeepDims; +}; + } diff --git a/include/armnn/DescriptorsFwd.hpp b/include/armnn/DescriptorsFwd.hpp index ed958fc..b161df8 100644 --- a/include/armnn/DescriptorsFwd.hpp +++ b/include/armnn/DescriptorsFwd.hpp @@ -15,6 +15,7 @@ struct FullyConnectedDescriptor; struct LstmDescriptor; struct PermuteDescriptor; struct NormalizationDescriptor; +struct MeanDescriptor; struct Pooling2dDescriptor; struct ReshapeDescriptor; struct ResizeBilinearDescriptor; diff --git a/include/armnn/INetwork.hpp b/include/armnn/INetwork.hpp index 0405074..7fd7a25 100644 --- a/include/armnn/INetwork.hpp +++ b/include/armnn/INetwork.hpp @@ -279,6 +279,12 @@ public: /// @return - Interface for configuring the layer. virtual IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) = 0; + /// Add a Mean layer to the network. + /// @param meanDescriptor - Parameters for the mean operation. + /// @param name - Optional name for the layer. + /// @ return - Interface for configuring the layer. 
+ virtual IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) = 0; + protected: ~INetwork() {} }; diff --git a/include/armnn/LayerSupport.hpp b/include/armnn/LayerSupport.hpp index ac7d08f..d00691f 100644 --- a/include/armnn/LayerSupport.hpp +++ b/include/armnn/LayerSupport.hpp @@ -196,4 +196,11 @@ bool IsFloorSupported(Compute compute, char* reasonIfUnsupported = nullptr, size_t reasonIfUnsupportedMaxLength = 1024); +bool IsMeanSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + char* reasonIfUnsupported = nullptr, + size_t reasonIfUnsupportedMaxLength = 1024); + } diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp index ee93d48..fce1e95 100644 --- a/src/armnn/InternalTypes.cpp +++ b/src/armnn/InternalTypes.cpp @@ -29,6 +29,7 @@ char const* GetLayerTypeAsCString(LayerType type) case LayerType::Input: return "Input"; case LayerType::L2Normalization: return "L2Normalization"; case LayerType::Lstm: return "Lstm"; + case LayerType::Mean: return "Mean"; case LayerType::MemCopy: return "MemCopy"; case LayerType::Merger: return "Merger"; case LayerType::Multiplication: return "Multiplication"; diff --git a/src/armnn/InternalTypes.hpp b/src/armnn/InternalTypes.hpp index d2c83cd..13ab2bc 100644 --- a/src/armnn/InternalTypes.hpp +++ b/src/armnn/InternalTypes.hpp @@ -29,6 +29,7 @@ enum class LayerType Input, L2Normalization, Lstm, + Mean, MemCopy, Merger, Multiplication, diff --git a/src/armnn/LayerSupport.cpp b/src/armnn/LayerSupport.cpp index 59c1c8d..7ed56c5 100644 --- a/src/armnn/LayerSupport.cpp +++ b/src/armnn/LayerSupport.cpp @@ -345,4 +345,14 @@ bool IsFloorSupported(Compute compute, FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output); } +bool IsMeanSupported(Compute compute, + const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + char* reasonIfUnsupported, + size_t 
reasonIfUnsupportedMaxLength) +{ + FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor); +} + } diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp index a1dc355..c9ee9db 100644 --- a/src/armnn/LayersFwd.hpp +++ b/src/armnn/LayersFwd.hpp @@ -21,6 +21,7 @@ #include "layers/InputLayer.hpp" #include "layers/L2NormalizationLayer.hpp" #include "layers/LstmLayer.hpp" +#include "layers/MeanLayer.hpp" #include "layers/MemCopyLayer.hpp" #include "layers/MergerLayer.hpp" #include "layers/MultiplicationLayer.hpp" @@ -76,6 +77,7 @@ DECLARE_LAYER(FullyConnected) DECLARE_LAYER(Input) DECLARE_LAYER(L2Normalization) DECLARE_LAYER(Lstm) +DECLARE_LAYER(Mean) DECLARE_LAYER(MemCopy) DECLARE_LAYER(Merger) DECLARE_LAYER(Multiplication) diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp index dc531d1..22d80d3 100644 --- a/src/armnn/Network.cpp +++ b/src/armnn/Network.cpp @@ -594,6 +594,11 @@ IConnectableLayer* Network::AddSubtractionLayer(const char* name) return m_Graph->AddLayer(name); } +IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name) +{ + return m_Graph->AddLayer(meanDescriptor,name); +} + OptimizedNetwork::OptimizedNetwork(std::unique_ptr graph) : m_Graph(std::move(graph)) { diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp index b6b8548..1411242 100644 --- a/src/armnn/Network.hpp +++ b/src/armnn/Network.hpp @@ -117,6 +117,8 @@ public: IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override; + IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override; + private: IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor, const ConstTensor& weights, diff --git a/src/armnn/backends/ClLayerSupport.cpp b/src/armnn/backends/ClLayerSupport.cpp index aeb2759..4664c2e 100644 --- a/src/armnn/backends/ClLayerSupport.cpp +++ b/src/armnn/backends/ClLayerSupport.cpp @@ -462,4 
+462,12 @@ bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input, reasonIfUnsupported); } +bool IsMeanSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + return false; +} + } diff --git a/src/armnn/backends/ClLayerSupport.hpp b/src/armnn/backends/ClLayerSupport.hpp index dbe546c..f5c1226 100644 --- a/src/armnn/backends/ClLayerSupport.hpp +++ b/src/armnn/backends/ClLayerSupport.hpp @@ -142,6 +142,11 @@ bool IsFloorSupportedCl(const TensorInfo& input, const TensorInfo& output, std::string* reasonIfUnsupported = nullptr); +bool IsMeanSupportedCl(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input, const TensorInfo& output, std::string* reasonIfUnsupported = nullptr); diff --git a/src/armnn/backends/NeonLayerSupport.cpp b/src/armnn/backends/NeonLayerSupport.cpp index 73d2518..7f33c48 100644 --- a/src/armnn/backends/NeonLayerSupport.cpp +++ b/src/armnn/backends/NeonLayerSupport.cpp @@ -453,4 +453,12 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input, return true; } +bool IsMeanSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + return false; +} + } diff --git a/src/armnn/backends/NeonLayerSupport.hpp b/src/armnn/backends/NeonLayerSupport.hpp index f7b6253..95b14b3 100644 --- a/src/armnn/backends/NeonLayerSupport.hpp +++ b/src/armnn/backends/NeonLayerSupport.hpp @@ -155,4 +155,9 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input, const TensorInfo& output, std::string* reasonIfUnsupported = nullptr); +bool IsMeanSupportedNeon(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + } diff --git a/src/armnn/backends/RefLayerSupport.cpp 
b/src/armnn/backends/RefLayerSupport.cpp index 41f57f1..d56cdeb 100644 --- a/src/armnn/backends/RefLayerSupport.cpp +++ b/src/armnn/backends/RefLayerSupport.cpp @@ -387,4 +387,12 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input, &FalseFuncU8<>)); } +bool IsMeanSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + std::string* reasonIfUnsupported) +{ + return false; +} + } diff --git a/src/armnn/backends/RefLayerSupport.hpp b/src/armnn/backends/RefLayerSupport.hpp index 464eb1c..ff2e7e3 100644 --- a/src/armnn/backends/RefLayerSupport.hpp +++ b/src/armnn/backends/RefLayerSupport.hpp @@ -147,4 +147,9 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input, const TensorInfo& output, std::string* reasonIfUnsupported = nullptr); +bool IsMeanSupportedRef(const TensorInfo& input, + const TensorInfo& output, + const MeanDescriptor& descriptor, + std::string* reasonIfUnsupported = nullptr); + } diff --git a/src/armnn/backends/WorkloadData.cpp b/src/armnn/backends/WorkloadData.cpp index 3ed77da..25144a4 100644 --- a/src/armnn/backends/WorkloadData.cpp +++ b/src/armnn/backends/WorkloadData.cpp @@ -129,18 +129,6 @@ void ValidateTensorNumDimensions(const TensorInfo& tensor, } } -void ValidateTensorMaxNumElements(const TensorInfo& tensor, - std::string const& descName, - unsigned int maxNumElements, - std::string const& tensorName) -{ - if (tensor.GetNumElements() > maxNumElements) - { - throw InvalidArgumentException(descName + ": Expected maximum of " + to_string(maxNumElements) + " but got " + - to_string(tensor.GetNumElements()) + " elements for " + tensorName + " tensor."); - } -} - //--------------------------------------------------------------- void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType, const std::string& descName, std::string const& tensorName) @@ -844,20 +832,17 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const const TensorInfo& input = 
workloadInfo.m_InputTensorInfos[0];
     const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
 
-    if (m_Keepdims)
+    if (m_Parameters.m_KeepDims)
     {
         ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
     }
-    else if (m_Axis == nullptr)
+    else if (m_Parameters.m_Axis.empty())
     {
         ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
     }
     else
     {
-        const TensorInfo& axis = m_Axis->GetTensorInfo();
-        ValidateTensorNumDimensions(axis, "MeanQueueDescriptor", 1, "axis");
-        ValidateTensorMaxNumElements(axis, "MeanQueueDescriptor", input.GetNumDimensions(), "axis");
-        unsigned int outputDim = input.GetNumDimensions() - axis.GetNumElements();
+        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
         ValidateTensorNumDimensions(output,
                                     "MeanQueueDescriptor",
                                     outputDim > 0 ? outputDim : 1,
diff --git a/src/armnn/backends/WorkloadData.hpp b/src/armnn/backends/WorkloadData.hpp
index face761..a36f0ad 100644
--- a/src/armnn/backends/WorkloadData.hpp
+++ b/src/armnn/backends/WorkloadData.hpp
@@ -197,17 +197,8 @@ struct SubtractionQueueDescriptor : QueueDescriptor
 };
 
 // Mean layer workload data.
-struct MeanQueueDescriptor : QueueDescriptor
+struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
 {
-    MeanQueueDescriptor()
-        : m_Axis(nullptr)
-        , m_Keepdims(false)
-    {
-    }
-
-    const ConstCpuTensorHandle* m_Axis;
-    bool m_Keepdims;
-
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
diff --git a/src/armnn/backends/WorkloadFactory.cpp b/src/armnn/backends/WorkloadFactory.cpp
index d188725..773a8c1 100644
--- a/src/armnn/backends/WorkloadFactory.cpp
+++ b/src/armnn/backends/WorkloadFactory.cpp
@@ -537,6 +537,19 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
                                      reasonCapacity);
             break;
         }
+        case LayerType::Mean:
+        {
+            auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = IsMeanSupported(compute,
+                                     OverrideDataType(input, dataType),
+                                     OverrideDataType(output, dataType),
+                                     cLayer->GetParameters(),
+                                     reason,
+                                     reasonCapacity);
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
diff --git a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
index 7745972..c5389df 100644
--- a/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
+++ b/src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
@@ -328,6 +328,8 @@ DECLARE_LAYER_POLICY_1_PARAM(L2Normalization)
 
 DECLARE_LAYER_POLICY_2_PARAM(Lstm)
 
+DECLARE_LAYER_POLICY_2_PARAM(Mean)
+
 DECLARE_LAYER_POLICY_2_PARAM(Merger)
 
 DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
new file mode 100644
index 0000000..6bbb094
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -0,0 +1,105 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MeanLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/WorkloadData.hpp"
+#include "backends/WorkloadFactory.hpp"
+
+#include <cstring>
+
+namespace armnn
+{
+
+MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Mean, param, name)
+{}
+
+std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
+                                                     const armnn::IWorkloadFactory& factory) const
+{
+    MeanQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
+    descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
+
+    return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+MeanLayer* MeanLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<MeanLayer>(graph, m_Param, GetName());
+
+    layer->m_Param.m_Axis = m_Param.m_Axis;
+    layer->m_Param.m_KeepDims = m_Param.m_KeepDims;
+
+    return std::move(layer);
+}
+
+void MeanLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+    BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= MaxNumOfTensorDimensions,
+                     "MeanLayer: Mean supports up to 4D input.");
+
+    unsigned int rank = input.GetNumDimensions();
+    unsigned int outputRank = 0;
+
+    // Calculate output dimension
+    if (m_Param.m_KeepDims)
+    {
+        outputRank = rank;
+    }
+    else if (m_Param.m_Axis.empty())
+    {
+        outputRank = 1;
+    }
+    else if (m_Param.m_Axis.size() > input.GetNumDimensions())
+    {
+        throw LayerValidationException("MeanLayer: Dimensions to reduce can not be bigger than input dimensions");
+    }
+    else
+    {
+        outputRank = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Param.m_Axis.size());
+        if (outputRank == 0)
+        {
+            outputRank = 1;
+        }
+    }
+
+    unsigned int dimSizes[outputRank];
+    memset(dimSizes, 1, outputRank * sizeof(unsigned int));
+
+    if (!m_Param.m_Axis.empty())
+    {
+        // Skip the dimension that has been reduced unless keepDims is true.
+        unsigned int outputIndex = 0;
+        for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
+        {
+            if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
+            {
+                dimSizes[outputIndex] = boost::numeric_cast<unsigned int>(input.GetShape()[i]);
+                ++outputIndex;
+            }
+            else if (m_Param.m_KeepDims)
+            {
+                dimSizes[outputIndex] = 1;
+                ++outputIndex;
+            }
+        }
+    }
+    const TensorShape& inferredShape = TensorShape(outputRank, dimSizes);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShape);
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
new file mode 100644
index 0000000..ecb9297
--- /dev/null
+++ b/src/armnn/layers/MeanLayer.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class MeanLayer : public LayerWithParameters<MeanDescriptor>
+{
+public:
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    MeanLayer* Clone(Graph& graph) const override;
+
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    MeanLayer(const MeanDescriptor& param, const char* name);
+    ~MeanLayer() = default;
+
+};
+
+}
\ No newline at end of file
-- 
2.7.4