IVGCVSW-1813 - Add MeanLayer
author    narpra01 <narumol.prangnawarat@arm.com>
Thu, 13 Sep 2018 10:07:48 +0000 (11:07 +0100)
committer Matthew Bentham <matthew.bentham@arm.com>
Mon, 1 Oct 2018 13:56:48 +0000 (14:56 +0100)
 * add MeanLayer functionality
 * modify MeanQueueDescriptor to use the MeanDescriptor parameters
 * add IsMeanSupported placeholder for all backends

Change-Id: Ic69a34a61df667849977aad9b38f9a01eef565b5

24 files changed:
Android.mk
CMakeLists.txt
include/armnn/Descriptors.hpp
include/armnn/DescriptorsFwd.hpp
include/armnn/INetwork.hpp
include/armnn/LayerSupport.hpp
src/armnn/InternalTypes.cpp
src/armnn/InternalTypes.hpp
src/armnn/LayerSupport.cpp
src/armnn/LayersFwd.hpp
src/armnn/Network.cpp
src/armnn/Network.hpp
src/armnn/backends/ClLayerSupport.cpp
src/armnn/backends/ClLayerSupport.hpp
src/armnn/backends/NeonLayerSupport.cpp
src/armnn/backends/NeonLayerSupport.hpp
src/armnn/backends/RefLayerSupport.cpp
src/armnn/backends/RefLayerSupport.hpp
src/armnn/backends/WorkloadData.cpp
src/armnn/backends/WorkloadData.hpp
src/armnn/backends/WorkloadFactory.cpp
src/armnn/backends/test/IsLayerSupportedTestImpl.hpp
src/armnn/layers/MeanLayer.cpp [new file with mode: 0644]
src/armnn/layers/MeanLayer.hpp [new file with mode: 0644]

index 9272aef..db3c6b3 100644 (file)
@@ -177,6 +177,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/InputLayer.cpp \
         src/armnn/layers/L2NormalizationLayer.cpp \
         src/armnn/layers/LstmLayer.cpp \
+        src/armnn/layers/MeanLayer.cpp \
         src/armnn/layers/MemCopyLayer.cpp \
         src/armnn/layers/MergerLayer.cpp \
         src/armnn/layers/MultiplicationLayer.cpp \
index 4453a85..7656c5d 100644 (file)
@@ -305,6 +305,8 @@ list(APPEND armnn_sources
     src/armnn/layers/L2NormalizationLayer.cpp
     src/armnn/layers/LstmLayer.cpp
     src/armnn/layers/LstmLayer.hpp
+    src/armnn/layers/MeanLayer.hpp
+    src/armnn/layers/MeanLayer.cpp
     src/armnn/layers/MemCopyLayer.hpp
     src/armnn/layers/MemCopyLayer.cpp
     src/armnn/layers/MergerLayer.hpp
index decbf99..5f9df6b 100644 (file)
@@ -332,4 +332,19 @@ struct LstmDescriptor
     bool m_ProjectionEnabled;
 };
 
+struct MeanDescriptor
+{
+    MeanDescriptor()
+    : m_KeepDims(false)
+    {}
+
+    MeanDescriptor(const std::vector<unsigned int>& axis, bool keepDims)
+    : m_Axis(axis)
+    , m_KeepDims(keepDims)
+    {}
+
+    std::vector<unsigned int> m_Axis;
+    bool m_KeepDims;
+};
+
 }
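
Note: MeanDescriptor is a plain value type. A minimal sketch of populating it (the axis values and 4D layout below are illustrative, not taken from this change):

    // Reduce over axes 1 and 2 of a 4D tensor, dropping the reduced dimensions.
    armnn::MeanDescriptor meanDesc({1u, 2u}, /*keepDims=*/false);

    // Equivalent, built up via the default constructor.
    armnn::MeanDescriptor meanDesc2;
    meanDesc2.m_Axis     = {1u, 2u};
    meanDesc2.m_KeepDims = false;
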
index ed958fc..b161df8 100644 (file)
@@ -15,6 +15,7 @@ struct FullyConnectedDescriptor;
 struct LstmDescriptor;
 struct PermuteDescriptor;
 struct NormalizationDescriptor;
+struct MeanDescriptor;
 struct Pooling2dDescriptor;
 struct ReshapeDescriptor;
 struct ResizeBilinearDescriptor;
index 0405074..7fd7a25 100644 (file)
@@ -279,6 +279,12 @@ public:
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) = 0;
 
+    /// Add a Mean layer to the network.
+    /// @param meanDescriptor - Parameters for the mean operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) = 0;
+
 protected:
     ~INetwork() {}
 };
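
As a rough usage sketch (once a backend implements the workload; layer names, shapes and bindings below are illustrative, not part of this change), the new entry point slots into the usual network-building flow:

    using namespace armnn;

    INetworkPtr network = INetwork::Create();

    IConnectableLayer* input  = network->AddInputLayer(0, "input");
    MeanDescriptor meanDesc({2u, 3u}, /*keepDims=*/true);   // reduce H and W of an NCHW tensor
    IConnectableLayer* mean   = network->AddMeanLayer(meanDesc, "mean");
    IConnectableLayer* output = network->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(mean->GetInputSlot(0));
    mean->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    unsigned int inDims[]  = {1, 16, 32, 32};
    unsigned int outDims[] = {1, 16, 1, 1};
    input->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape(4, inDims),  DataType::Float32));
    mean->GetOutputSlot(0).SetTensorInfo(TensorInfo(TensorShape(4, outDims), DataType::Float32));
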
index ac7d08f..d00691f 100644 (file)
@@ -196,4 +196,11 @@ bool IsFloorSupported(Compute compute,
                       char* reasonIfUnsupported = nullptr,
                       size_t reasonIfUnsupportedMaxLength = 1024);
 
+bool IsMeanSupported(Compute compute,
+                     const TensorInfo& input,
+                     const TensorInfo& output,
+                     const MeanDescriptor& descriptor,
+                     char* reasonIfUnsupported = nullptr,
+                     size_t reasonIfUnsupportedMaxLength = 1024);
+
 }
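
A small sketch of querying the new check (tensor shapes are illustrative; with this change every backend only registers a placeholder, so the call reports unsupported):

    using namespace armnn;

    unsigned int inDims[]  = {1, 16, 32, 32};
    unsigned int outDims[] = {1, 16};
    TensorInfo inputInfo (TensorShape(4, inDims),  DataType::Float32);
    TensorInfo outputInfo(TensorShape(2, outDims), DataType::Float32);
    MeanDescriptor descriptor({2u, 3u}, /*keepDims=*/false);

    char reason[1024];
    bool supported = IsMeanSupported(Compute::CpuRef, inputInfo, outputInfo, descriptor,
                                     reason, sizeof(reason));
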
index ee93d48..fce1e95 100644 (file)
@@ -29,6 +29,7 @@ char const* GetLayerTypeAsCString(LayerType type)
         case LayerType::Input: return "Input";
         case LayerType::L2Normalization: return "L2Normalization";
         case LayerType::Lstm: return "Lstm";
+        case LayerType::Mean: return "Mean";
         case LayerType::MemCopy: return "MemCopy";
         case LayerType::Merger: return "Merger";
         case LayerType::Multiplication: return "Multiplication";
index d2c83cd..13ab2bc 100644 (file)
@@ -29,6 +29,7 @@ enum class LayerType
     Input,
     L2Normalization,
     Lstm,
+    Mean,
     MemCopy,
     Merger,
     Multiplication,
index 59c1c8d..7ed56c5 100644 (file)
@@ -345,4 +345,14 @@ bool IsFloorSupported(Compute compute,
     FORWARD_LAYER_SUPPORT_FUNC(compute, IsFloorSupported, input, output);
 }
 
+bool IsMeanSupported(Compute compute,
+                     const TensorInfo& input,
+                     const TensorInfo& output,
+                     const MeanDescriptor& descriptor,
+                     char* reasonIfUnsupported,
+                     size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(compute, IsMeanSupported, input, output, descriptor);
+}
+
 }
index a1dc355..c9ee9db 100644 (file)
@@ -21,6 +21,7 @@
 #include "layers/InputLayer.hpp"
 #include "layers/L2NormalizationLayer.hpp"
 #include "layers/LstmLayer.hpp"
+#include "layers/MeanLayer.hpp"
 #include "layers/MemCopyLayer.hpp"
 #include "layers/MergerLayer.hpp"
 #include "layers/MultiplicationLayer.hpp"
@@ -76,6 +77,7 @@ DECLARE_LAYER(FullyConnected)
 DECLARE_LAYER(Input)
 DECLARE_LAYER(L2Normalization)
 DECLARE_LAYER(Lstm)
+DECLARE_LAYER(Mean)
 DECLARE_LAYER(MemCopy)
 DECLARE_LAYER(Merger)
 DECLARE_LAYER(Multiplication)
index dc531d1..22d80d3 100644 (file)
@@ -594,6 +594,11 @@ IConnectableLayer* Network::AddSubtractionLayer(const char* name)
     return m_Graph->AddLayer<SubtractionLayer>(name);
 }
 
+IConnectableLayer* Network::AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name)
+{
+    return m_Graph->AddLayer<MeanLayer>(meanDescriptor, name);
+}
+
 OptimizedNetwork::OptimizedNetwork(std::unique_ptr<Graph> graph)
     : m_Graph(std::move(graph))
 {
index b6b8548..1411242 100644 (file)
@@ -117,6 +117,8 @@ public:
 
     IConnectableLayer* AddSubtractionLayer(const char* name = nullptr) override;
 
+    IConnectableLayer* AddMeanLayer(const MeanDescriptor& meanDescriptor, const char* name = nullptr) override;
+
 private:
     IConnectableLayer* AddFullyConnectedLayerImpl(const FullyConnectedDescriptor& fullyConnectedDescriptor,
         const ConstTensor& weights,
index aeb2759..4664c2e 100644 (file)
@@ -462,4 +462,12 @@ bool IsConvertFp32ToFp16SupportedCl(const TensorInfo& input,
                                    reasonIfUnsupported);
 }
 
+bool IsMeanSupportedCl(const TensorInfo& input,
+                       const TensorInfo& output,
+                       const MeanDescriptor& descriptor,
+                       std::string* reasonIfUnsupported)
+{
+    return false;
+}
+
 }
index dbe546c..f5c1226 100644 (file)
@@ -142,6 +142,11 @@ bool IsFloorSupportedCl(const TensorInfo& input,
                         const TensorInfo& output,
                         std::string* reasonIfUnsupported = nullptr);
 
+bool IsMeanSupportedCl(const TensorInfo& input,
+                       const TensorInfo& output,
+                       const MeanDescriptor& descriptor,
+                       std::string* reasonIfUnsupported = nullptr);
+
 bool IsConvertFp16ToFp32SupportedCl(const TensorInfo& input,
                                     const TensorInfo& output,
                                     std::string* reasonIfUnsupported = nullptr);
index 73d2518..7f33c48 100644 (file)
@@ -453,4 +453,12 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
     return true;
 }
 
+bool IsMeanSupportedNeon(const TensorInfo& input,
+                         const TensorInfo& output,
+                         const MeanDescriptor& descriptor,
+                         std::string* reasonIfUnsupported)
+{
+    return false;
+}
+
 }
index f7b6253..95b14b3 100644 (file)
@@ -155,4 +155,9 @@ bool IsConvertFp32ToFp16SupportedNeon(const TensorInfo& input,
                                       const TensorInfo& output,
                                       std::string* reasonIfUnsupported = nullptr);
 
+bool IsMeanSupportedNeon(const TensorInfo& input,
+                         const TensorInfo& output,
+                         const MeanDescriptor& descriptor,
+                         std::string* reasonIfUnsupported = nullptr);
+
 }
index 41f57f1..d56cdeb 100644 (file)
@@ -387,4 +387,12 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
                                           &FalseFuncU8<>));
 }
 
+bool IsMeanSupportedRef(const TensorInfo& input,
+                        const TensorInfo& output,
+                        const MeanDescriptor& descriptor,
+                        std::string* reasonIfUnsupported)
+{
+    return false;
+}
+
 }
index 464eb1c..ff2e7e3 100644 (file)
@@ -147,4 +147,9 @@ bool IsConvertFp32ToFp16SupportedRef(const TensorInfo& input,
                                      const TensorInfo& output,
                                      std::string* reasonIfUnsupported = nullptr);
 
+bool IsMeanSupportedRef(const TensorInfo& input,
+                        const TensorInfo& output,
+                        const MeanDescriptor& descriptor,
+                        std::string* reasonIfUnsupported = nullptr);
+
 }
index 3ed77da..25144a4 100644 (file)
@@ -129,18 +129,6 @@ void ValidateTensorNumDimensions(const TensorInfo&  tensor,
     }
 }
 
-void ValidateTensorMaxNumElements(const TensorInfo& tensor,
-                                  std::string const& descName,
-                                  unsigned int maxNumElements,
-                                  std::string const& tensorName)
-{
-    if (tensor.GetNumElements() > maxNumElements)
-    {
-        throw InvalidArgumentException(descName + ": Expected maximum of " + to_string(maxNumElements) + " but got " +
-            to_string(tensor.GetNumElements()) + " elements for " + tensorName + " tensor.");
-    }
-}
-
 //---------------------------------------------------------------
 void ValidateTensorDataType(const TensorInfo& tensor, DataType dataType,
     const std::string& descName, std::string const& tensorName)
@@ -844,20 +832,17 @@ void MeanQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     const TensorInfo& input  = workloadInfo.m_InputTensorInfos[0];
     const TensorInfo& output = workloadInfo.m_OutputTensorInfos[0];
 
-    if (m_Keepdims)
+    if (m_Parameters.m_KeepDims)
     {
         ValidateTensorNumDimensions(output, "MeanQueueDescriptor", input.GetNumDimensions(), "output");
     }
-    else if (m_Axis == nullptr)
+    else if (m_Parameters.m_Axis.empty())
     {
         ValidateTensorNumDimensions(output, "MeanQueueDescriptor", 1, "output");
     }
     else
     {
-        const TensorInfo& axis = m_Axis->GetTensorInfo();
-        ValidateTensorNumDimensions(axis, "MeanQueueDescriptor", 1, "axis");
-        ValidateTensorMaxNumElements(axis, "MeanQueueDescriptor", input.GetNumDimensions(), "axis");
-        unsigned int outputDim = input.GetNumDimensions() - axis.GetNumElements();
+        auto outputDim = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Parameters.m_Axis.size());
         ValidateTensorNumDimensions(output,
                                     "MeanQueueDescriptor",
                                     outputDim > 0 ? outputDim : 1,
index face761..a36f0ad 100644 (file)
@@ -197,17 +197,8 @@ struct SubtractionQueueDescriptor : QueueDescriptor
 };
 
 // Mean layer workload data.
-struct MeanQueueDescriptor : QueueDescriptor
+struct MeanQueueDescriptor : QueueDescriptorWithParameters<MeanDescriptor>
 {
-    MeanQueueDescriptor()
-        : m_Axis(nullptr)
-        , m_Keepdims(false)
-    {
-    }
-
-    const ConstCpuTensorHandle* m_Axis;
-    bool m_Keepdims;
-
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
index d188725..773a8c1 100644 (file)
@@ -537,6 +537,19 @@ bool IWorkloadFactory::IsLayerSupported(Compute compute, const Layer& layer, boo
                                             reasonCapacity);
             break;
         }
+        case LayerType::Mean:
+        {
+            auto cLayer = boost::polymorphic_downcast<const MeanLayer*>(&layer);
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = IsMeanSupported(compute,
+                                     OverrideDataType(input, dataType),
+                                     OverrideDataType(output, dataType),
+                                     cLayer->GetParameters(),
+                                     reason,
+                                     reasonCapacity);
+            break;
+        }
         default:
         {
             BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
index 7745972..c5389df 100644 (file)
@@ -328,6 +328,8 @@ DECLARE_LAYER_POLICY_1_PARAM(L2Normalization)
 
 DECLARE_LAYER_POLICY_2_PARAM(Lstm)
 
+DECLARE_LAYER_POLICY_2_PARAM(Mean)
+
 DECLARE_LAYER_POLICY_2_PARAM(Merger)
 
 DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
new file mode 100644 (file)
index 0000000..6bbb094
--- /dev/null
@@ -0,0 +1,105 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "MeanLayer.hpp"
+#include "LayerCloneBase.hpp"
+
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/WorkloadData.hpp"
+#include "backends/WorkloadFactory.hpp"
+
+#include <algorithm>
+
+namespace armnn
+{
+
+MeanLayer::MeanLayer(const armnn::MeanDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::Mean, param, name)
+{}
+
+std::unique_ptr<IWorkload> MeanLayer::CreateWorkload(const armnn::Graph& graph,
+                                                     const armnn::IWorkloadFactory& factory) const
+{
+    MeanQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Axis = m_Param.m_Axis;
+    descriptor.m_Parameters.m_KeepDims = m_Param.m_KeepDims;
+
+    return factory.CreateMean(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+MeanLayer* MeanLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<MeanLayer>(graph, m_Param, GetName());
+
+    layer->m_Param.m_Axis = m_Param.m_Axis;
+    layer->m_Param.m_KeepDims = m_Param.m_KeepDims;
+
+    return std::move(layer);
+}
+
+void MeanLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    const TensorInfo& input = GetInputSlot(0).GetConnection()->GetTensorInfo();
+
+    BOOST_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= MaxNumOfTensorDimensions,
+                     "MeanLayer: Mean supports up to 4D input.");
+
+    unsigned int rank = input.GetNumDimensions();
+    unsigned int outputRank = 0;
+
+    // Calculate output dimension
+    if (m_Param.m_KeepDims)
+    {
+        outputRank = rank;
+    }
+    else if (m_Param.m_Axis.empty())
+    {
+        outputRank = 1;
+    }
+    else if (m_Param.m_Axis.size() > input.GetNumDimensions())
+    {
+        throw LayerValidationException("MeanLayer: Dimensions to reduce cannot exceed the number of input dimensions.");
+    }
+    else
+    {
+        outputRank = input.GetNumDimensions() - boost::numeric_cast<unsigned int>(m_Param.m_Axis.size());
+        if (outputRank == 0)
+        {
+            outputRank = 1;
+        }
+    }
+
+    // Default every output dimension to 1; std::vector avoids the non-standard VLA.
+    std::vector<unsigned int> dimSizes(outputRank, 1);
+
+    if (!m_Param.m_Axis.empty())
+    {
+        // Skip the dimension that has been reduced unless keepDims is true.
+        unsigned int outputIndex = 0;
+        for (unsigned int i = 0; i < input.GetNumDimensions(); ++i)
+        {
+            if (std::find(m_Param.m_Axis.begin(), m_Param.m_Axis.end(), i) == m_Param.m_Axis.end())
+            {
+                dimSizes[outputIndex] = boost::numeric_cast<unsigned int>(input.GetShape()[i]);
+                ++outputIndex;
+            }
+            else if (m_Param.m_KeepDims)
+            {
+                dimSizes[outputIndex] = 1;
+                ++outputIndex;
+            }
+        }
+    }
+    const TensorShape inferredShape(outputRank, dimSizes.data());
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+        "MeanLayer: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+        GetOutputSlot(0).GetTensorInfo().GetShape(),
+        inferredShape);
+}
+
+} // namespace armnn
\ No newline at end of file
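
For reference, the shape inference above follows TensorFlow-style reduce semantics. A standalone sketch that mirrors the same rules outside the layer (the helper name and example shapes are illustrative, not part of the change):

    #include <algorithm>
    #include <vector>

    // Compute the shape produced by reducing inputShape over the given axes.
    std::vector<unsigned int> ReducedShape(const std::vector<unsigned int>& inputShape,
                                           std::vector<unsigned int> axis,
                                           bool keepDims)
    {
        // An empty axis list means "reduce over every dimension".
        if (axis.empty())
        {
            for (unsigned int i = 0; i < inputShape.size(); ++i) { axis.push_back(i); }
        }

        std::vector<unsigned int> out;
        for (unsigned int i = 0; i < inputShape.size(); ++i)
        {
            bool reduced = std::find(axis.begin(), axis.end(), i) != axis.end();
            if (!reduced)
            {
                out.push_back(inputShape[i]); // dimension kept unchanged
            }
            else if (keepDims)
            {
                out.push_back(1u);            // reduced dimension kept as size 1
            }
        }
        if (out.empty())
        {
            out.push_back(1u);                // full reduction collapses to one element
        }
        return out;
    }

    // Example: input {1, 3, 4, 5} reduced over axes {1, 2}
    //   keepDims == false -> {1, 5}
    //   keepDims == true  -> {1, 1, 1, 5}
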
diff --git a/src/armnn/layers/MeanLayer.hpp b/src/armnn/layers/MeanLayer.hpp
new file mode 100644 (file)
index 0000000..ecb9297
--- /dev/null
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class MeanLayer : public LayerWithParameters<MeanDescriptor>
+{
+public:
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph&            graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    MeanLayer* Clone(Graph& graph) const override;
+
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    MeanLayer(const MeanDescriptor& param, const char* name);
+    ~MeanLayer() = default;
+
+};
+
+}
\ No newline at end of file