IVGCVSW-3722 Add front end support for ArgMinMax
author Nikhil Raj <nikhil.raj@arm.com>
Thu, 5 Sep 2019 16:50:44 +0000 (17:50 +0100)
committer Nikhil Raj <nikhil.raj@arm.com>
Thu, 5 Sep 2019 16:50:44 +0000 (17:50 +0100)
Change-Id: I31c5616bea3097f30cde68442d3222e0b0fe2235
Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
25 files changed:
Android.mk
CMakeLists.txt
include/armnn/Descriptors.hpp
include/armnn/DescriptorsFwd.hpp
include/armnn/ILayerSupport.hpp
include/armnn/ILayerVisitor.hpp
include/armnn/INetwork.hpp
include/armnn/LayerVisitorBase.hpp
src/armnn/InternalTypes.hpp
src/armnn/LayerSupport.cpp
src/armnn/LayersFwd.hpp
src/armnn/Network.cpp
src/armnn/Network.hpp
src/armnn/layers/ArgMinMaxLayer.cpp [new file with mode: 0644]
src/armnn/layers/ArgMinMaxLayer.hpp [new file with mode: 0644]
src/armnnSerializer/Serializer.cpp
src/armnnSerializer/Serializer.hpp
src/backends/backendsCommon/LayerSupportBase.cpp
src/backends/backendsCommon/LayerSupportBase.hpp
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/WorkloadData.hpp
src/backends/backendsCommon/WorkloadDataFwd.hpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/WorkloadFactory.hpp
src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp

index 21b186e..3640e0c 100644 (file)
@@ -115,6 +115,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/AbsLayer.cpp \
         src/armnn/layers/ActivationLayer.cpp \
         src/armnn/layers/AdditionLayer.cpp \
+        src/armnn/layers/ArgMinMaxLayer.cpp \
         src/armnn/layers/BatchNormalizationLayer.cpp \
         src/armnn/layers/BatchToSpaceNdLayer.cpp \
         src/armnn/layers/ConcatLayer.cpp \
index e4ca1fc..ef87c6d 100644 (file)
@@ -235,6 +235,8 @@ list(APPEND armnn_sources
     src/armnn/layers/ActivationLayer.cpp
     src/armnn/layers/AdditionLayer.hpp
     src/armnn/layers/AdditionLayer.cpp
+    src/armnn/layers/ArgMinMaxLayer.hpp
+    src/armnn/layers/ArgMinMaxLayer.cpp
     src/armnn/layers/BatchNormalizationLayer.hpp
     src/armnn/layers/BatchNormalizationLayer.cpp
     src/armnn/layers/BatchToSpaceNdLayer.hpp
index 9630d86..87f4bdb 100644 (file)
@@ -30,6 +30,15 @@ struct ActivationDescriptor
     float              m_B;
 };
 
+/// An ArgMinMaxDescriptor for the ArgMinMaxLayer.
+struct ArgMinMaxDescriptor
+{
+    ArgMinMaxDescriptor()
+    : m_Axis(-1) {}
+
+    int m_Axis;
+};
+
 /// A PermuteDescriptor for the PermuteLayer.
 struct PermuteDescriptor
 {
index eddf91f..8f81b4f 100644 (file)
@@ -8,6 +8,7 @@
 namespace armnn
 {
 struct ActivationDescriptor;
+struct ArgMinMaxDescriptor;
 struct BatchNormalizationDescriptor;
 struct BatchToSpaceNdDescriptor;
 struct Convolution2dDescriptor;
index c67569b..d168226 100644 (file)
@@ -14,6 +14,7 @@
 #include <functional>
 #include <memory>
 #include <vector>
+#include "ArmNN.hpp"
 
 namespace armnn
 {
@@ -41,6 +42,11 @@ public:
                                      const TensorInfo& output,
                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsArgMinMaxSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      const ArgMinMaxDescriptor& descriptor,
+                                      Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsBatchNormalizationSupported(const TensorInfo& input,
                                                const TensorInfo& output,
                                                const TensorInfo& mean,
index a22de87..a504a41 100644 (file)
@@ -41,6 +41,14 @@ public:
     virtual void VisitAdditionLayer(const IConnectableLayer* layer,
                                     const char* name = nullptr) = 0;
 
+    /// Function that an arg min max layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param argMinMaxDescriptor - ArgMinMaxDescriptor to configure the arg min max operation.
+    /// @param name - Optional name for the layer.
+    virtual void VisitArgMinMaxLayer(const IConnectableLayer* layer,
+                                     const ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                     const char* name = nullptr) = 0;
+
     /// Function that a batch normalization layer should call back to when its Accept(ILayerVisitor&)
     /// function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
index ce0fda2..cd1b7a6 100644 (file)
@@ -102,6 +102,13 @@ public:
     /// @return - Interface for configuring the layer.
     virtual IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name = nullptr) = 0;
 
+    /// Adds an ArgMinMax layer to the network.
+    /// @param desc - Parameters for the arg min max operation.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+                                                 const char* name = nullptr) = 0;
+
     /// Adds a concatenation layer to the network.
     /// @param concatDescriptor - ConcatDescriptor (synonym for OriginsDescriptor) to configure the concatenation
     ///                           process. Number of Views must be equal to the number of inputs, and their order
index 363a091..0739b43 100644 (file)
@@ -39,6 +39,10 @@ public:
     void VisitAdditionLayer(const IConnectableLayer*,
                             const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitArgMinMaxLayer(const IConnectableLayer*,
+                             const ArgMinMaxDescriptor&,
+                             const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitBatchNormalizationLayer(const IConnectableLayer*,
                                       const BatchNormalizationDescriptor&,
                                       const ConstTensor&,
index 6141f27..98308f9 100644 (file)
@@ -17,6 +17,7 @@ enum class LayerType
     Abs = FirstLayer,
     Activation,
     Addition,
+    ArgMinMax,
     BatchNormalization,
     BatchToSpaceNd,
     Concat,
index d730205..f88e4e1 100644 (file)
@@ -91,6 +91,16 @@ bool IsAdditionSupported(const BackendId& backend,
     FORWARD_LAYER_SUPPORT_FUNC(backend, IsAdditionSupported, input0, input1, output);
 }
 
+bool IsArgMinMaxSupported(const BackendId& backend,
+                          const TensorInfo& input,
+                          const TensorInfo& output,
+                          const ArgMinMaxDescriptor& descriptor,
+                          char* reasonIfUnsupported,
+                          size_t reasonIfUnsupportedMaxLength)
+{
+    FORWARD_LAYER_SUPPORT_FUNC(backend, IsArgMinMaxSupported, input, output, descriptor);
+}
+
 bool IsBatchNormalizationSupported(const BackendId& backend,
                                    const TensorInfo& input,
                                    const TensorInfo& output,
index 94a3b89..6e4cf6a 100644 (file)
@@ -9,6 +9,7 @@
 #include "layers/AbsLayer.hpp"
 #include "layers/ActivationLayer.hpp"
 #include "layers/AdditionLayer.hpp"
+#include "layers/ArgMinMaxLayer.hpp"
 #include "layers/BatchNormalizationLayer.hpp"
 #include "layers/BatchToSpaceNdLayer.hpp"
 #include "layers/ConcatLayer.hpp"
@@ -89,6 +90,7 @@ constexpr LayerType LayerEnumOf(const T* = nullptr);
 DECLARE_LAYER(Abs)
 DECLARE_LAYER(Activation)
 DECLARE_LAYER(Addition)
+DECLARE_LAYER(ArgMinMax)
 DECLARE_LAYER(BatchNormalization)
 DECLARE_LAYER(BatchToSpaceNd)
 DECLARE_LAYER(Concat)
index dc26a1b..6971cb8 100644 (file)
@@ -1116,6 +1116,12 @@ IConnectableLayer* Network::AddActivationLayer(const ActivationDescriptor& activ
     return m_Graph->AddLayer<ActivationLayer>(activationDescriptor, name);
 }
 
+IConnectableLayer* Network::AddArgMinMaxLayer(const ArgMinMaxDescriptor& argMinMaxDescriptor,
+                                              const char* name)
+{
+    return m_Graph->AddLayer<ArgMinMaxLayer>(argMinMaxDescriptor, name);
+}
+
 IConnectableLayer* Network::AddNormalizationLayer(const NormalizationDescriptor&
 normalizationDescriptor,
     const char* name)
index 4516c0a..aac875a 100644 (file)
@@ -36,6 +36,9 @@ public:
 
     IConnectableLayer* AddInputLayer(LayerBindingId id, const char* name=nullptr) override;
 
+    IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
+                                         const char* name = nullptr) override;
+
     IConnectableLayer* AddBatchToSpaceNdLayer(const BatchToSpaceNdDescriptor& batchToSpaceNdDescriptor,
                                               const char* name = nullptr) override;
 
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
new file mode 100644 (file)
index 0000000..aad95eb
--- /dev/null
@@ -0,0 +1,52 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "ArgMinMaxLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <armnn/TypesUtils.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+ArgMinMaxLayer::ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name)
+    : LayerWithParameters(1, 1, LayerType::ArgMinMax, param, name)
+{
+}
+
+std::unique_ptr<IWorkload> ArgMinMaxLayer::CreateWorkload(const Graph& graph,
+    const IWorkloadFactory& factory) const
+{
+    ArgMinMaxQueueDescriptor descriptor;
+    return factory.CreateArgMinMax(descriptor, PrepInfoAndDesc(descriptor, graph));
+}
+
+ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
+{
+    return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
+}
+
+void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape() });
+
+    BOOST_ASSERT(inferredShapes.size() == 1);
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "ArgMinMaxLayer: TensorShape set on OutputSlot does not match the inferred shape.",
+            GetOutputSlot(0).GetTensorInfo().GetShape(),
+            inferredShapes[0]);
+}
+
+void ArgMinMaxLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitArgMinMaxLayer(this, GetParameters(), GetName());
+}
+
+} // namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/ArgMinMaxLayer.hpp b/src/armnn/layers/ArgMinMaxLayer.hpp
new file mode 100644 (file)
index 0000000..ca1337f
--- /dev/null
@@ -0,0 +1,44 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+/// This layer represents an ArgMinMax operation.
+class ArgMinMaxLayer : public LayerWithParameters<ArgMinMaxDescriptor>
+{
+public:
+    /// Makes a workload for the ArgMinMax type.
+    /// @param [in] graph The graph where this layer can be found.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const Graph& graph,
+                                                      const IWorkloadFactory& factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    ArgMinMaxLayer* Clone(Graph& graph) const override;
+
+    /// Check if the input tensor shape(s)
+    /// will lead to a valid configuration of @ref ArgMinMaxLayer.
+    void ValidateTensorShapesFromInputs() override;
+
+    void Accept(ILayerVisitor& visitor) const override;
+
+protected:
+    /// Constructor to create an ArgMinMaxLayer.
+    /// @param [in] param ArgMinMaxDescriptor to configure the ArgMinMax operation.
+    /// @param [in] name Optional name for the layer.
+    ArgMinMaxLayer(const ArgMinMaxDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~ArgMinMaxLayer() = default;
+};
+
+}
\ No newline at end of file
index 56d313f..a8d9c23 100644 (file)
@@ -146,6 +146,15 @@ void SerializerVisitor::VisitAdditionLayer(const armnn::IConnectableLayer* layer
     CreateAnyLayer(flatBufferAdditionLayer.o, serializer::Layer::Layer_AdditionLayer);
 }
 
+// Build FlatBuffer for ArgMinMax Layer
+void SerializerVisitor::VisitArgMinMaxLayer(const armnn::IConnectableLayer *layer,
+                                            const armnn::ArgMinMaxDescriptor& descriptor,
+                                            const char *name)
+{
+    // This will be implemented in IVGCVSW-3724
+    throw UnimplementedException("SerializerVisitor::VisitArgMinMaxLayer is not implemented");
+}
+
 // Build FlatBuffer for BatchToSpaceNd Layer
 void SerializerVisitor::VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
                                                  const armnn::BatchToSpaceNdDescriptor& descriptor,
index 7400885..190ed23 100644 (file)
@@ -52,6 +52,10 @@ public:
     void VisitAdditionLayer(const armnn::IConnectableLayer* layer,
                             const char* name = nullptr) override;
 
+    void VisitArgMinMaxLayer(const armnn::IConnectableLayer* layer,
+                             const armnn::ArgMinMaxDescriptor& argMinMaxDescriptor,
+                             const char* name = nullptr) override;
+
     void VisitBatchToSpaceNdLayer(const armnn::IConnectableLayer* layer,
                                   const armnn::BatchToSpaceNdDescriptor& descriptor,
                                   const char* name = nullptr) override;
index 464ec4e..a8d1ead 100644 (file)
@@ -57,6 +57,13 @@ bool LayerSupportBase::IsAdditionSupported(const TensorInfo& input0,
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsArgMinMaxSupported(const armnn::TensorInfo &input, const armnn::TensorInfo &output,
+                                            const armnn::ArgMinMaxDescriptor& descriptor,
+                                            armnn::Optional<std::string &> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsBatchNormalizationSupported(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const TensorInfo& mean,
index 3cf3d4e..25dbdf2 100644 (file)
@@ -27,6 +27,11 @@ public:
                              const TensorInfo& output,
                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsArgMinMaxSupported(const TensorInfo& input,
+                              const TensorInfo& output,
+                              const ArgMinMaxDescriptor& descriptor,
+                              Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsBatchNormalizationSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        const TensorInfo& mean,
index fed159b..e7e6d52 100644 (file)
@@ -461,6 +461,29 @@ void ActivationQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
+void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"ArgMinMaxQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    std::vector<DataType> supportedTypes =
+            {
+                    DataType::Float16,
+                    DataType::Float32,
+                    DataType::QuantisedAsymm8,
+                    DataType::QuantisedSymm16
+            };
+
+    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+    ValidateTensorShapesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
+}
+
 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
 {
     const std::string descriptorName{"SoftmaxQueueDescriptor"};
index a43c7cc..751d37f 100644 (file)
@@ -132,6 +132,11 @@ struct ActivationQueueDescriptor : QueueDescriptorWithParameters<ActivationDescr
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct ArgMinMaxQueueDescriptor : QueueDescriptorWithParameters<ArgMinMaxDescriptor>
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 // Fully connected layer workload data.
 struct FullyConnectedQueueDescriptor : QueueDescriptorWithParameters<FullyConnectedDescriptor>
 {
index abee316..d4352c7 100644 (file)
@@ -14,6 +14,7 @@ struct SoftmaxQueueDescriptor;
 struct SplitterQueueDescriptor;
 struct ConcatQueueDescriptor;
 struct ActivationQueueDescriptor;
+struct ArgMinMaxQueueDescriptor;
 struct FullyConnectedQueueDescriptor;
 struct PermuteQueueDescriptor;
 struct Pooling2dQueueDescriptor;
index 9d081af..17bd98b 100644 (file)
@@ -102,6 +102,20 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                         reason);
             break;
         }
+        case LayerType::ArgMinMax:
+        {
+            auto cLayer = boost::polymorphic_downcast<const ArgMinMaxLayer*>(&layer);
+            const ArgMinMaxDescriptor& descriptor = cLayer->GetParameters();
+
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsArgMinMaxSupported(
+                    OverrideDataType(input, dataType),
+                    OverrideDataType(output, dataType),
+                    descriptor,
+                    reason);
+            break;
+        }
         case LayerType::BatchNormalization:
         {
             auto cLayer = boost::polymorphic_downcast<const BatchNormalizationLayer*>(&layer);
@@ -979,6 +993,12 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateAddition(const AdditionQueueD
     return std::unique_ptr<IWorkload>();
 }
 
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                             const WorkloadInfo& info) const
+{
+    return std::unique_ptr<IWorkload>();
+}
+
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateBatchNormalization(
     const BatchNormalizationQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
index d0164b2..6fd334b 100644 (file)
@@ -58,6 +58,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateAddition(const AdditionQueueDescriptor& descriptor,
                                                       const WorkloadInfo&            info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateArgMinMax(const ArgMinMaxQueueDescriptor& descriptor,
+                                                       const WorkloadInfo&            info) const;
+
     virtual std::unique_ptr<IWorkload> CreateBatchNormalization(const BatchNormalizationQueueDescriptor& descriptor,
                                                                 const WorkloadInfo& info) const;
 
index caf5e58..1dc9e97 100644 (file)
@@ -391,6 +391,8 @@ DECLARE_LAYER_POLICY_2_PARAM(Activation)
 
 DECLARE_LAYER_POLICY_1_PARAM(Addition)
 
+DECLARE_LAYER_POLICY_2_PARAM(ArgMinMax)
+
 DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
 
 DECLARE_LAYER_POLICY_2_PARAM(BatchToSpaceNd)