IVGCVSW-4624 Add a RANK Reference Implementation
authorFinn Williams <Finn.Williams@arm.com>
Wed, 10 Jun 2020 14:53:46 +0000 (15:53 +0100)
committerFinn Williams <Finn.Williams@arm.com>
Mon, 6 Jul 2020 20:50:50 +0000 (21:50 +0100)
 * Add Rank front end
 * Add Rank reference implementation
 * Add Rank serialization support
 * Add Scalar serialization support

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I06e4a468c2a84e79bae2e6c5348596bbbf853b4b

44 files changed:
Android.mk
CMakeLists.txt
include/armnn/ILayerSupport.hpp
include/armnn/ILayerVisitor.hpp
include/armnn/INetwork.hpp
include/armnn/LayerVisitorBase.hpp
src/armnn/InternalTypes.hpp
src/armnn/LayersFwd.hpp
src/armnn/Network.cpp
src/armnn/Network.hpp
src/armnn/layers/RankLayer.cpp [new file with mode: 0644]
src/armnn/layers/RankLayer.hpp [new file with mode: 0644]
src/armnn/test/TestNameOnlyLayerVisitor.cpp
src/armnn/test/TestNameOnlyLayerVisitor.hpp
src/armnnDeserializer/Deserializer.cpp
src/armnnDeserializer/Deserializer.hpp
src/armnnDeserializer/DeserializerSupport.md
src/armnnDeserializer/test/DeserializeRank.cpp [new file with mode: 0644]
src/armnnSerializer/ArmnnSchema.fbs
src/armnnSerializer/Serializer.cpp
src/armnnSerializer/Serializer.hpp
src/armnnSerializer/SerializerSupport.md
src/armnnSerializer/test/SerializerTests.cpp
src/backends/backendsCommon/LayerSupportBase.cpp
src/backends/backendsCommon/LayerSupportBase.hpp
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/WorkloadData.hpp
src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/WorkloadFactory.hpp
src/backends/backendsCommon/WorkloadFactoryBase.hpp
src/backends/backendsCommon/common.mk
src/backends/backendsCommon/test/CMakeLists.txt
src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
src/backends/backendsCommon/test/LayerTests.hpp
src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp [new file with mode: 0644]
src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp [new file with mode: 0644]
src/backends/reference/RefLayerSupport.cpp
src/backends/reference/RefLayerSupport.hpp
src/backends/reference/RefWorkloadFactory.cpp
src/backends/reference/RefWorkloadFactory.hpp
src/backends/reference/test/RefLayerTests.cpp
src/backends/reference/workloads/CMakeLists.txt
src/backends/reference/workloads/RefRankWorkload.hpp [new file with mode: 0644]
src/backends/reference/workloads/RefWorkloads.hpp

index 31fda2f..38b2ad4 100644 (file)
@@ -180,6 +180,7 @@ LOCAL_SRC_FILES := \
         src/armnn/layers/QLstmLayer.cpp \
         src/armnn/layers/QuantizeLayer.cpp \
         src/armnn/layers/QuantizedLstmLayer.cpp \
+        src/armnn/layers/RankLayer.cpp \
         src/armnn/layers/ReshapeLayer.cpp \
         src/armnn/layers/ResizeLayer.cpp \
         src/armnn/layers/SliceLayer.cpp \
index 8f5a0bd..968c57f 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright © 2020 Arm Ltd. All rights reserved.
+# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
 # Copyright 2020 NXP
 # SPDX-License-Identifier: MIT
 #
@@ -354,6 +354,8 @@ list(APPEND armnn_sources
     src/armnn/layers/PreCompiledLayer.cpp
     src/armnn/layers/PreluLayer.hpp
     src/armnn/layers/PreluLayer.cpp
+    src/armnn/layers/RankLayer.hpp
+    src/armnn/layers/RankLayer.cpp
     src/armnn/layers/ReshapeLayer.hpp
     src/armnn/layers/ReshapeLayer.cpp
     src/armnn/layers/ResizeLayer.hpp
@@ -894,6 +896,7 @@ if(BUILD_UNIT_TESTS)
             src/armnnDeserializer/test/DeserializePad.cpp
             src/armnnDeserializer/test/DeserializePermute.cpp
             src/armnnDeserializer/test/DeserializePooling2d.cpp
+            src/armnnDeserializer/test/DeserializeRank.cpp
             src/armnnDeserializer/test/DeserializeReshape.cpp
             src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
             src/armnnDeserializer/test/DeserializeRsqrt.cpp
index 889b811..ed234fe 100644 (file)
@@ -314,6 +314,10 @@ public:
                                           const QuantizedLstmInputParamsInfo& paramsInfo,
                                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
 
+    virtual bool IsRankSupported(const TensorInfo& input,
+                                 const TensorInfo& output,
+                                 Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const = 0;
+
     virtual bool IsReshapeSupported(const TensorInfo& input,
                                     const TensorInfo& output,
                                     const ReshapeDescriptor& descriptor,
index 9b3998d..385ad62 100644 (file)
@@ -395,6 +395,12 @@ public:
                                          const QuantizedLstmInputParams& params,
                                          const char* name = nullptr) = 0;
 
+    /// Function a rank layer should call back to when its Accept(ILayerVisitor&) function is invoked.
+    /// @param layer - pointer to the layer which is calling back to this visit function.
+    /// @param name - Optional name for the layer.
+    virtual void VisitRankLayer(const IConnectableLayer* layer,
+                                const char* name = nullptr) = 0;
+
     /// Function a reshape layer should call back to when its Accept(ILayerVisitor&) function is invoked.
     /// @param layer - pointer to the layer which is calling back to this visit function.
     /// @param reshapeDescriptor - Parameters for the reshape operation.
index 8e7a443..c0c52f9 100644 (file)
@@ -352,6 +352,11 @@ public:
         const ConstTensor& gamma,
         const char* name = nullptr) = 0;
 
+    /// Adds a rank layer to the network.
+    /// @param name - Optional name for the layer.
+    /// @return - Interface for configuring the layer.
+    virtual IConnectableLayer* AddRankLayer(const char* name = nullptr) = 0;
+
     /// Adds a resize bilinear layer to the network.
     /// @param resizeDesc - Parameters for the resize operation.
     /// @param name - Optional name for the layer.
index 93ba7fe..75237a4 100644 (file)
@@ -201,6 +201,9 @@ public:
                                  const QuantizedLstmInputParams&,
                                  const char*) override { DefaultPolicy::Apply(__func__); }
 
+    void VisitRankLayer(const IConnectableLayer*,
+                        const char*) override { DefaultPolicy::Apply(__func__); }
+
     void VisitReshapeLayer(const IConnectableLayer*,
                            const ReshapeDescriptor&,
                            const char*) override { DefaultPolicy::Apply(__func__); }
index e2ad7a2..fc35d35 100644 (file)
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -60,6 +60,7 @@
     X(Quantize) \
     X(QuantizedLstm) \
+    X(Rank) \
     X(Reshape) \
     X(Resize) \
     X(Slice) \
     X(Softmax) \
index 575c3e5..fda00df 100644 (file)
@@ -53,6 +53,7 @@
 #include "layers/QuantizeLayer.hpp"
 #include "layers/QLstmLayer.hpp"
 #include "layers/QuantizedLstmLayer.hpp"
+#include "layers/RankLayer.hpp"
 #include "layers/ReshapeLayer.hpp"
 #include "layers/ResizeLayer.hpp"
 #include "layers/SliceLayer.hpp"
@@ -142,6 +143,7 @@ DECLARE_LAYER(Prelu)
 DECLARE_LAYER(Quantize)
 DECLARE_LAYER(QLstm)
 DECLARE_LAYER(QuantizedLstm)
+DECLARE_LAYER(Rank)
 DECLARE_LAYER(Reshape)
 DECLARE_LAYER(Resize)
 DECLARE_LAYER(Slice)
index 6c7314f..e0607bd 100644 (file)
@@ -1460,6 +1460,11 @@ IConnectableLayer* Network::AddBatchNormalizationLayer(const BatchNormalizationD
     return layer;
 }
 
+IConnectableLayer* Network::AddRankLayer(const char* name)
+{
+    return m_Graph->AddLayer<RankLayer>(name);
+}
+
 IConnectableLayer* Network::AddResizeBilinearLayer(const ResizeBilinearDescriptor& descriptor,
                                                    const char* name)
 {
index 53bf311..6bc0ac7 100644 (file)
@@ -160,6 +160,8 @@ public:
                                                   const ConstTensor&                  gamma,
                                                   const char*                         name = nullptr) override;
 
+    IConnectableLayer* AddRankLayer(const char* name = nullptr) override;
+
     ARMNN_DEPRECATED_MSG("Use AddResizeLayer instead")
     IConnectableLayer* AddResizeBilinearLayer(const ResizeBilinearDescriptor& resizeDesc,
                                               const char* name = nullptr) override;
diff --git a/src/armnn/layers/RankLayer.cpp b/src/armnn/layers/RankLayer.cpp
new file mode 100644 (file)
index 0000000..f1a24b1
--- /dev/null
@@ -0,0 +1,47 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RankLayer.hpp"
+
+#include "LayerCloneBase.hpp"
+
+#include <backendsCommon/WorkloadData.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+namespace armnn
+{
+
+RankLayer::RankLayer(const char* name)
+        : Layer(1, 1, LayerType::Rank, name)
+{}
+
+std::unique_ptr<IWorkload> RankLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    RankQueueDescriptor descriptor;
+    return factory.CreateRank(descriptor, PrepInfoAndDesc(descriptor));
+}
+
+Layer* RankLayer::Clone(Graph& graph) const
+{
+    RankLayer* clone = CloneBase<RankLayer>(graph, GetName());
+    return clone;
+}
+
+void RankLayer::ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod)
+{
+    IgnoreUnused(shapeInferenceMethod);
+    VerifyLayerConnections(1, CHECK_LOCATION());
+
+    ConditionalThrowIfNotEqual<LayerValidationException>(
+            "Rank: TensorShape set on OutputSlot[0] does not match the inferred shape.",
+            GetOutputSlot(0).GetTensorInfo().GetShape(), {TensorShape{Dimensionality::Scalar}});
+}
+
+void RankLayer::Accept(ILayerVisitor& visitor) const
+{
+    visitor.VisitRankLayer(this, GetName());
+}
+
+} //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/layers/RankLayer.hpp b/src/armnn/layers/RankLayer.hpp
new file mode 100644 (file)
index 0000000..e160d60
--- /dev/null
@@ -0,0 +1,34 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Layer.hpp>
+
+namespace armnn
+{
+
+class RankLayer : public Layer
+{
+    public:
+        /// Makes a workload for the Rank type.
+        /// @param [in] factory The workload factory which will create the workload.
+        /// @return A pointer to the created workload, or nullptr if not created.
+        virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
+
+        Layer* Clone(Graph& graph) const override;
+
+        void ValidateTensorShapesFromInputs(ShapeInferenceMethod shapeInferenceMethod) override;
+
+        void Accept(ILayerVisitor& visitor) const override;
+
+    protected:
+        RankLayer(const char* name);
+        ~RankLayer() = default;
+};
+
+} //namespace armnn
+
+
index 945afa8..994375d 100644 (file)
@@ -48,6 +48,7 @@ TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Minimum)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
+TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rank)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
 TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
 
index 0e1ea8e..519cbba 100644 (file)
@@ -35,5 +35,6 @@ DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Minimum)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Multiplication)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Prelu)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Quantize)
+DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Rank)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Subtraction)
 DECLARE_TEST_NAME_ONLY_LAYER_VISITOR_CLASS(Switch)
index 31fae2a..7143cdb 100644 (file)
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -225,6 +225,7 @@ m_ParserFunctions(Layer_MAX+1, &Deserializer::ParseUnsupportedLayer)
     m_ParserFunctions[Layer_QLstmLayer]                  = &Deserializer::ParseQLstm;
     m_ParserFunctions[Layer_QuantizeLayer]               = &Deserializer::ParseQuantize;
     m_ParserFunctions[Layer_QuantizedLstmLayer]          = &Deserializer::ParseQuantizedLstm;
+    m_ParserFunctions[Layer_RankLayer]                   = &Deserializer::ParseRank;
     m_ParserFunctions[Layer_ReshapeLayer]                = &Deserializer::ParseReshape;
     m_ParserFunctions[Layer_ResizeBilinearLayer]         = &Deserializer::ParseResizeBilinear;
     m_ParserFunctions[Layer_ResizeLayer]                 = &Deserializer::ParseResize;
@@ -331,6 +332,8 @@ Deserializer::LayerBaseRawPtr Deserializer::GetBaseLayer(const GraphPtr& graphPt
             return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizeLayer()->base();
         case Layer::Layer_QuantizedLstmLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_QuantizedLstmLayer()->base();
+        case Layer::Layer_RankLayer:
+            return graphPtr->layers()->Get(layerIndex)->layer_as_RankLayer()->base();
         case Layer::Layer_ReshapeLayer:
             return graphPtr->layers()->Get(layerIndex)->layer_as_ReshapeLayer()->base();
         case Layer::Layer_ResizeBilinearLayer:
@@ -545,6 +548,16 @@ armnn::TensorInfo ToTensorInfo(Deserializer::TensorRawPtr tensorPtr)
         }
     }
 
+    if (tensorPtr->dimensionality() == static_cast<unsigned int>(Dimensionality::Scalar))
+    {
+        float quantizationScale = tensorPtr->quantizationScale();
+        int32_t quantizationOffset = tensorPtr->quantizationOffset();
+
+        return armnn::TensorInfo(armnn::TensorShape{armnn::Dimensionality::Scalar},
+                                 type,
+                                 quantizationScale,
+                                 quantizationOffset);
+    }
 
     auto dimensions = tensorPtr->dimensions();
     unsigned int size = dimensions->size();
@@ -2008,6 +2021,26 @@ armnn::TensorInfo Deserializer::OutputShapeOfReshape(const armnn::TensorInfo& in
     return reshapeInfo;
 }
 
+void Deserializer::ParseRank(GraphPtr graph, unsigned int layerIndex)
+{
+    CHECK_LAYERS(graph, 0, layerIndex);
+
+    Deserializer::TensorRawPtrVector inputs = GetInputs(graph, layerIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    Deserializer::TensorRawPtrVector outputs = GetOutputs(graph, layerIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    auto layerName = GetLayerName(graph, layerIndex);
+    IConnectableLayer* layer = m_Network->AddRankLayer(layerName.c_str());
+
+    armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    RegisterInputSlots(graph, layerIndex, layer);
+    RegisterOutputSlots(graph, layerIndex, layer);
+}
+
 void Deserializer::ParseReshape(GraphPtr graph, unsigned int layerIndex)
 {
     CHECK_LAYERS(graph, 0, layerIndex);
index 69868c2..4a1bdad 100644 (file)
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -118,6 +118,7 @@ private:
     void ParsePrelu(GraphPtr graph, unsigned int layerIndex);
     void ParseQLstm(GraphPtr graph, unsigned int layerIndex);
     void ParseQuantize(GraphPtr graph, unsigned int layerIndex);
+    void ParseRank(GraphPtr graph, unsigned int layerIndex);
     void ParseReshape(GraphPtr graph, unsigned int layerIndex);
     void ParseResize(GraphPtr graph, unsigned int layerIndex);
     void ParseResizeBilinear(GraphPtr graph, unsigned int layerIndex);
index b4982ec..4e2ead4 100644 (file)
@@ -45,6 +45,7 @@ The Arm NN SDK Deserialize parser currently supports the following layers:
 * Quantize
 * QLstm
 * QuantizedLstm
+* Rank
 * Reshape
 * Resize
 * ResizeBilinear
diff --git a/src/armnnDeserializer/test/DeserializeRank.cpp b/src/armnnDeserializer/test/DeserializeRank.cpp
new file mode 100644 (file)
index 0000000..8f14af1
--- /dev/null
@@ -0,0 +1,151 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <boost/test/unit_test.hpp>
+#include "ParserFlatbuffersSerializeFixture.hpp"
+#include "../Deserializer.hpp"
+
+#include <string>
+
+BOOST_AUTO_TEST_SUITE(Deserializer)
+
+struct RankFixture : public ParserFlatbuffersSerializeFixture
+{
+    explicit RankFixture(const std::string &inputShape,
+                         const std::string &dataType)
+    {
+        m_JsonString = R"(
+        {
+            inputIds: [0],
+            outputIds: [2],
+              layers: [
+               {
+                 layer_type: "InputLayer",
+                 layer: {
+                   base: {
+                     base: {
+                       layerName: "",
+                       layerType: "Input",
+                       inputSlots: [
+
+                       ],
+                       outputSlots: [
+                         {
+                           tensorInfo: {
+                             dimensions: )" + inputShape + R"(,
+                             dataType: )" + dataType + R"(,
+                             quantizationScale: 0.0
+                           }
+                         }
+                       ]
+                     }
+                   }
+                 }
+               },
+               {
+                 layer_type: "RankLayer",
+                 layer: {
+                   base: {
+                     index: 1,
+                     layerName: "rank",
+                     layerType: "Rank",
+                     inputSlots: [
+                       {
+                         connection: {
+                           sourceLayerIndex: 0,
+                           outputSlotIndex: 0
+                         }
+                       }
+                     ],
+                     outputSlots: [
+                       {
+                         tensorInfo: {
+                           dimensions: [ 1 ],
+                           dataType: "Signed32",
+                           quantizationScale: 0.0,
+                           dimensionality: 2
+                         }
+                       }
+                     ]
+                   }
+                 }
+               },
+               {
+                 layer_type: "OutputLayer",
+                 layer: {
+                   base: {
+                     base: {
+                       index: 2,
+                       layerName: "",
+                       layerType: "Output",
+                       inputSlots: [
+                         {
+                           connection: {
+                             sourceLayerIndex: 1,
+                             outputSlotIndex: 0
+                           }
+                         }
+                       ],
+                       outputSlots: []
+                     }
+                   }
+                 }
+               }
+             ],
+         }
+     )";
+        Setup();
+    }
+};
+
+struct SimpleRankDimSize1Fixture : RankFixture
+{
+    SimpleRankDimSize1Fixture() : RankFixture("[ 8 ]", "QSymmS16") {}
+};
+
+struct SimpleRankDimSize2Fixture : RankFixture
+{
+    SimpleRankDimSize2Fixture() : RankFixture("[ 3, 3 ]", "QSymmS8") {}
+};
+
+struct SimpleRankDimSize3Fixture : RankFixture
+{
+    SimpleRankDimSize3Fixture() : RankFixture("[ 2, 2, 1 ]", "Signed32") {}
+};
+
+struct SimpleRankDimSize4Fixture : RankFixture
+{
+    SimpleRankDimSize4Fixture() : RankFixture("[ 2, 2, 1, 1 ]", "Float32") {}
+};
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize1QSymmS16, SimpleRankDimSize1Fixture)
+{
+    RunTest<1, armnn::DataType::QSymmS16, armnn::DataType::Signed32>( 0,
+                                                                      { 1, 2, 3, 4, 5, 6, 7, 8 },
+                                                                      { 1 });
+}
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize2QSymmS8, SimpleRankDimSize2Fixture)
+{
+    RunTest<1, armnn::DataType::QSymmS8, armnn::DataType::Signed32>( 0,
+                                                                    { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+                                                                    { 2 });
+}
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize3Signed32, SimpleRankDimSize3Fixture)
+{
+    RunTest<1, armnn::DataType::Signed32, armnn::DataType::Signed32>( 0,
+                                                                    { 111, 85, 226, 3 },
+                                                                    { 3 });
+}
+
+BOOST_FIXTURE_TEST_CASE(RankDimSize4Float32, SimpleRankDimSize4Fixture)
+{
+    RunTest<1, armnn::DataType::Float32, armnn::DataType::Signed32>( 0,
+                                                                   { 111, 85, 226, 3 },
+                                                                   { 4 });
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
index 6a388db..e1b6e1f 100644 (file)
@@ -59,6 +59,7 @@ table TensorInfo {
     quantizationOffset:int = 0;
     quantizationScales:[float];
     quantizationDim:uint;
+    dimensionality:uint = 1;
 }
 
 struct Connection {
@@ -157,7 +158,8 @@ enum LayerType : uint {
     ElementwiseUnary = 54,
     Transpose = 55,
     QLstm = 56,
-    Fill = 57
+    Fill = 57,
+    Rank = 58
 }
 
 // Base layer table to be used as part of other layers
@@ -859,6 +861,10 @@ table StandInLayer {
     descriptor:StandInDescriptor;
 }
 
+table RankLayer {
+    base:LayerBase;
+}
+
 union Layer {
     ActivationLayer,
     AdditionLayer,
@@ -917,7 +923,8 @@ union Layer {
     ElementwiseUnaryLayer,
     TransposeLayer,
     QLstmLayer,
-    FillLayer
+    FillLayer,
+    RankLayer
 }
 
 table AnyLayer {
index 6555a34..8a1f771 100644 (file)
@@ -2,7 +2,6 @@
 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-
 #include "Serializer.hpp"
 
 #include <armnn/Descriptors.hpp>
@@ -851,6 +850,16 @@ void SerializerVisitor::VisitPermuteLayer(const armnn::IConnectableLayer* layer,
     CreateAnyLayer(flatBufferPermuteLayer.o, serializer::Layer::Layer_PermuteLayer);
 }
 
+// Build FlatBuffer for Rank Layer
+void SerializerVisitor::VisitRankLayer(const armnn::IConnectableLayer* layer,
+                                       const char* name)
+{
+    IgnoreUnused(name);
+    auto flatBufferBaseLayer = CreateLayerBase(layer, serializer::LayerType::LayerType_Rank);
+    auto flatBufferRankLayer = serializer::CreateRankLayer(m_flatBufferBuilder, flatBufferBaseLayer);
+
+    CreateAnyLayer(flatBufferRankLayer.o, serializer::Layer::Layer_RankLayer);
+}
 // Build FlatBuffer for Reshape Layer
 void SerializerVisitor::VisitReshapeLayer(const armnn::IConnectableLayer* layer,
                                           const armnn::ReshapeDescriptor& reshapeDescriptor,
@@ -1584,7 +1593,9 @@ flatbuffers::Offset<TensorInfo>  SerializerVisitor::CreateTensorInfo(const armnn
                                          tensorInfo.GetQuantizationScales()[0],
                                          tensorInfo.GetQuantizationOffset(),
                                          m_flatBufferBuilder.CreateVector(tensorInfo.GetQuantizationScales()),
-                                         tensorInfo.GetQuantizationDim().value());
+                                         tensorInfo.GetQuantizationDim().value(),
+                                         static_cast<unsigned int>
+                                         (tensorInfo.GetShape().GetDimensionality()));
         return flatBufferTensorInfo;
     }
 
@@ -1593,7 +1604,11 @@ flatbuffers::Offset<TensorInfo>  SerializerVisitor::CreateTensorInfo(const armnn
                                                              m_flatBufferBuilder.CreateVector(shape),
                                                              GetFlatBufferDataType(tensorInfo.GetDataType()),
                                                              tensorInfo.GetQuantizationScale(),
-                                                             tensorInfo.GetQuantizationOffset());
+                                                             tensorInfo.GetQuantizationOffset(),
+                                                             0,
+                                                             0,
+                                                             static_cast<unsigned int>
+                                                             (tensorInfo.GetShape().GetDimensionality()));
     return flatBufferTensorInfo;
 }
 
@@ -1742,4 +1757,4 @@ bool Serializer::SaveSerializedToStream(std::ostream& stream)
     return !stream.bad();
 }
 
-} // namespace armnnSerializer
\ No newline at end of file
+} // namespace armnnSerializer
index e4104dd..babecdc 100644 (file)
@@ -219,6 +219,9 @@ public:
                                  const armnn::QuantizedLstmInputParams& params,
                                  const char* name = nullptr) override;
 
+    void VisitRankLayer(const armnn::IConnectableLayer* layer,
+                        const char* name = nullptr) override;
+
     void VisitReshapeLayer(const armnn::IConnectableLayer* layer,
                            const armnn::ReshapeDescriptor& reshapeDescriptor,
                            const char* name = nullptr) override;
index 4f7868b..4383353 100644 (file)
@@ -44,6 +44,7 @@ The Arm NN SDK Serializer currently supports the following layers:
 * QLstm
 * Quantize
 * QuantizedLstm
+* Rank
 * Reshape
 * Resize
 * Slice
index 088282a..e059511 100644 (file)
@@ -2202,6 +2202,32 @@ BOOST_AUTO_TEST_CASE(SerializeQuantize)
     deserializedNetwork->Accept(verifier);
 }
 
+BOOST_AUTO_TEST_CASE(SerializeRank)
+{
+    DECLARE_LAYER_VERIFIER_CLASS(Rank)
+
+    const std::string layerName("rank");
+    const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputInfo({1}, armnn::DataType::Signed32);
+
+    armnn::INetworkPtr network = armnn::INetwork::Create();
+    armnn::IConnectableLayer* const inputLayer = network->AddInputLayer(0);
+    armnn::IConnectableLayer* const rankLayer = network->AddRankLayer(layerName.c_str());
+    armnn::IConnectableLayer* const outputLayer = network->AddOutputLayer(0);
+
+    inputLayer->GetOutputSlot(0).Connect(rankLayer->GetInputSlot(0));
+    rankLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+
+    inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
+    rankLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
+    BOOST_CHECK(deserializedNetwork);
+
+    RankLayerVerifier verifier(layerName, {inputInfo}, {outputInfo});
+    deserializedNetwork->Accept(verifier);
+}
+
 BOOST_AUTO_TEST_CASE(SerializeReshape)
 {
     DECLARE_LAYER_VERIFIER_CLASS_WITH_DESCRIPTOR(Reshape)
index 52e615a..92c1023 100644 (file)
@@ -488,6 +488,13 @@ bool LayerSupportBase::IsQuantizedLstmSupported(const TensorInfo&, // input
     return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
 }
 
+bool LayerSupportBase::IsRankSupported(const TensorInfo&, // input
+                                       const TensorInfo&,  // output
+                                       Optional<std::string&> reasonIfUnsupported) const
+{
+    return DefaultLayerSupport(__func__, __FILE__, __LINE__, reasonIfUnsupported);
+}
+
 bool LayerSupportBase::IsReshapeSupported(const TensorInfo&, // input
                                           const TensorInfo&, // output
                                           const ReshapeDescriptor&, // descriptor
index 8d5535a..13fd39e 100644 (file)
@@ -300,6 +300,10 @@ public:
                                   const QuantizedLstmInputParamsInfo& paramsInfo,
                                   Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsRankSupported(const TensorInfo& input,
+                         const TensorInfo& output,
+                         Optional<std::string&> reasonIfUnsupported) const override;
+
     bool IsReshapeSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
index 3949fa9..c7650dc 100644 (file)
@@ -3515,4 +3515,33 @@ void ElementwiseUnaryQueueDescriptor::Validate(const WorkloadInfo& workloadInfo)
     ValidateTensorDataTypesMatch(inputTensorInfo, outputTensorInfo, descriptorName, "input", "output");
 }
 
+void RankQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    const std::string descriptorName{"RankQueueDescriptor"};
+
+    ValidateNumInputs(workloadInfo,  descriptorName, 1);
+    ValidateNumOutputs(workloadInfo, descriptorName, 1);
+
+    const TensorInfo& inputTensorInfo  = workloadInfo.m_InputTensorInfos[0];
+    const TensorInfo& outputTensorInfo = workloadInfo.m_OutputTensorInfos[0];
+
+    ValidateTensorNumDimensions(outputTensorInfo, descriptorName, 1, "output");
+    ValidateTensorNumElements(outputTensorInfo, descriptorName, 1, "output");
+
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::BFloat16,
+        DataType::Float16,
+        DataType::Float32,
+        DataType::QAsymmS8,
+        DataType::QAsymmU8,
+        DataType::QSymmS8,
+        DataType::QSymmS16,
+        DataType::Signed32
+    };
+
+    ValidateDataTypes(inputTensorInfo, supportedTypes, descriptorName);
+    ValidateDataTypes(outputTensorInfo, { DataType::Signed32 }, descriptorName);
+}
+
 } // namespace armnn
index 6b2c00e..1f54f9a 100644 (file)
@@ -290,6 +290,11 @@ struct BatchNormalizationQueueDescriptor : QueueDescriptorWithParameters<BatchNo
     void Validate(const WorkloadInfo& workloadInfo) const;
 };
 
+struct RankQueueDescriptor : QueueDescriptor
+{
+    void Validate(const WorkloadInfo& workloadInfo) const;
+};
+
 struct ResizeBilinearQueueDescriptor : QueueDescriptorWithParameters<ResizeBilinearDescriptor>
 {
     void Validate(const WorkloadInfo& workloadInfo) const;
index 788cb7e..09d7c2d 100644 (file)
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <backendsCommon/WorkloadFactory.hpp>
-#include <armnn/backends/IBackendInternal.hpp>
 #include <backendsCommon/CpuTensorHandle.hpp>
-#include <backendsCommon/WorkloadFactory.hpp>
 
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
 #include <boost/iterator/transform_iterator.hpp>
 
-#include <cstring>
 #include <sstream>
 
 namespace armnn
@@ -924,6 +921,15 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
                                          reason);
             break;
         }
+        case LayerType::Rank:
+        {
+            const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
+            const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
+            result = layerSupportObject->IsRankSupported(OverrideDataType(input, dataType),
+                                                         OverrideDataType(output, dataType),
+                                                         reason);
+            break;
+        }
         case LayerType::Reshape:
         {
             auto cLayer = PolymorphicDowncast<const ReshapeLayer*>(&layer);
@@ -1515,6 +1521,11 @@ std::unique_ptr<IWorkload> IWorkloadFactory::CreateQuantizedLstm(const Quantized
 {
     return std::unique_ptr<IWorkload>();
 }
+std::unique_ptr<IWorkload> IWorkloadFactory::CreateRank(const RankQueueDescriptor& /*descriptor*/,
+                                                        const WorkloadInfo& /*info*/) const
+{
+    return std::unique_ptr<IWorkload>();
+}
 
 std::unique_ptr<IWorkload> IWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
                                                            const WorkloadInfo& /*info*/) const
index e373a4f..04b895e 100644 (file)
@@ -206,6 +206,9 @@ public:
     virtual std::unique_ptr<IWorkload> CreateQuantizedLstm(const QuantizedLstmQueueDescriptor& descriptor,
                                                            const WorkloadInfo& info) const;
 
+    virtual std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
+                                                  const WorkloadInfo& info) const;
+
     virtual std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                      const WorkloadInfo& info) const;
 
index 960dbd3..bfdb5e9 100644 (file)
@@ -218,6 +218,10 @@ public:
                                                    const WorkloadInfo& /*info*/) const override
     { return nullptr; }
 
+    // Rank is not implemented by this stub factory; nullptr mirrors the
+    // surrounding Create* overrides.
+    std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& /*descriptor*/,
+                                          const WorkloadInfo& /*info*/) const override
+    { return nullptr; }
+
     std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& /*descriptor*/,
                                              const WorkloadInfo& /*info*/) const override
     { return nullptr; }
index c31b1f0..7829abb 100644 (file)
@@ -71,6 +71,7 @@ COMMON_TEST_SOURCES := \
     test/layerTests/NormalizationTestImpl.cpp \
     test/layerTests/PadTestImpl.cpp \
     test/layerTests/Pooling2dTestImpl.cpp \
+    test/layerTests/RankTestImpl.cpp \
     test/layerTests/ReshapeTestImpl.cpp \
     test/layerTests/ResizeTestImpl.cpp \
     test/layerTests/RsqrtTestImpl.cpp \
index 0ce5299..8373c28 100644 (file)
@@ -131,6 +131,8 @@ list(APPEND armnnBackendsCommonUnitTests_sources
     layerTests/PreluTestImpl.hpp
     layerTests/QuantizeTestImpl.cpp
     layerTests/QuantizeTestImpl.hpp
+    layerTests/RankTestImpl.cpp
+    layerTests/RankTestImpl.hpp
     layerTests/ReshapeTestImpl.cpp
     layerTests/ReshapeTestImpl.hpp
     layerTests/ResizeTestImpl.cpp
index e30cbb3..ed4b6ff 100644 (file)
@@ -595,6 +595,8 @@ DECLARE_LAYER_POLICY_1_PARAM(QuantizedLstm)
 
 DECLARE_LAYER_POLICY_1_PARAM(Division)
 
+DECLARE_LAYER_POLICY_1_PARAM(Rank)
+
 DECLARE_LAYER_POLICY_2_PARAM(Resize)
 
 DECLARE_LAYER_POLICY_2_PARAM(Reshape)
index 7e8b301..a461591 100644 (file)
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,6 +46,7 @@
 #include <backendsCommon/test/layerTests/Pooling2dTestImpl.hpp>
 #include <backendsCommon/test/layerTests/PreluTestImpl.hpp>
 #include <backendsCommon/test/layerTests/QuantizeTestImpl.hpp>
+#include <backendsCommon/test/layerTests/RankTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ReshapeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/ResizeTestImpl.hpp>
 #include <backendsCommon/test/layerTests/RsqrtTestImpl.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.cpp
new file mode 100644 (file)
index 0000000..82de11a
--- /dev/null
@@ -0,0 +1,262 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RankTestImpl.hpp"
+
+#include <backendsCommon/test/DataTypeUtils.hpp>
+#include <backendsCommon/test/TensorCopyUtils.hpp>
+#include <backendsCommon/test/WorkloadTestUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<typename T, std::size_t n>
+LayerTestResult<int32_t, 1> RankTest(
+        armnn::TensorInfo inputTensorInfo,
+        boost::multi_array<T, n> input,
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    IgnoreUnused(memoryManager);
+
+    const armnn::TensorShape outputShape{armnn::Dimensionality::Scalar};
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    LayerTestResult<int32_t , 1> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<uint32_t, 1>(outputTensorInfo, { n });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::RankQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateRank(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0], outputHandle.get());
+
+    return ret;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize1Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo({6}, ArmnnType, 1.0f, 0);
+    auto input = MakeTensor<T, 1>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+            { -37.5f, -15.2f, -8.76f, -2.0f, -1.3f, -0.5f },
+            inputTensorInfo));
+
+    return RankTest<T, 1>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize2Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo({1, 3}, ArmnnType, 1.0f, 0);
+    auto input = MakeTensor<T, 2>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+            { -37.5f, -15.2f, -8.76f },
+            inputTensorInfo));
+
+    return RankTest<T, 2>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize3Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo({1, 3, 2}, ArmnnType, 1.0f, 0);
+    auto input = MakeTensor<T, 3>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+            { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f},
+            inputTensorInfo));
+
+    return RankTest<T, 3>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<int32_t, 1> RankDimSize4Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+{
+    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, ArmnnType, 1.0f, 0);
+    auto input = MakeTensor<T, 4>(inputTensorInfo, ConvertToDataType<ArmnnType>(
+            { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+              1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f },
+            inputTensorInfo));
+
+    return RankTest<T, 4>(inputTensorInfo, input, workloadFactory, memoryManager);
+}
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::Float16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QAsymmU8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::Signed32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QSymmS16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QSymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::QAsymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize4Test<armnn::DataType::BFloat16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::Float16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QAsymmU8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::Signed32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QSymmS16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QSymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::QAsymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize3Test<armnn::DataType::BFloat16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::Float16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QAsymmU8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::Signed32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QSymmS16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QSymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::QAsymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize2Test<armnn::DataType::BFloat16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::Float16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QAsymmU8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::Signed32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QSymmS16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QSymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::QAsymmS8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template LayerTestResult<int32_t, 1>
+RankDimSize1Test<armnn::DataType::BFloat16>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/RankTestImpl.hpp
new file mode 100644 (file)
index 0000000..ac82ac9
--- /dev/null
@@ -0,0 +1,40 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerTestResult.hpp"
+
+#include <ResolveType.hpp>
+
+#include <armnn/backends/IBackendInternal.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<typename T , std::size_t n>
+LayerTestResult<int32_t, 1> RankTest(
+        armnn::TensorInfo inputTensorInfo,
+        boost::multi_array<T, n> input,
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize1Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize2Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize3Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<int32_t, 1> RankDimSize4Test(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
index 696c6d9..877d200 100644 (file)
@@ -1651,6 +1651,21 @@ bool RefLayerSupport::IsQuantizeSupported(const TensorInfo& input,
     return supported;
 }
 
+bool RefLayerSupport::IsRankSupported(const TensorInfo& input,
+                                      const TensorInfo& output,
+                                      Optional<std::string&> reasonIfUnsupported) const
+{
+    IgnoreUnused(input);
+    // Define supported output types.
+    std::array<DataType,1> supportedOutputTypes =
+    {
+        DataType::Signed32,
+    };
+
+    return CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
+           "Reference rank: input type not supported.");
+}
+
 bool RefLayerSupport::IsReshapeSupported(const TensorInfo& input,
                                          const TensorInfo& output,
                                          const ReshapeDescriptor& descriptor,
index 7d2bbf2..a233082 100644 (file)
@@ -265,6 +265,10 @@ public:
                           const LstmInputParamsInfo& paramsInfo,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsRankSupported(const TensorInfo& input,
+                         const TensorInfo& output,
+                         Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsReshapeSupported(const TensorInfo& input,
                             const TensorInfo& output,
                             const ReshapeDescriptor& descriptor,
index dcdabe1..cac1d1b 100644 (file)
@@ -549,6 +549,12 @@ std::unique_ptr<IWorkload> RefWorkloadFactory::CreateQuantize(const QuantizeQueu
     return std::make_unique<RefQuantizeWorkload>(descriptor, info);
 }
 
+// Reference backend supports Rank: hand the descriptor to the header-only
+// RefRankWorkload, which writes the input's dimension count to the output.
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRank(const RankQueueDescriptor& descriptor,
+                                                          const WorkloadInfo& info) const
+{
+    return std::make_unique<RefRankWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                                              const WorkloadInfo& info) const
 {
index 941f1a6..e2eab07 100644 (file)
@@ -209,6 +209,9 @@ public:
     std::unique_ptr<IWorkload> CreateQuantize(const QuantizeQueueDescriptor& descriptor,
                                               const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateRank(const RankQueueDescriptor& descriptor,
+                                          const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateReshape(const ReshapeQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
index d96fa8b..53df9a3 100644 (file)
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -9,7 +9,6 @@
 
 #include <reference/RefWorkloadFactory.hpp>
 
-#include <test/TensorHelpers.hpp>
 #include <test/UnitTests.hpp>
 
 #include <boost/test/unit_test.hpp>
@@ -797,6 +796,43 @@ ARMNN_AUTO_TEST_CASE(BatchNormUint8Nhwc, BatchNormUint8NhwcTest)
 ARMNN_AUTO_TEST_CASE(BatchNormInt16, BatchNormInt16Test)
 ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
 
+// Rank
+ARMNN_AUTO_TEST_CASE(RankDimSize1Float16,  RankDimSize1Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1Float32,  RankDimSize1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QAsymmU8, RankDimSize1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QSymmS8,  RankDimSize1Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize1BFloat16, RankDimSize1Test<DataType::BFloat16>)
+
+ARMNN_AUTO_TEST_CASE(RankDimSize2Float16,  RankDimSize2Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2Float32,  RankDimSize2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QAsymmU8, RankDimSize2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QSymmS8,  RankDimSize2Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize2BFloat16, RankDimSize2Test<DataType::BFloat16>)
+
+ARMNN_AUTO_TEST_CASE(RankDimSize3Float16,  RankDimSize3Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3Float32,  RankDimSize3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QAsymmU8, RankDimSize3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QSymmS8,  RankDimSize3Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize3BFloat16, RankDimSize3Test<DataType::BFloat16>)
+
+ARMNN_AUTO_TEST_CASE(RankDimSize4Float16,  RankDimSize4Test<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4Float32,  RankDimSize4Test<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QAsymmU8, RankDimSize4Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QSymmS8,  RankDimSize4Test<DataType::QSymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_CASE(RankDimSize4BFloat16, RankDimSize4Test<DataType::BFloat16>)
+
 // Resize Bilinear - NCHW
 ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
                      SimpleResizeBilinearTest<DataType::Float32>,
index d51db36..937a320 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright © 2017 Arm Ltd. All rights reserved.
+# Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -129,6 +129,7 @@ list(APPEND armnnRefBackendWorkloads_sources
     RefQuantizeWorkload.hpp
     RefQLstmWorkload.cpp
     RefQLstmWorkload.hpp
+    RefRankWorkload.hpp
     RefReshapeWorkload.cpp
     RefReshapeWorkload.hpp
     RefResizeBilinearWorkload.cpp
diff --git a/src/backends/reference/workloads/RefRankWorkload.hpp b/src/backends/reference/workloads/RefRankWorkload.hpp
new file mode 100644 (file)
index 0000000..780d3be
--- /dev/null
@@ -0,0 +1,32 @@
+//
+// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include "RefWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+// Reference workload for the Rank layer: writes the number of dimensions of
+// the single input tensor into the single (Signed32 scalar) output tensor.
+struct RefRankWorkload : public BaseWorkload<RankQueueDescriptor>
+{
+public:
+    using BaseWorkload<RankQueueDescriptor>::BaseWorkload;
+    virtual void Execute() const override
+    {
+        // Rank is purely shape metadata; the input's element data is never read.
+        const int32_t rank = static_cast<int32_t>(GetTensorInfo(m_Data.m_Inputs[0]).GetNumDimensions());
+
+        // Copy the scalar result into the output buffer as a raw int32_t.
+        std::memcpy(GetOutputTensorData<void>(0, m_Data), &rank, sizeof(int32_t));
+    }
+};
+
+} //namespace armnn
+
+
+
+
index 951e3a1..fc47cff 100644 (file)
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -51,6 +51,7 @@
 #include "RefPreluWorkload.hpp"
 #include "RefQLstmWorkload.hpp"
 #include "RefQuantizeWorkload.hpp"
+#include "RefRankWorkload.hpp"
 #include "RefReshapeWorkload.hpp"
 #include "RefResizeBilinearWorkload.hpp"
 #include "RefResizeWorkload.hpp"