IVGCVSW-3944 Add ArgMinMax output shape validation
author    James Conroy <james.conroy@arm.com>
          Tue, 8 Oct 2019 14:41:34 +0000 (15:41 +0100)
committer Jim Flynn Arm <jim.flynn@arm.com>
          Thu, 10 Oct 2019 08:48:39 +0000 (08:48 +0000)
Signed-off-by: James Conroy <james.conroy@arm.com>
Change-Id: I469895da158b062cd19248832525fa21527f7d41

src/armnn/layers/ArgMinMaxLayer.cpp
src/armnn/layers/ArgMinMaxLayer.hpp
src/armnn/test/InferOutputTests.cpp
src/armnn/test/InferOutputTests.hpp
src/backends/backendsCommon/WorkloadData.cpp
src/backends/backendsCommon/test/layerTests/ArgMinMaxTestImpl.cpp
src/backends/neon/test/NeonLayerTests.cpp

index aad95eb..bfd71d5 100644 (file)
@@ -6,6 +6,8 @@
 
 #include "LayerCloneBase.hpp"
 
+#include <TensorUtils.hpp>
+
 #include <armnn/TypesUtils.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
@@ -30,6 +32,43 @@ ArgMinMaxLayer* ArgMinMaxLayer::Clone(Graph& graph) const
     return CloneBase<ArgMinMaxLayer>(graph, m_Param, GetName());
 }
 
+std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    BOOST_ASSERT(inputShapes.size() == 1);
+
+    TensorShape inputShape = inputShapes[0];
+    auto inputNumDimensions = inputShape.GetNumDimensions();
+
+    auto axis = m_Param.m_Axis;
+    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
+
+    BOOST_ASSERT(unsignedAxis <= inputNumDimensions);
+
+    // 1D input shape results in scalar output
+    if (inputShape.GetNumDimensions() == 1)
+    {
+        std::vector<unsigned int> tensorDimensions(1, 1);
+        TensorShape outputShape(1, tensorDimensions.data());
+
+        return std::vector<TensorShape>({ outputShape });
+    }
+
+    std::vector<unsigned int> tensorDimensions(inputNumDimensions - 1, 0);
+    for (unsigned int i = 0; i < unsignedAxis; ++i)
+    {
+        tensorDimensions[i] = inputShape[i];
+    }
+
+    for (unsigned int i = unsignedAxis + 1; i < inputNumDimensions; ++i)
+    {
+        tensorDimensions[i - 1] = inputShape[i];
+    }
+
+    TensorShape outputShape = TensorShape(inputNumDimensions - 1, tensorDimensions.data());
+
+    return std::vector<TensorShape>({ outputShape });
+}
+
 void ArgMinMaxLayer::ValidateTensorShapesFromInputs()
 {
     VerifyLayerConnections(1, CHECK_LOCATION());
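
For reference, the rule implemented by the new InferOutputShapes above is simply: drop the reduced axis from the input shape, and collapse a 1D input to a single-element shape. A minimal standalone sketch of the same mapping (not part of the patch), using only the public armnn::TensorShape API, could look like:

    #include <armnn/Tensor.hpp>
    #include <vector>

    // Sketch only: mirrors the shape rule of ArgMinMaxLayer::InferOutputShapes.
    // 'axis' is assumed to already be an unsigned index smaller than the input rank
    // (i.e. after armnnUtils::GetUnsignedAxis has been applied).
    armnn::TensorShape InferArgMinMaxOutputShape(const armnn::TensorShape& inputShape,
                                                 unsigned int axis)
    {
        const unsigned int rank = inputShape.GetNumDimensions();
        if (rank == 1)
        {
            return armnn::TensorShape({ 1 }); // 1D input reduces to a scalar-like { 1 }
        }

        std::vector<unsigned int> dims;
        dims.reserve(rank - 1);
        for (unsigned int i = 0; i < rank; ++i)
        {
            if (i != axis)
            {
                dims.push_back(inputShape[i]); // keep every dimension except the reduced axis
            }
        }
        return armnn::TensorShape(rank - 1, dims.data());
    }

    // Example: InferArgMinMaxOutputShape({ 1, 3, 2, 4 }, 2) yields { 1, 3, 4 }.
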
index ca1337f..43ea056 100644 (file)
@@ -25,6 +25,11 @@ public:
     /// @param [in] graph The graph into which this layer is being cloned.
     ArgMinMaxLayer* Clone(Graph& graph) const override;
 
+    /// Infers the output shape from a given input shape and axis parameter.
+    /// @param [in] inputShapes The vector of input shapes for ArgMinMax.
+    /// @return A vector of inferred output shapes.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
     /// Check if the input tensor shape(s)
     /// will lead to a valid configuration of @ref ArgMinMaxLayer.
     void ValidateTensorShapesFromInputs() override;
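
The new override takes the axis from the descriptor's m_Axis field and relies on armnnUtils::GetUnsignedAxis (hence the TensorUtils.hpp include added to the .cpp file above) to turn it into an unsigned dimension index before that axis is dropped. Assuming the usual convention that negative axes count back from the last dimension, the mapping behaves roughly as follows:

    #include <TensorUtils.hpp>
    #include <cassert>

    // Assumed behaviour of armnnUtils::GetUnsignedAxis(inputDimensions, axis);
    // see TensorUtils.hpp for the actual implementation.
    void CheckAxisMapping()
    {
        assert(armnnUtils::GetUnsignedAxis(4,  2) == 2); // non-negative axes pass through
        assert(armnnUtils::GetUnsignedAxis(4, -1) == 3); // -1 maps to the last dimension
        assert(armnnUtils::GetUnsignedAxis(4, -4) == 0); // -rank maps to the first dimension
    }
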
index 8606745..3293cef 100644 (file)
 
 BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
 
+// ArgMinMax
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape4d, ArgMinMaxInferOutputShape4dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape3d, ArgMinMaxInferOutputShape3dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape2d, ArgMinMaxInferOutputShape2dTest)
+ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape1d, ArgMinMaxInferOutputShape1dTest)
+
 // BatchToSpace
 ARMNN_SIMPLE_TEST_CASE(BatchToSpaceInferOutputShape, BatchToSpaceInferOutputShapeTest)
 
index c428a9d..feb2125 100644 (file)
@@ -10,6 +10,7 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Graph.hpp>
+#include <layers/ArgMinMaxLayer.hpp>
 #include <layers/BatchToSpaceNdLayer.hpp>
 #include <layers/SpaceToDepthLayer.hpp>
 #include <layers/PreluLayer.hpp>
 #include <boost/algorithm/string.hpp>
 #include <boost/test/unit_test.hpp>
 
+void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor       descriptor,
+                                   const std::vector<armnn::TensorShape>& inputShapes,
+                                   std::vector<armnn::TensorShape>&       outputShapes)
+{
+    armnn::Graph graph;
+    auto argMinMaxLayer = graph.AddLayer<armnn::ArgMinMaxLayer>(descriptor, "argMinMax");
+    outputShapes = argMinMaxLayer->InferOutputShapes(inputShapes);
+}
+
+void ArgMinMaxInferOutputShape4dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 2;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 1, 3, 2, 4 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape3dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 0;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 1, 3, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 3, 2 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape2dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 1;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 3, 2 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 3 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
+void ArgMinMaxInferOutputShape1dTest()
+{
+    armnn::Graph graph;
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Axis = 0;
+
+    const std::vector<armnn::TensorShape> inputShapes
+    {
+        { 5 }
+    };
+
+    std::vector<armnn::TensorShape> outputShapes;
+    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+
+    armnn::TensorShape expectedOutputShape( { 1 } );
+    BOOST_CHECK(outputShapes.size() == 1);
+    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+}
+
 void BatchToSpaceInferOutputShapeTest()
 {
     armnn::Graph graph;
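
Negative axes are not covered by the new tests. Assuming GetUnsignedAxis wraps them as described above (so -1 selects the last dimension), a hypothetical additional case could be written in the same style:

    // Hypothetical test (not part of the patch): axis -1 on a 2D input should behave
    // like axis 1 and reduce the last dimension.
    void ArgMinMaxInferOutputShapeNegativeAxisTest()
    {
        armnn::ArgMinMaxDescriptor descriptor;
        descriptor.m_Axis = -1; // assumed to wrap to the last dimension

        const std::vector<armnn::TensorShape> inputShapes
        {
            { 3, 2 }
        };

        std::vector<armnn::TensorShape> outputShapes;
        BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));

        armnn::TensorShape expectedOutputShape( { 3 } );
        BOOST_CHECK(outputShapes.size() == 1);
        BOOST_CHECK(outputShapes[0] == expectedOutputShape);
    }
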
index 89277d7..ea0e5c8 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <boost/format.hpp>
 #include <boost/numeric/conversion/cast.hpp>
+#include <TensorUtils.hpp>
 
 using namespace armnnUtils;
 
@@ -485,6 +486,41 @@ void ArgMinMaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
     };
 
     ValidateDataTypes(inputTensorInfo, supportedInputTypes, descriptorName);
+
+    auto inputShape = inputTensorInfo.GetShape();
+    auto outputShape = outputTensorInfo.GetShape();
+
+    auto inputNumDimensions = inputShape.GetNumDimensions();
+    auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, m_Parameters.m_Axis);
+
+    const std::string outputShapeError{": Output tensor shape does not match shape inferred from input tensor."};
+
+    // 1D input shape results in scalar output shape
+    if (inputShape.GetNumDimensions() == 1)
+    {
+        if (outputShape.GetNumDimensions() != 1 || outputShape[0] != 1)
+        {
+            throw InvalidArgumentException(descriptorName + outputShapeError);
+        }
+    }
+    else
+    {
+        for (unsigned int i = 0; i < unsignedAxis; ++i)
+        {
+            if (outputShape[i] != inputShape[i])
+            {
+                throw InvalidArgumentException(descriptorName + outputShapeError);
+            }
+        }
+
+        for (auto i = unsignedAxis + 1; i < inputNumDimensions; ++i)
+        {
+            if (outputShape[i - 1] != inputShape[i])
+            {
+                throw InvalidArgumentException(descriptorName + outputShapeError);
+            }
+        }
+    }
 }
 
 void SoftmaxQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
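
As a worked example of the new check: for the { 1, 3, 2, 4 } input used by ArgMaxHeightTest with m_Axis set to 2, the inferred output shape is { 1, 3, 4 }, so an output tensor declared as { 3, 1, 4 } is now rejected. A minimal sketch of exercising the validation directly (assuming the descriptor and WorkloadInfo can be populated by hand like this, outside the usual workload factory path) might be:

    #include <backendsCommon/WorkloadData.hpp>
    #include <boost/test/unit_test.hpp>

    // Sketch only: calls ArgMinMaxQueueDescriptor::Validate with a mismatched output shape.
    void ArgMinMaxValidateShapeMismatch()
    {
        const armnn::TensorShape inputShape { 1, 3, 2, 4 };
        const armnn::TensorShape wrongOutputShape{ 3, 1, 4 }; // previously expected shape

        armnn::WorkloadInfo workloadInfo;
        workloadInfo.m_InputTensorInfos  = { armnn::TensorInfo(inputShape, armnn::DataType::Float32) };
        workloadInfo.m_OutputTensorInfos = { armnn::TensorInfo(wrongOutputShape, armnn::DataType::Signed32) };

        armnn::ArgMinMaxQueueDescriptor queueDescriptor;
        queueDescriptor.m_Parameters.m_Axis = 2;

        // With this change, the mismatch against the inferred { 1, 3, 4 } shape
        // is expected to raise InvalidArgumentException.
        BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), armnn::InvalidArgumentException);
    }
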
index e023d60..be7ef4e 100644 (file)
@@ -190,7 +190,7 @@ LayerTestResult<int32_t, 3> ArgMaxHeightTest(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
-    const armnn::TensorShape outputShape{ 3, 1, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 4 };
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
@@ -219,7 +219,7 @@ LayerTestResult<int32_t, 3> ArgMinWidthTest(
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     const armnn::TensorShape inputShape{ 1, 3, 2, 4};
-    const armnn::TensorShape outputShape{ 3, 2, 1 };
+    const armnn::TensorShape outputShape{ 1, 3, 2 };
 
     armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
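
The corrected expectations follow directly from the inference rule above: the reduced axis is removed and every other dimension, including the leading batch dimension, is kept. Assuming the NCHW-style { 1, 3, 2, 4 } input and that these tests reduce over height (axis 2) and width (axis 3) respectively, the mapping is:

    // Worked shape mapping for the corrected expectations (input { 1, 3, 2, 4 }):
    //   ArgMaxHeightTest: drop axis 2 (H) -> { 1, 3, 4 }   (was { 3, 1, 4 })
    //   ArgMinWidthTest : drop axis 3 (W) -> { 1, 3, 2 }   (was { 3, 2, 1 })
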
index 1d8aa11..920fb0b 100644 (file)
@@ -923,8 +923,6 @@ ARMNN_AUTO_TEST_CASE(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
 
 #if defined(ARMNNREF_ENABLED)