IVGCVSW-3698 Add EndToEnd Layer test for ArgMinMax
Author: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Thu, 31 Oct 2019 14:24:02 +0000 (14:24 +0000)
Committer: Matteo Martincigh <matteo.martincigh@arm.com>
Fri, 1 Nov 2019 09:01:49 +0000 (09:01 +0000)
 * Add EndToEnd test implementation for ArgMinMax
 * Add EndToEnd tests for Ref
 * Fix output data type of ArgMinMax in WorkloadFactory

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I6d07d25bb96ab21422584284046222257ddee43c

src/backends/backendsCommon/WorkloadFactory.cpp
src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp [new file with mode: 0644]
src/backends/backendsCommon/test/CMakeLists.txt
src/backends/reference/test/RefEndToEndTests.cpp

index 31ad5cb45acea0d296336f1fa9210700e4705c0e..b4b4ffca3092360f1d745b626692750a259898fd 100644 (file)
@@ -110,7 +110,7 @@ bool IWorkloadFactory::IsLayerSupported(const BackendId& backendId,
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
             result = layerSupportObject->IsArgMinMaxSupported(
                     OverrideDataType(input, dataType),
-                    OverrideDataType(output, dataType),
+                    OverrideDataType(output, DataType::Signed32),
                     descriptor,
                     reason);
             break;
diff --git a/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp
new file mode 100644 (file)
index 0000000..3bb1dd6
--- /dev/null
@@ -0,0 +1,308 @@
+//
+// Copyright © 2019 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "CommonTestUtils.hpp"
+
+#include <QuantizeHelper.hpp>
+#include <ResolveType.hpp>
+
+#include <armnn/ArmNN.hpp>
+
+namespace
+{
+
+armnn::INetworkPtr CreateArgMinMaxNetwork(const armnn::TensorInfo& inputTensorInfo,
+                                          const armnn::TensorInfo& outputTensorInfo,
+                                          armnn::ArgMinMaxFunction function,
+                                          int axis)
+{
+    armnn::INetworkPtr network(armnn::INetwork::Create());
+
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = function;
+    descriptor.m_Axis = axis;
+
+    armnn::IConnectableLayer* inputLayer  = network->AddInputLayer(0, "Input");
+    armnn::IConnectableLayer* argMinMaxLayer  = network->AddArgMinMaxLayer(descriptor, "ArgMinMax");
+    armnn::IConnectableLayer* outputLayer = network->AddOutputLayer(0, "Output");
+
+    Connect(inputLayer, argMinMaxLayer, inputTensorInfo, 0, 0);
+    Connect(argMinMaxLayer, outputLayer, outputTensorInfo, 0, 0);
+
+    return network;
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinMaxEndToEndImpl(const armnn::TensorShape& inputShape,
+                           const armnn::TensorShape& outputShape,
+                           const std::vector<float>& inputData,
+                           const std::vector<int32_t>& expectedOutputData,
+                           armnn::ArgMinMaxFunction function,
+                           int axis,
+                           const std::vector<armnn::BackendId>& backends)
+{
+    const float qScale  = armnn::IsQuantizedType<T>() ? 2.0f : 1.0f;
+    const int32_t qOffset = armnn::IsQuantizedType<T>() ? 2 : 0;
+
+    armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
+    armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Signed32);
+
+    // quantize data
+    std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+
+    armnn::INetworkPtr network = CreateArgMinMaxNetwork(inputTensorInfo,
+                                                        outputTensorInfo,
+                                                        function,
+                                                        axis);
+
+    EndToEndLayerTestImpl<ArmnnType, armnn::DataType::Signed32>(std::move(network),
+                                                                { { 0, qInputData } },
+                                                                { { 0, expectedOutputData } },
+                                                                backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    std::vector<float> inputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> expectedOutputData({ 3 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     -1,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinEndToEndSimple(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 1, 1, 5 };
+    const armnn::TensorShape outputShape{ 1, 1, 1 };
+
+    std::vector<float> inputData({ 5.0f, 2.0f, 8.0f, 10.0f, 9.0f });
+    std::vector<int32_t> expectedOutputData({ 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     3,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis0EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 3, 2, 1, 4 };
+    const armnn::TensorShape outputShape{ 2, 1, 4 };
+
+    std::vector<float> inputData({    1.0f,   2.0f,   3.0f,   4.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 1, 2, 1, 2,
+                                              1, 1, 1, 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     0,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis0EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 3, 2, 1, 4 };
+    const armnn::TensorShape outputShape{ 2, 1, 4 };
+
+    std::vector<float> inputData({    1.0f,   2.0f,   3.0f,   4.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+                                              0, 0, 0, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     0,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis1EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    std::vector<float> inputData({    1.0f,   2.0f,   3.0f,   4.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 1, 2, 1, 2,
+                                              1, 1, 1, 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     1,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis1EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 2, 4 };
+
+    std::vector<float> inputData({    1.0f,   2.0f,   3.0f,   4.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+                                              0, 0, 0, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     1,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis2EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 4 };
+
+    std::vector<float> inputData({    1.0f,   2.0f,   3.0f,   4.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 1, 1, 1, 1,
+                                              1, 1, 1, 1,
+                                              1, 0, 1, 0});
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     2,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis2EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 4 };
+
+    std::vector<float> inputData({    1.0f,   2.0f,   3.0f,   4.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 0, 0, 0,
+                                              0, 0, 0, 0,
+                                              0, 1, 0, 1 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     2,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMaxAxis3EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 2 };
+
+    std::vector<float> inputData({    1.0f,   3.0f,   5.0f,   7.0f,
+                                      8.0f,   7.0f,   6.0f,   5.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 3, 0,
+                                              2, 0,
+                                              3, 3});
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Max,
+                                     3,
+                                     backends);
+}
+
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+void ArgMinAxis3EndToEnd(const std::vector<armnn::BackendId>& backends)
+{
+    const armnn::TensorShape inputShape{ 1, 3, 2, 4 };
+    const armnn::TensorShape outputShape{ 1, 3, 2 };
+
+    std::vector<float> inputData({    1.0f,   3.0f,   5.0f,   7.0f,
+                                     18.0f,  16.0f,  14.0f,  12.0f,
+                                    100.0f,  20.0f, 300.0f,  40.0f,
+                                    500.0f, 475.0f, 450.0f, 425.0f,
+                                     10.0f, 200.0f,  30.0f, 400.0f,
+                                     50.0f,  60.0f,  70.0f,  80.0f });
+
+    std::vector<int32_t> expectedOutputData({ 0, 3,
+                                              1, 3,
+                                              0, 0 });
+
+    ArgMinMaxEndToEndImpl<ArmnnType>(inputShape,
+                                     outputShape,
+                                     inputData,
+                                     expectedOutputData,
+                                     armnn::ArgMinMaxFunction::Min,
+                                     3,
+                                     backends);
+}
+
+} // anonymous namespace
index 9c86cdf3c118aaceeb74f94f7ec59288979ec028..8f93e084f812cf4746b7fa69743879074caf170a 100644 (file)
@@ -6,6 +6,7 @@
 list(APPEND armnnBackendsCommonUnitTests_sources
     AbsEndToEndTestImpl.hpp
     ActivationFixture.hpp
+    ArgMinMaxEndToEndTestImpl.hpp
     BackendIdTests.cpp
     BackendRegistryTests.cpp
     CommonTestUtils.cpp
index 1968e4da7e113e0aecca462f25b958fbb3f18da4..4d8c82d900f0fbda25820962f9b87eb67ad5e304 100644 (file)
@@ -6,6 +6,7 @@
 #include <backendsCommon/test/EndToEndTestImpl.hpp>
 
 #include <backendsCommon/test/AbsEndToEndTestImpl.hpp>
+#include <backendsCommon/test/ArgMinMaxEndToEndTestImpl.hpp>
 #include <backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ComparisonEndToEndTestImpl.hpp>
 #include <backendsCommon/test/ConcatEndToEndTestImpl.hpp>
@@ -1041,6 +1042,111 @@ BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2)
     InstanceNormalizationNchwEndToEndTest2(defaultBackends);
 }
 
+// ArgMinMax
+BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
+{
+    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
+{
+    ArgMaxEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
+{
+    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
+{
+    ArgMinEndToEndSimple<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
+{
+    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
+{
+    ArgMaxAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
+{
+    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
+{
+
+    ArgMinAxis0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
+{
+    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
+{
+    ArgMaxAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
+{
+    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
+{
+
+    ArgMinAxis1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
+{
+    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
+{
+    ArgMaxAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
+{
+    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
+{
+
+    ArgMinAxis2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
+{
+    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
+{
+    ArgMaxAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
+{
+    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+}
+
+BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
+{
+
+    ArgMinAxis3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
+}
+
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
 BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)