MLCE-190: Neon and CL Constant Workloads do not support newer DataTypes
author Mike Kelly <mike.kelly@arm.com>
Mon, 27 Apr 2020 08:55:40 +0000 (09:55 +0100)
committer mike.kelly <mike.kelly@arm.com>
Mon, 27 Apr 2020 10:19:33 +0000 (10:19 +0000)
 * Added support for QASYMM8_SIGNED, QSYMM16, QSYMM8 and QSYMM8_PER_CHANNEL to Neon and CL backends
 * Added unit tests to Neon, CL and Ref backends

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4c726b6d86b4d75abedd130dcea372d1e82be5c2

src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
src/backends/cl/ClLayerSupport.cpp
src/backends/cl/test/ClLayerSupportTests.cpp
src/backends/cl/workloads/ClConstantWorkload.cpp
src/backends/cl/workloads/ClConstantWorkload.hpp
src/backends/neon/NeonLayerSupport.cpp
src/backends/neon/test/NeonLayerSupportTests.cpp
src/backends/neon/workloads/NeonConstantWorkload.cpp
src/backends/neon/workloads/NeonConstantWorkload.hpp
src/backends/reference/test/RefLayerSupportTests.cpp

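Note (editorial, not part of the patch): a minimal sketch of how the widened Constant-layer support can be queried on the CL backend after this change. The direct use of ClLayerSupport, the include paths, and the {1, 1} tensor shape are assumptions made for illustration only; the IsConstantSupported signature itself is taken from the diff below.

    // Sketch: ask the CL backend whether a Constant layer producing a QAsymmS8
    // (QASYMM8_SIGNED) tensor is supported. Before this change the CL query fell
    // back to IsSupportedForDataTypeCl, which rejected the newer quantized types.
    #include <armnn/Optional.hpp>
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <cl/ClLayerSupport.hpp> // internal backend header, path assumed

    #include <iostream>
    #include <string>

    int main()
    {
        armnn::ClLayerSupport clLayerSupport;
        armnn::TensorInfo outputInfo({1, 1}, armnn::DataType::QAsymmS8);

        std::string reason;
        armnn::Optional<std::string&> reasonIfUnsupported(reason);

        // Forwards to ClConstantWorkloadValidate, which now also accepts
        // QASYMM8_SIGNED, QSYMM16, QSYMM8 and QSYMM8_PER_CHANNEL outputs.
        const bool supported = clLayerSupport.IsConstantSupported(outputInfo, reasonIfUnsupported);
        std::cout << (supported ? "supported" : reason) << std::endl;
        return 0;
    }
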
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index dccfd1e..0780f4b 100644
@@ -882,5 +882,22 @@ bool IsMeanLayerNotSupportedTests(std::string& reasonIfUnsupported)
     return result;
 }
 
+template<typename FactoryType, armnn::DataType OutputDataType>
+bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
+{
+    armnn::Graph graph;
+
+    armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
+    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");
+
+    armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
+
+    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
+
+    bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);
+
+    return result;
+}
 
 } //namespace
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 12c71c0..546cbc1 100644
@@ -23,6 +23,7 @@
 #include "workloads/ClBatchNormalizationFloatWorkload.hpp"
 #include "workloads/ClBatchToSpaceNdWorkload.hpp"
 #include "workloads/ClComparisonWorkload.hpp"
+#include "workloads/ClConstantWorkload.hpp"
 #include "workloads/ClConvertFp16ToFp32Workload.hpp"
 #include "workloads/ClConvertFp32ToFp16Workload.hpp"
 #include "workloads/ClConvolution2dWorkload.hpp"
@@ -284,10 +285,9 @@ bool ClLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> inpu
 bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                    output.GetDataType(),
-                                    &TrueFunc<>,
-                                    &TrueFunc<>);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   output);
 }
 
 bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 33a2912..81d0cc2 100644
@@ -131,4 +131,41 @@ BOOST_FIXTURE_TEST_CASE(IsMeanSupportedCl, ClContextControlFixture)
     BOOST_CHECK(result);
 }
 
+BOOST_AUTO_TEST_CASE(IsConstantSupportedCl)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::Float16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::Float32>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::Boolean>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QSymmS16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QSymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+            armnn::DataType::BFloat16>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index e928870..bae7446 100644
 namespace armnn
 {
 
+arm_compute::Status ClConstantWorkloadValidate(const TensorInfo& output)
+{
+    const arm_compute::TensorInfo outputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    std::array<arm_compute::DataType,7> supportedTypes = {
+            arm_compute::DataType::F16,
+            arm_compute::DataType::F32,
+            arm_compute::DataType::QASYMM8,
+            arm_compute::DataType::QASYMM8_SIGNED,
+            arm_compute::DataType::QSYMM16,
+            arm_compute::DataType::QSYMM8,
+            arm_compute::DataType::QSYMM8_PER_CHANNEL
+    };
+    auto it = std::find(begin(supportedTypes), end(supportedTypes), outputInfo.data_type());
+
+    if (it != end(supportedTypes))
+    {
+        return arm_compute::Status{};
+    }
+    else
+    {
+        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
+    }
+}
+
 ClConstantWorkload::ClConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
     : BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
     , m_RanOnce(false)
@@ -54,6 +79,22 @@ void ClConstantWorkload::Execute() const
                 CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<uint8_t>());
                 break;
             }
+            case arm_compute::DataType::QASYMM8_SIGNED:
+            {
+                CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int8_t>());
+                break;
+            }
+            case arm_compute::DataType::QSYMM16:
+            {
+                CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int16_t>());
+                break;
+            }
+            case arm_compute::DataType::QSYMM8:
+            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+            {
+                CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int8_t>());
+                break;
+            }
             default:
             {
                 ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/cl/workloads/ClConstantWorkload.hpp b/src/backends/cl/workloads/ClConstantWorkload.hpp
index 75325dc..e5a1d44 100644
@@ -5,10 +5,13 @@
 
 #pragma once
 
+#include <arm_compute/core/Error.h>
 #include <backendsCommon/Workload.hpp>
 
 namespace armnn
 {
+arm_compute::Status ClConstantWorkloadValidate(const TensorInfo& output);
+
 class ClConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
 {
 public:
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 44e84fb..5d59ab8 100644
@@ -25,6 +25,7 @@
 #include "workloads/NeonBatchNormalizationWorkload.hpp"
 #include "workloads/NeonBatchToSpaceNdWorkload.hpp"
 #include "workloads/NeonComparisonWorkload.hpp"
+#include "workloads/NeonConstantWorkload.hpp"
 #include "workloads/NeonConvolution2dWorkload.hpp"
 #include "workloads/NeonDepthToSpaceWorkload.hpp"
 #include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
@@ -253,10 +254,9 @@ bool NeonLayerSupport::IsConcatSupported(const std::vector<const TensorInfo*> in
 bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
                                            Optional<std::string&> reasonIfUnsupported) const
 {
-    return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                      output.GetDataType(),
-                                      &TrueFunc<>,
-                                      &TrueFunc<>);
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   output);
 }
 
 bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 2d43125..3b086ad 100644
@@ -85,4 +85,41 @@ BOOST_AUTO_TEST_CASE(IsMeanSupportedNeon)
     BOOST_CHECK(result);
 }
 
+BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::Float16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::Float32>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::Boolean>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QSymmS16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QSymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+            armnn::DataType::BFloat16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+}
+
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index 1cffbe1..f7c8a73 100644
 namespace armnn
 {
 
+arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
+{
+    const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    std::array<arm_compute::DataType,8> supportedTypes = {
+            arm_compute::DataType::BFLOAT16,
+            arm_compute::DataType::F16,
+            arm_compute::DataType::F32,
+            arm_compute::DataType::QASYMM8,
+            arm_compute::DataType::QASYMM8_SIGNED,
+            arm_compute::DataType::QSYMM16,
+            arm_compute::DataType::QSYMM8,
+            arm_compute::DataType::QSYMM8_PER_CHANNEL
+    };
+    auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());
+
+    if (it != end(supportedTypes))
+    {
+        return arm_compute::Status{};
+    }
+    else
+    {
+        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
+    }
+}
+
 NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
                                            const WorkloadInfo& info)
     : BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
@@ -68,6 +94,22 @@ void NeonConstantWorkload::Execute() const
                 CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
                 break;
             }
+            case arm_compute::DataType::QASYMM8_SIGNED:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
+                break;
+            }
+            case arm_compute::DataType::QSYMM16:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
+                break;
+            }
+            case arm_compute::DataType::QSYMM8:
+            case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+            {
+                CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
+                break;
+            }
             default:
             {
                 ARMNN_ASSERT_MSG(false, "Unknown data type");
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.hpp b/src/backends/neon/workloads/NeonConstantWorkload.hpp
index 18c1547..f800a45 100644
@@ -9,6 +9,7 @@
 
 namespace armnn
 {
+arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output);
 
 class NeonConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
 {
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index 1d4b4a0..2a27a9d 100644
@@ -235,4 +235,41 @@ BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference)
         != std::string::npos);
 }
 
+BOOST_AUTO_TEST_CASE(IsConstantSupportedRef)
+{
+    std::string reasonIfUnsupported;
+
+    bool result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::Float16>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::Float32>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::Boolean>(reasonIfUnsupported);
+    BOOST_CHECK(!result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QSymmS16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QSymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+
+    result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+            armnn::DataType::BFloat16>(reasonIfUnsupported);
+    BOOST_CHECK(result);
+}
+
 BOOST_AUTO_TEST_SUITE_END()