* Added support for QASYMM8_SIGNED, QSYMM16, QSYMM8 and QSYMM8_PER_CHANNEL to the Neon and CL backends
* Added unit tests to the Neon, CL and Ref backends
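
A minimal usage sketch (illustrative only, not part of this change), showing how the
new constant-layer support could be queried through ClLayerSupport; the tensor shape
and quantization parameters below are placeholder values:

    // Illustrative values; any of the newly supported quantized types could be used here.
    armnn::ClLayerSupport layerSupport;
    armnn::TensorInfo output({1, 10}, armnn::DataType::QAsymmS8, 0.1f, 0);
    std::string reason;
    bool supported = layerSupport.IsConstantSupported(output, armnn::Optional<std::string&>(reason));
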
Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: I4c726b6d86b4d75abedd130dcea372d1e82be5c2
return result;
}
+template<typename FactoryType, armnn::DataType OutputDataType>
+bool IsConstantLayerSupportedTests(std::string& reasonIfUnsupported)
+{
+ armnn::Graph graph;
+
+ armnn::Layer* const layer = graph.AddLayer<armnn::ConstantLayer>("ConstantLayerName");
+ armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "OutputLayerName");
+
+ armnn::TensorInfo outputTensorInfo({1, 1}, OutputDataType);
+
+ layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+ layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
+
+ bool result = FactoryType::IsLayerSupported(*layer, OutputDataType, reasonIfUnsupported);
+
+ return result;
+}
} //namespace
#include "workloads/ClBatchNormalizationFloatWorkload.hpp"
#include "workloads/ClBatchToSpaceNdWorkload.hpp"
#include "workloads/ClComparisonWorkload.hpp"
+#include "workloads/ClConstantWorkload.hpp"
#include "workloads/ClConvertFp16ToFp32Workload.hpp"
#include "workloads/ClConvertFp32ToFp16Workload.hpp"
#include "workloads/ClConvolution2dWorkload.hpp"
bool ClLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsSupportedForDataTypeCl(reasonIfUnsupported,
- output.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ FORWARD_WORKLOAD_VALIDATE_FUNC(ClConstantWorkloadValidate,
+ reasonIfUnsupported,
+ output);
}
bool ClLayerSupport::IsConvertFp16ToFp32Supported(const TensorInfo& input,
BOOST_CHECK(result);
}
+BOOST_AUTO_TEST_CASE(IsConstantSupportedCl)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::Float16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::Float32>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::Boolean>(reasonIfUnsupported);
+ BOOST_CHECK(!result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::QSymmS16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::QSymmS8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
+ armnn::DataType::BFloat16>(reasonIfUnsupported);
+ BOOST_CHECK(!result);
+}
+
BOOST_AUTO_TEST_SUITE_END()
namespace armnn
{
+arm_compute::Status ClConstantWorkloadValidate(const TensorInfo& output)
+{
+ const arm_compute::TensorInfo outputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ std::array<arm_compute::DataType, 7> supportedTypes = {
+ arm_compute::DataType::F16,
+ arm_compute::DataType::F32,
+ arm_compute::DataType::QASYMM8,
+ arm_compute::DataType::QASYMM8_SIGNED,
+ arm_compute::DataType::QSYMM16,
+ arm_compute::DataType::QSYMM8,
+ arm_compute::DataType::QSYMM8_PER_CHANNEL
+ };
+ auto it = std::find(begin(supportedTypes), end(supportedTypes), outputInfo.data_type());
+
+ if (it != end(supportedTypes))
+ {
+ return arm_compute::Status{};
+ }
+ else
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
+ }
+}
+
ClConstantWorkload::ClConstantWorkload(const ConstantQueueDescriptor& descriptor, const WorkloadInfo& info)
: BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
, m_RanOnce(false)
CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<uint8_t>());
break;
}
+ case arm_compute::DataType::QASYMM8_SIGNED:
+ case arm_compute::DataType::QSYMM8:
+ case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+ {
+ CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int8_t>());
+ break;
+ }
+ case arm_compute::DataType::QSYMM16:
+ {
+ CopyArmComputeClTensorData(output, data.m_LayerOutput->GetConstTensor<int16_t>());
+ break;
+ }
default:
{
ARMNN_ASSERT_MSG(false, "Unknown data type");
#pragma once
+#include <arm_compute/core/Error.h>
#include <backendsCommon/Workload.hpp>
namespace armnn
{
+arm_compute::Status ClConstantWorkloadValidate(const TensorInfo& output);
+
class ClConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
{
public:
#include "workloads/NeonBatchNormalizationWorkload.hpp"
#include "workloads/NeonBatchToSpaceNdWorkload.hpp"
#include "workloads/NeonComparisonWorkload.hpp"
+#include "workloads/NeonConstantWorkload.hpp"
#include "workloads/NeonConvolution2dWorkload.hpp"
#include "workloads/NeonDepthToSpaceWorkload.hpp"
#include "workloads/NeonDepthwiseConvolutionWorkload.hpp"
bool NeonLayerSupport::IsConstantSupported(const TensorInfo& output,
Optional<std::string&> reasonIfUnsupported) const
{
- return IsSupportedForDataTypeNeon(reasonIfUnsupported,
- output.GetDataType(),
- &TrueFunc<>,
- &TrueFunc<>);
+ FORWARD_WORKLOAD_VALIDATE_FUNC(NeonConstantWorkloadValidate,
+ reasonIfUnsupported,
+ output);
}
bool NeonLayerSupport::IsConvertBf16ToFp32Supported(const TensorInfo& input,
BOOST_CHECK(result);
}
+BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::Float16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::Float32>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::Boolean>(reasonIfUnsupported);
+ BOOST_CHECK(!result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::QSymmS16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::QSymmS8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
+ armnn::DataType::BFloat16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+}
+
BOOST_AUTO_TEST_SUITE_END()
namespace armnn
{
+arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output)
+{
+ const arm_compute::TensorInfo neonOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+ std::array<arm_compute::DataType, 8> supportedTypes = {
+ arm_compute::DataType::BFLOAT16,
+ arm_compute::DataType::F16,
+ arm_compute::DataType::F32,
+ arm_compute::DataType::QASYMM8,
+ arm_compute::DataType::QASYMM8_SIGNED,
+ arm_compute::DataType::QSYMM16,
+ arm_compute::DataType::QSYMM8,
+ arm_compute::DataType::QSYMM8_PER_CHANNEL
+ };
+ auto it = std::find(begin(supportedTypes), end(supportedTypes), neonOutputInfo.data_type());
+
+ if (it != end(supportedTypes))
+ {
+ return arm_compute::Status{};
+ }
+ else
+ {
+ return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR, "Unsupported DataType"};
+ }
+}
+
NeonConstantWorkload::NeonConstantWorkload(const ConstantQueueDescriptor& descriptor,
const WorkloadInfo& info)
: BaseWorkload<ConstantQueueDescriptor>(descriptor, info)
CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<uint8_t>(), output);
break;
}
+ case arm_compute::DataType::QASYMM8_SIGNED:
+ case arm_compute::DataType::QSYMM8:
+ case arm_compute::DataType::QSYMM8_PER_CHANNEL:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int8_t>(), output);
+ break;
+ }
+ case arm_compute::DataType::QSYMM16:
+ {
+ CopyArmComputeITensorData(data.m_LayerOutput->GetConstTensor<int16_t>(), output);
+ break;
+ }
default:
{
ARMNN_ASSERT_MSG(false, "Unknown data type");
namespace armnn
{
+arm_compute::Status NeonConstantWorkloadValidate(const TensorInfo& output);
+
class NeonConstantWorkload : public BaseWorkload<ConstantQueueDescriptor>
{
!= std::string::npos);
}
+BOOST_AUTO_TEST_CASE(IsConstantSupportedRef)
+{
+ std::string reasonIfUnsupported;
+
+ bool result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::Float16>(reasonIfUnsupported);
+ BOOST_CHECK(!result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::Float32>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::QAsymmU8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::Boolean>(reasonIfUnsupported);
+ BOOST_CHECK(!result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::QSymmS16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::QSymmS8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::QAsymmS8>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+
+ result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
+ armnn::DataType::BFloat16>(reasonIfUnsupported);
+ BOOST_CHECK(result);
+}
+
BOOST_AUTO_TEST_SUITE_END()