bool RefLayerSupport::IsDebugSupported(const TensorInfo& input,
                                       const TensorInfo& output,
                                       Optional<std::string&> reasonIfUnsupported) const
{
-    ignore_unused(output);
-    return IsSupportedForDataTypeRef(reasonIfUnsupported,
-                                     input.GetDataType(),
-                                     &TrueFunc<>,
-                                     &TrueFunc<>);
+    bool supported = true;
+
+    std::array<DataType,3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference debug: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference debug: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference debug: input and output types are mismatched");
+
+    return supported;
}
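
(Aside: the sketch below shows roughly how the rule helpers used above behave. It is simplified from the anonymous-namespace helpers in RefLayerSupport.cpp, not the verbatim source, and assumes the armnn TensorInfo/DataType/Optional headers plus <algorithm> and <string>. A rule object captures a boolean verdict at construction, and CheckSupportRule appends the failure message when the verdict is false.)

// Simplified sketch of the rule helpers (see RefLayerSupport.cpp for the real ones).
struct Rule
{
    bool operator()() const { return m_Res; }
    bool m_Res = true;
};

struct TypeAnyOf : public Rule
{
    template<typename Container>
    TypeAnyOf(const TensorInfo& info, const Container& c)
    {
        // True when the tensor's data type appears in the supported-type list.
        m_Res = std::any_of(c.begin(), c.end(),
                            [&info](DataType dt) { return dt == info.GetDataType(); });
    }
};

template<typename F>
bool CheckSupportRule(F rule, Optional<std::string&> reasonIfUnsupported, const char* reason)
{
    bool supported = rule();
    if (!supported && reasonIfUnsupported)
    {
        // Accumulate one message per failed rule so the caller sees every mismatch.
        reasonIfUnsupported.value() += std::string(reason) + "\n";
    }
    return supported;
}
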
bool RefLayerSupport::IsDepthwiseConvolutionSupported(const TensorInfo& input,
    return IsDataType<DataType::Float16>(info);
}
-bool IsUint8(const WorkloadInfo& info)
+bool IsQSymm16(const WorkloadInfo& info)
{
-    return IsDataType<DataType::QuantisedAsymm8>(info);
+    return IsDataType<DataType::QuantisedSymm16>(info);
}
RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateDebug(const DebugQueueDescriptor& descriptor,
                                                           const WorkloadInfo& info) const
{
-    return MakeWorkload<RefDebugFloat32Workload, RefDebugUint8Workload>(descriptor, info);
+    if (IsQSymm16(info))
+    {
+        return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
+    }
+    return MakeWorkload<RefDebugFloat32Workload, RefDebugQAsymm8Workload>(descriptor, info);
}
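
(MakeWorkload only knows how to dispatch between a Float32 and a QAsymm8 workload, hence the explicit QSymm16 check above. Below is a minimal sketch of that dispatch, assuming the usual behaviour of keying off the first input/output tensor type; the name MakeWorkloadSketch is illustrative, not the real helper.)

// Illustrative sketch of the Float32/QAsymm8 dispatch performed by MakeWorkload;
// the real helper also covers other cases and returns a null workload otherwise.
template <typename Float32Workload, typename Uint8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> MakeWorkloadSketch(const QueueDescriptorType& descriptor,
                                              const WorkloadInfo& info)
{
    const DataType dataType = !info.m_InputTensorInfos.empty()
                              ? info.m_InputTensorInfos[0].GetDataType()
                              : info.m_OutputTensorInfos[0].GetDataType();
    switch (dataType)
    {
        case DataType::Float32:
            return std::make_unique<Float32Workload>(descriptor, info);
        case DataType::QuantisedAsymm8:
            return std::make_unique<Uint8Workload>(descriptor, info);
        default:
            return nullptr; // type not handled by this two-way dispatch
    }
}
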
std::unique_ptr<IWorkload> RefWorkloadFactory::CreateRsqrt(const RsqrtQueueDescriptor& descriptor,
#include <reference/RefWorkloadFactory.hpp>
+#include <backendsCommon/test/DebugTestImpl.hpp>
#include <backendsCommon/test/DetectionPostProcessLayerTestImpl.hpp>
#include <backendsCommon/test/LayerTests.hpp>
ARMNN_AUTO_TEST_CASE(Debug2DUint8, Debug2DUint8Test)
ARMNN_AUTO_TEST_CASE(Debug1DUint8, Debug1DUint8Test)
+ARMNN_AUTO_TEST_CASE(Debug4DQSymm16, Debug4DTest<armnn::DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Debug3DQSymm16, Debug3DTest<armnn::DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Debug2DQSymm16, Debug2DTest<armnn::DataType::QuantisedSymm16>)
+ARMNN_AUTO_TEST_CASE(Debug1DQSymm16, Debug1DTest<armnn::DataType::QuantisedSymm16>)
+
// Gather
ARMNN_AUTO_TEST_CASE(Gather1DParamsFloat, Gather1DParamsFloatTest)
ARMNN_AUTO_TEST_CASE(Gather1DParamsUint8, Gather1DParamsUint8Test)
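
(The four new QSymm16 cases reuse the templated Debug{1..4}DTest from DebugTestImpl.hpp. The pattern works because armnn::ResolveType maps the DataType enum to the underlying C++ type, roughly as sketched below; ResolveTypeSketch is an illustrative stand-in.)

// Sketch of the enum-to-type mapping (cf. armnn::ResolveType in TypesUtils.hpp).
template<armnn::DataType DT> struct ResolveTypeSketch;
template<> struct ResolveTypeSketch<armnn::DataType::Float32>         { using Type = float;   };
template<> struct ResolveTypeSketch<armnn::DataType::QuantisedAsymm8> { using Type = uint8_t; };
template<> struct ResolveTypeSketch<armnn::DataType::QuantisedSymm16> { using Type = int16_t; };

// Debug4DTest<armnn::DataType::QuantisedSymm16> therefore runs the shared test body
// with T = int16_t, which is what the explicit Debug<int16_t> instantiation in
// Debug.cpp exists to satisfy.
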
                             const std::string& layerName,
                             unsigned int slotIndex);
+template void Debug<int16_t>(const TensorInfo& inputInfo,
+                             const int16_t* inputData,
+                             LayerGuid guid,
+                             const std::string& layerName,
+                             unsigned int slotIndex);
} // namespace armnn
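
(The explicit int16_t instantiation is needed because Debug<> is defined in Debug.cpp rather than in a header. In outline, the template serialises the layer's identity and tensor contents to stdout as JSON-style text. The sketch below is simplified and its exact output format is an assumption; see Debug.cpp for the real serialisation.)

// Simplified sketch of the Debug<> template instantiated above.
template <typename T>
void DebugSketch(const TensorInfo& inputInfo, const T* inputData,
                 LayerGuid guid, const std::string& layerName, unsigned int slotIndex)
{
    std::cout << "{ \"layerGuid\": " << guid
              << ", \"layerName\": \"" << layerName << "\""
              << ", \"outputSlot\": " << slotIndex
              << ", \"data\": [";
    for (unsigned int i = 0; i < inputInfo.GetNumElements(); ++i)
    {
        // Cast so 8-bit values print as numbers rather than characters.
        std::cout << static_cast<float>(inputData[i])
                  << (i + 1 < inputInfo.GetNumElements() ? ", " : "");
    }
    std::cout << "] }" << std::endl;
}
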
template class RefDebugWorkload<DataType::Float32>;
template class RefDebugWorkload<DataType::QuantisedAsymm8>;
+template class RefDebugWorkload<DataType::QuantisedSymm16>;
} // namespace armnn
};
using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
-using RefDebugUint8Workload = RefDebugWorkload<DataType::QuantisedAsymm8>;
+using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QuantisedAsymm8>;
+using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QuantisedSymm16>;
} // namespace armnn
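
(For orientation, the aliases above name instantiations of the RefDebugWorkload class template. The sketch below shows its likely shape, assuming the usual TypedWorkload pattern; the descriptor member names m_Guid, m_LayerName and m_SlotIndex are assumptions based on the Debug<> parameters, and the body is simplified.)

// Simplified sketch of the class template behind the aliases above.
template <armnn::DataType DataType>
class RefDebugWorkloadSketch : public TypedWorkload<DebugQueueDescriptor, DataType>
{
public:
    using TypedWorkload<DebugQueueDescriptor, DataType>::TypedWorkload;

    void Execute() const override
    {
        using T = armnn::ResolveType<DataType>;  // e.g. QuantisedSymm16 -> int16_t
        const TensorInfo& inputInfo = GetTensorInfo(this->m_Data.m_Inputs[0]);
        const T* inputData = GetInputTensorData<T>(0, this->m_Data);
        // Forward to the Debug<> free function instantiated in Debug.cpp.
        Debug(inputInfo, inputData, this->m_Data.m_Guid,
              this->m_Data.m_LayerName, this->m_Data.m_SlotIndex);
    }
};
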