//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <layers/ConvertFp16ToFp32Layer.hpp>
#include <layers/ConvertFp32ToFp16Layer.hpp>
#include <test/TensorHelpers.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <neon/NeonWorkloadFactory.hpp>
#include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
#include <backendsCommon/test/LayerTests.hpp>

#include <boost/test/unit_test.hpp>
19 BOOST_AUTO_TEST_SUITE(NeonLayerSupport)
21 BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Neon)
23 armnn::NeonWorkloadFactory factory;
24 IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float16>(&factory);
27 BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon)
29 armnn::NeonWorkloadFactory factory;
30 IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float32>(&factory);
33 BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Neon)
35 armnn::NeonWorkloadFactory factory;
36 IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QuantisedAsymm8>(&factory);
39 BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
41 std::string reasonIfUnsupported;
43 bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
44 armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
49 BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon)
51 std::string reasonIfUnsupported;
53 bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
54 armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
59 BOOST_AUTO_TEST_SUITE_END()