// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
6 #include "ConstantTestImpl.hpp"
8 #include <QuantizeHelper.hpp>
9 #include <ResolveType.hpp>
11 #include <armnn/ArmNN.hpp>
13 #include <armnnUtils/Permute.hpp>
15 #include <backendsCommon/CpuTensorHandle.hpp>
17 #include <backendsCommon/test/TensorCopyUtils.hpp>
18 #include <backendsCommon/test/WorkloadTestUtils.hpp>
20 #include <test/TensorHelpers.hpp>
// Builds a Constant workload whose stored tensor is emitted to the output,
// and prepares an expected result equal to that stored constant so the caller
// can compare. qScale/qOffset are the quantization parameters applied when
// ArmnnType is a quantized type.
// NOTE(review): this excerpt is elided -- the qScale/qOffset parameter lines,
// the braces, several constant-data rows, the workload->Execute() call and the
// return statement are not visible here; verify against the full file.
25 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
26 LayerTestResult<T, 4> ConstantTestImpl(
27 armnn::IWorkloadFactory& workloadFactory,
28 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
// The memory manager is not needed by this test; silence the unused-parameter warning.
32 boost::ignore_unused(memoryManager);
// Fixed input shape (NCHW ordering in the TensorInfo below):
// 2 batches x 3 channels x 4 rows x 3 columns.
33 constexpr unsigned int inputWidth = 3;
34 constexpr unsigned int inputHeight = 4;
35 constexpr unsigned int inputChannels = 3;
36 constexpr unsigned int inputBatchSize = 2;
// A Constant layer simply emits its stored tensor, so the output shape
// mirrors the input shape exactly.
38 constexpr unsigned int outputWidth = inputWidth;
39 constexpr unsigned int outputHeight = inputHeight;
40 constexpr unsigned int outputChannels = inputChannels;
41 constexpr unsigned int outputBatchSize = inputBatchSize;
// qScale/qOffset are also passed straight into the TensorInfo constructors,
// regardless of whether ArmnnType is quantized.
43 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
44 ArmnnType, qScale, qOffset);
46 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
47 ArmnnType, qScale, qOffset);
49 // Set quantization parameters if the requested type is a quantized type.
50 if(armnn::IsQuantizedType<T>())
52 inputTensorInfo.SetQuantizationScale(qScale);
53 inputTensorInfo.SetQuantizationOffset(qOffset);
54 outputTensorInfo.SetQuantizationScale(qScale);
55 outputTensorInfo.SetQuantizationOffset(qOffset);
// The constant payload, quantized with qScale/qOffset for quantized T.
// (Several data rows are elided in this excerpt.)
58 auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
59 armnnUtils::QuantizedVector<T>(
62 235.0f, 46.0f, 178.0f,
63 100.0f, 123.0f, 19.0f,
64 172.0f, 74.0f, 250.0f,
68 113.0f, 95.0f, 202.0f,
70 122.0f, 246.0f, 166.0f,
74 56.0f, 170.0f, 162.0f,
75 194.0f, 89.0f, 254.0f,
76 12.0f, 209.0f, 200.0f,
82 25.0f, 117.0f, 103.0f,
83 247.0f, 59.0f, 189.0f,
86 239.0f, 104.0f, 199.0f,
87 17.0f, 124.0f, 153.0f,
88 222.0f, 217.0f, 75.0f,
92 97.0f, 145.0f, 215.0f,
93 115.0f, 116.0f, 238.0f,
94 226.0f, 16.0f, 132.0f,
// The expected output of a Constant layer is the constant itself.
99 LayerTestResult<T, 4> result(outputTensorInfo);
100 result.outputExpected = input;
102 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
// The constant data lives in a CPU tensor handle owned by this scope;
// the queue descriptor below only borrows a pointer to it.
104 armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
105 AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
107 armnn::ConstantQueueDescriptor descriptor;
108 descriptor.m_LayerOutput = &constantTensor;
// A Constant workload has no inputs -- only the single output is registered.
110 armnn::WorkloadInfo info;
111 AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
113 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
115 outputHandle->Allocate();
117 workload->PostAllocationConfigure();
// Read the produced tensor back so the caller can compare against
// result.outputExpected. (The workload->Execute() call appears to be
// elided from this excerpt -- confirm against the full file.)
120 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
124 } // anonymous namespace
126 LayerTestResult<float, 4> ConstantTest(
127 armnn::IWorkloadFactory& workloadFactory,
128 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
130 return ConstantTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
133 LayerTestResult<int16_t, 4> ConstantInt16SimpleQuantizationScaleNoOffsetTest(
134 armnn::IWorkloadFactory& workloadFactory,
135 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
137 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 1.0f, 0);
140 LayerTestResult<uint8_t, 4> ConstantUint8SimpleQuantizationScaleNoOffsetTest(
141 armnn::IWorkloadFactory& workloadFactory,
142 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
144 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 1.0f, 0);
147 LayerTestResult<uint8_t, 4> ConstantUint8CustomQuantizationScaleAndOffsetTest(
148 armnn::IWorkloadFactory& workloadFactory,
149 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
151 return ConstantTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 2e-6f, 1);
154 LayerTestResult<int16_t, 4> ConstantInt16CustomQuantizationScaleAndOffsetTest(
155 armnn::IWorkloadFactory& workloadFactory,
156 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
158 return ConstantTestImpl<armnn::DataType::QuantisedSymm16>(workloadFactory, memoryManager, 2e-6f, 1);