// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT

#include "FakeQuantizationTestImpl.hpp"

#include <armnn/ArmNN.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/core/ignore_unused.hpp>
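
// Exercises a FakeQuantization workload on a 2x3 float tensor: the input is run
// through the backend under test and the expected output is filled in below for
// the caller to compare against.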
LayerTestResult<float, 2> FakeQuantizationTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    boost::ignore_unused(memoryManager);

    constexpr unsigned int width = 2;
    constexpr unsigned int height = 3;

    const armnn::TensorInfo tensorInfo({ height, width },
                                       armnn::DataType::Float32);

    // NOTE: the input values were elided in the source; the values below are an
    // assumed reconstruction chosen to span the [min, max] range set further down.
    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
       -10.0f, -5.0f,
         0.0f,  5.0f,
        10.0f, 10.0f
    }));

    LayerTestResult<float, 2> ret(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::FakeQuantizationQueueDescriptor data;
    armnn::WorkloadInfo info;
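
    // Wire the input and output tensor handles into the queue descriptor and
    // accompanying workload info.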
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    // Quantization range: the original values were elided in the source, so
    // [-10, 10] is assumed here to match the input above.
    float min = -10.0f;
    float max = 10.0f;

    data.m_Parameters.m_Min = min;
    data.m_Parameters.m_Max = max;
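
    // Point a pass-through handle at ret.outputExpected so a reference workload
    // could write its result straight into the expected-output buffer.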
    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
    armnn::FakeQuantizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
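
    // Create the FakeQuantization workload for the backend under test.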
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
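
    // Copy the input data into the backend tensor, run the workload, then copy
    // the result back out for comparison.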
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
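
    // Expected values: the reference FakeQuantization is assumed to map the input
    // linearly from [min, max] onto the quantized [0, 255] range.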
    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({