//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "BatchNormalizationTestImpl.hpp"

#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>

#include <armnn/ArmNN.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <armnn/backends/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>

#include <backendsCommon/test/TensorCopyUtils.hpp>
#include <backendsCommon/test/WorkloadTestUtils.hpp>

#include <test/TensorHelpers.hpp>

#include <boost/core/ignore_unused.hpp>

namespace
{

using namespace armnnUtils;

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    boost::ignore_unused(memoryManager);
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    // The parameter tensors are 1D, with one element per channel of the input.
    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
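
    // Note: QuantizedVector only quantizes when T is a quantized type; for float types it passes the
    // values through unchanged, so the same test data can drive both float and quantized variants.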

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
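
    // With m_Eps set to 0 below, each channel computes y = ((x - mean) / sqrt(variance)) * gamma + beta:
    //   channel 0: y = ((x - 3) / sqrt(4)) * 2 + 3 = x           (the identity)
    //   channel 1: y = ((x + 2) / sqrt(9)) * 1 + 2 = (x + 2) / 3 + 2
    // which is what the expectedOutputValues passed in by the callers encode.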

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean = &meanTensor;
    descriptor.m_Variance = &varianceTensor;
    descriptor.m_Beta = &betaTensor;
    descriptor.m_Gamma = &gammaTensor;
    descriptor.m_Parameters.m_Eps = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;
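
    // The four parameter tensors are constant inputs to the workload: they live in ScopedCpuTensorHandles
    // referenced from the queue descriptor, so they are allocated and filled once here rather than going
    // through the factory-created input/output handles.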
    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    boost::ignore_unused(memoryManager);

    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo outputTensorInfo({ num, height, width, channels }, ArmnnType);
    armnn::TensorInfo tensorInfo({ channels }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(
        {
            1.f, 1.f, 4.f, 1.f,
            4.f, 4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        },
        qScale, qOffset));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // Subtract mean, divide by standard deviation (with an epsilon to avoid division by zero),
    // multiply by gamma and add beta.
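    // For example, an input value of 4 in channel 1 becomes ((4 - -2) / sqrt(9 + 0)) * 1 + 2 = 4.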
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        },
        qScale, qOffset));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}

} // anonymous namespace

LayerTestResult<float, 4> BatchNormFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Channels: 2, Height: 3, Width: 2
    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f, 4.f, 1.f, -2.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f, 4.f, 3.f, 2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<float, 4> BatchNormFloat32NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Height: 3, Width: 2, Channels: 2
    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f, 4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f, 6.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f, 4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f, 6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float32>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Channels: 2, Height: 3, Width: 2
    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f, 4.f, 1.f, -2.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f, 4.f, 3.f, 2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NCHW);
}

LayerTestResult<armnn::Half, 4> BatchNormFloat16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Height: 3, Width: 2, Channels: 2
    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f, 4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f, 6.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f, 4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f, 6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::Float16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        0.f,
        0,
        armnn::DataLayout::NHWC);
}

LayerTestResult<uint8_t, 4> BatchNormUint8Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Channels: 2, Height: 3, Width: 2
    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f, 4.f, 1.f, -2.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f, 4.f, 3.f, 2.f, 4.f
    };
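
    // With scale 1/20 and offset 50, a real value x is quantized to round(x * 20) + 50, so the test
    // data in [-2, 6] maps to [10, 170] and sits comfortably inside the uint8 range.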
    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f/20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Height: 3, Width: 2, Channels: 2
    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f, 4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f, 6.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f, 4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f, 6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedAsymm8>(
        workloadFactory, memoryManager,
        inputOutputShape, inputValues, expectedOutputValues,
        1.f/20.f, 50, armnn::DataLayout::NHWC);
}

LayerTestResult<int16_t, 4> BatchNormInt16Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Channels: 2, Height: 3, Width: 2
    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        1.f, 1.f, 4.f, 1.f, -2.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Channel 0, Height (3) x Width (2)
        1.f, 4.f, 4.f, 2.f, 1.f, 6.f,

        // Batch 0, Channel 1, Height (3) x Width (2)
        3.f, 3.f, 4.f, 3.f, 2.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f/20.f,
        50,
        armnn::DataLayout::NCHW);
}

LayerTestResult<int16_t, 4> BatchNormInt16NhwcTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // BatchSize: 1, Height: 3, Width: 2, Channels: 2
    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };

    std::vector<float> inputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 1.f, 4.f, 1.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 1.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, -2.f, 6.f, 4.f
    };

    std::vector<float> expectedOutputValues
    {
        // Batch 0, Height 0, Width (2) x Channel (2)
        1.f, 3.f, 4.f, 3.f,

        // Batch 0, Height 1, Width (2) x Channel (2)
        4.f, 4.f, 2.f, 3.f,

        // Batch 0, Height 2, Width (2) x Channel (2)
        1.f, 2.f, 6.f, 4.f
    };

    return BatchNormTestImpl<armnn::DataType::QuantisedSymm16>(
        workloadFactory,
        memoryManager,
        inputOutputShape,
        inputValues,
        expectedOutputValues,
        1.f/20.f,
        50,
        armnn::DataLayout::NHWC);
}

LayerTestResult<float, 4> CompareBatchNormTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    armnn::IWorkloadFactory& refWorkloadFactory)
{
    boost::ignore_unused(memoryManager);
    const unsigned int width     = 2;
    const unsigned int height    = 3;
    const unsigned int channels  = 5;
    const unsigned int batchSize = 3;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;
    armnn::TensorInfo tensorInfo;

    constexpr unsigned int shape[]       = { batchSize, channels, height, width };
    constexpr unsigned int tensorShape[] = { channels };

    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);

    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);

    auto mean = MakeRandomTensor<float, 1>(tensorInfo, 123);
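    // The third argument of MakeRandomTensor is the lower bound of the random range here: variance
    // must stay non-negative because the workload computes sqrt(variance + eps).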
    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
    auto beta = MakeRandomTensor<float, 1>(tensorInfo, 123);
    auto gamma = MakeRandomTensor<float, 1>(tensorInfo, 345);

    LayerTestResult<float, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean = &meanTensor;
    data.m_Variance = &varianceTensor;
    data.m_Beta = &betaTensor;
    data.m_Gamma = &gammaTensor;
    data.m_Parameters.m_Eps = 0.01f;

    armnn::BatchNormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
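
    // Comparison-test pattern: the descriptor is cloned and re-bound to the reference factory's
    // handles, so the identical input runs on both the backend under test and the reference backend;
    // the reference result is copied into ret.outputExpected below for the comparison.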

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);

    inputHandle->Allocate();
    outputHandle->Allocate();
    inputHandleRef->Allocate();
    outputHandleRef->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workload->PostAllocationConfigure();
    workload->Execute();

    workloadRef->PostAllocationConfigure();
    workloadRef->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());

    return ret;
}