//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>
template<typename T>
LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                        const armnn::TensorShape& inputOutputTensorShape,
                                        const std::vector<float>& inputValues,
                                        const std::vector<float>& expectedOutputValues,
                                        float qScale,
                                        int32_t qOffset,
                                        armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());

    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));
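    // Note: callers must compute expectedOutputValues against these fixed parameters,
    // i.e. per channel y = gamma * (x - mean) / sqrt(variance + eps) + beta,
    // with eps set to 0 in the descriptor below.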

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

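    // The queue descriptor carries the parameter tensor handles and the layer
    // settings (epsilon, data layout) into the backend workload.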
    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean                    = &meanTensor;
    descriptor.m_Variance                = &varianceTensor;
    descriptor.m_Beta                    = &betaTensor;
    descriptor.m_Gamma                   = &gammaTensor;
    descriptor.m_Parameters.m_Eps        = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}

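// A minimal usage sketch (hypothetical; the function name and test data are not
// part of the original file): shows how a backend test could drive
// BatchNormTestImpl with NCHW float data. With the parameters hard-coded above
// (mean {3, -2}, variance {4, 9}, beta {3, 2}, gamma {2, 1}) and eps = 0,
// channel 0 reduces to y = 2 * (x - 3) / 2 + 3 = x and channel 1 to
// y = (x + 2) / 3 + 2; the expected values below follow from that.
inline LayerTestResult<float, 4> BatchNormFloatNchwSketch(armnn::IWorkloadFactory& workloadFactory)
{
    // N = 1, C = 2, H = 3, W = 2.
    const armnn::TensorShape shape({ 1, 2, 3, 2 });

    const std::vector<float> input =
    {
        // Channel 0: identity under these parameters.
         1.f, 4.f,
         4.f, 2.f,
         1.f, 6.f,
        // Channel 1: y = (x + 2) / 3 + 2.
         1.f, 1.f,
         4.f, 1.f,
        -2.f, 4.f
    };

    const std::vector<float> expectedOutput =
    {
        1.f, 4.f,
        4.f, 2.f,
        1.f, 6.f,

        3.f, 3.f,
        4.f, 3.f,
        2.f, 4.f
    };

    // qScale/qOffset are ignored for non-quantized types (see the guard above).
    return BatchNormTestImpl<float>(workloadFactory, shape, input, expectedOutput,
                                    0.0f, 0, armnn::DataLayout::NCHW);
}
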
template<typename T>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(armnn::IWorkloadFactory& workloadFactory,
                                            float qScale,
                                            int32_t qOffset)
{
    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f,  1.f, 4.f, 1.f,
            4.f,  4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        }));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean             = &meanTensor;
    data.m_Variance         = &varianceTensor;
    data.m_Beta             = &betaTensor;
    data.m_Gamma            = &gammaTensor;
    data.m_Parameters.m_Eps = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
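    // With mean {3, -2}, variance {4, 9}, beta {3, 2}, gamma {2, 1} and eps = 0,
    // channel 0 works out to y = 2 * (x - 3) / 2 + 3 = x (identity) and channel 1
    // to y = (x + 2) / 3 + 2, which yields the expected values below.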
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
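
// Usage sketch (hypothetical, not from the original file): a backend test would
// typically instantiate the helper above per data type, e.g.
//
//     LayerTestResult<float, 4> result = BatchNormTestNhwcImpl<float>(workloadFactory, 0.0f, 0);
//
// passing a non-zero qScale/qOffset when T is a quantized type such as uint8_t.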