IVGCVSW-2093 Add SpaceToBatchNd layer and corresponding no-op factory implementations
[platform/upstream/armnn.git] / src / backends / test / BatchNormTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <test/TensorHelpers.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <backends/test/QuantizeHelper.hpp>

template<typename T>
LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                        const armnn::TensorShape& inputOutputTensorShape,
                                        const std::vector<float>& inputValues,
                                        const std::vector<float>& expectedOutputValues,
                                        float qScale,
                                        int32_t qOffset,
                                        armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());

    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);

    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 armnn::GetDataType<T>());
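    // DataLayoutIndexed maps the data layout to dimension indices (the channels
    // dimension is index 1 for NCHW and index 3 for NHWC), so tensorInfo here
    // describes a 1-D tensor with one element per channel.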
    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
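    // Note: when T is a quantized type, QuantizedVector is assumed to apply the
    // usual affine mapping q = round(x / qScale) + qOffset (clamped to T's range);
    // for float types it passes the values through unchanged.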

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));
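
    // A sketch of the reference arithmetic these constants feed into (the backend
    // workload under test performs the actual computation), per channel c:
    //     out = gamma[c] * (x - mean[c]) / sqrt(variance[c] + eps) + beta[c]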

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean                    = &meanTensor;
    descriptor.m_Variance                = &varianceTensor;
    descriptor.m_Beta                    = &betaTensor;
    descriptor.m_Gamma                   = &gammaTensor;
    descriptor.m_Parameters.m_Eps        = 0.0f;
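    // An eps of 0 is safe in these tests because the variances used (4 and 9)
    // are well away from zero; real networks typically set a small positive eps.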
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}


template<typename T>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(armnn::IWorkloadFactory& workloadFactory,
                                            float qScale,
                                            int32_t qOffset)
{
    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f,  1.f, 4.f, 1.f,
            4.f,  4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        }));
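    // With NHWC the channels are the fastest-varying dimension: for shape
    // {1, 3, 2, 2}, each row of four values above covers two width positions,
    // alternating channel 0 and channel 1.
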
    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean                    = &meanTensor;
    data.m_Variance                = &varianceTensor;
    data.m_Beta                    = &betaTensor;
    data.m_Gamma                   = &gammaTensor;
    data.m_Parameters.m_Eps        = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract the mean, divide by the standard deviation (with an epsilon to avoid division by zero),
    // multiply by gamma and add beta.
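    // For example, channel 1 (mean -2, variance 9, gamma 1, beta 2) with input -2:
    //     (-2 - (-2)) / sqrt(9 + 0) * 1 + 2 = 2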
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}