IVGCVSW-2467 Remove GetDataType<T> function
src/backends/backendsCommon/test/BatchNormTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include "TypeUtils.hpp"
#include "WorkloadTestUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>

#include <DataLayoutIndexed.hpp>

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::TensorShape& inputOutputTensorShape,
    const std::vector<float>& inputValues,
    const std::vector<float>& expectedOutputValues,
    float qScale,
    int32_t qOffset,
    armnn::DataLayout dataLayout)
{
    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, ArmnnType);
    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, ArmnnType);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);

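    // The batch norm parameters (mean, variance, beta and gamma) are 1D tensors with one
    // element per channel; DataLayoutIndexed resolves the channel dimension of the 4D
    // shape (index 1 for NCHW, index 3 for NHWC).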
    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
                                 ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }
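    // A worked example of the mapping (assuming the usual asymmetric scheme
    // q = round(v / qScale) + qOffset): with qScale = 0.1f and qOffset = 50,
    // the float value 3.0f is stored as round(3.0 / 0.1) + 50 = 80.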

    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
                                        QuantizedVector<T>(qScale, qOffset, inputValues));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));

    LayerTestResult<T, 4> result(outputTensorInfo);

    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    armnn::BatchNormalizationQueueDescriptor descriptor;
    descriptor.m_Mean                    = &meanTensor;
    descriptor.m_Variance                = &varianceTensor;
    descriptor.m_Beta                    = &betaTensor;
    descriptor.m_Gamma                   = &gammaTensor;
    descriptor.m_Parameters.m_Eps        = 0.0f;
    descriptor.m_Parameters.m_DataLayout = dataLayout;
    armnn::WorkloadInfo info;

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());

    return result;
}
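
// A minimal usage sketch (hypothetical caller, not part of this file): a backend test
// would instantiate this template with a concrete data type and layout, e.g.
//
//     std::vector<float> input          = { ... };  // 1 x 2 x 3 x 2 values
//     std::vector<float> expectedOutput = { ... };  // precomputed from the constants above
//     auto result = BatchNormTestImpl<armnn::DataType::Float32>(
//         workloadFactory, memoryManager,
//         { 1, 2, 3, 2 },   // N, C, H, W for DataLayout::NCHW
//         input, expectedOutput,
//         0.f, 0,           // qScale/qOffset have no effect for non-quantized types
//         armnn::DataLayout::NCHW);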


template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 4> BatchNormTestNhwcImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale,
    int32_t qOffset)
{
    const unsigned int width    = 2;
    const unsigned int height   = 3;
    const unsigned int channels = 2;
    const unsigned int num      = 1;

    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, ArmnnType);
    armnn::TensorInfo tensorInfo({channels}, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo.SetQuantizationScale(qScale);
        outputTensorInfo.SetQuantizationOffset(qOffset);
        tensorInfo.SetQuantizationScale(qScale);
        tensorInfo.SetQuantizationOffset(qOffset);
    }

    auto input = MakeTensor<T, 4>(inputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f,  1.f, 4.f, 1.f,
            4.f,  4.f, 2.f, 1.f,
            1.f, -2.f, 6.f, 4.f
        }));

    // These values are per-channel of the input.
    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));

    LayerTestResult<T, 4> ret(outputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::BatchNormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);

    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Mean                    = &meanTensor;
    data.m_Variance                = &varianceTensor;
    data.m_Beta                    = &betaTensor;
    data.m_Gamma                   = &gammaTensor;
    data.m_Parameters.m_Eps        = 0.0f;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // For each channel:
    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
    // multiply by gamma and add beta.
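    // For example, for the first element (channel 0): (1 - 3) / sqrt(4 + 0) * 2 + 3 = 1,
    // and for the second element (channel 1): (1 - (-2)) / sqrt(9 + 0) * 1 + 2 = 3.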
    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
        QuantizedVector<T>(qScale, qOffset,
        {
            1.f, 3.f, 4.f, 3.f,
            4.f, 4.f, 2.f, 3.f,
            1.f, 2.f, 6.f, 4.f
        }));

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    return ret;
}
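
// A minimal usage sketch (hypothetical caller): a quantized backend test would
// instantiate the NHWC variant as, e.g.
//
//     auto result = BatchNormTestNhwcImpl<armnn::DataType::QuantisedAsymm8>(
//         workloadFactory, memoryManager, 1.f / 20.f, 50);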