IVGCVSW-1946: Remove armnn/src from the include paths
src/backends/backendsCommon/test/SplitterTestImpl.hpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>

template<typename T>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                     float qScale = 0.0f,
                                                     int32_t qOffset = 0)
{
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: Compute Library imposes a restriction that the x and y dimensions (input height and width)
    //       cannot be split.
    //       For the reasons for this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.
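    //
    // Concretely, the two splits performed below look like this (shapes in [channels, height, width]):
    //   Split 1: [3, 6, 5] -> [1, 6, 5] (channel 0) + [2, 6, 5] (channels 1-2)
    //   Split 2: [2, 6, 5] -> [1, 6, 5] + [1, 6, 5]  (channels 0 and 1 of the subtensor,
    //                                                 i.e. channels 1 and 2 of the original input)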

    // To take channel 0 of the original input,
    // and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // To take channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }

    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);

    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));

    // Channels 1 & 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of return 2 (return 2 being channels 1 and 2 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));

    // Channel 1 of return 2.
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // NOTE: As a corollary of the restriction on splitting the x and y dimensions, the x and y values of
    //       the view origins have to be zero. The coordinates are as per the tensor infos above: channels,
    //       height (y), width (x). Note that under the hood the compute engine reverses these, i.e. its
    //       coordinate system is x, y, channels.
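    //
    // As an illustrative reading of the origins below: wOrigin2 = {1, 0, 0} places the start of the
    // second view at channel 1, row 0, column 0, so with output[1] sized [2, 6, 5] that view covers
    // channels 1 and 2 of the input.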
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

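    // NOTE: Where the factory supports sub-tensors, each output handle below is created as a view into
    //       its parent's memory (the input tensor for the first split, output 2 for the second), which
    //       lets the splitter avoid an extra copy; otherwise standalone handles are created instead.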
    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);

    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());

    // Do the second split.
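    // The input to this split is outputHandle2, which the first workload has already allocated and
    // populated, so no further CopyDataToITensorHandle is needed before executing workload2.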
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    workload2->Execute();

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());

    std::vector<LayerTestResult<T,3>> ret = { ret1, ret2, ret3, ret4 };

    return ret;
}


template <typename T>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
                                                                 {
                                                                     1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
                                                                     6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
                                                                     11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
                                                                     16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
                                                                     21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
                                                                     26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

                                                                     31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
                                                                     36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
                                                                     41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
                                                                     46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
                                                                     51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
                                                                     56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

                                                                     61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
                                                                     66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
                                                                     71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
                                                                     76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
                                                                     81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
                                                                     86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
                                                                 }));

    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);

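    // A single view at origin {0, 0, 0} whose extent is the whole tensor: the "split" below is
    // effectively an identity copy, which is what this test exercises (outputExpected is simply
    // set to the unmodified input).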
    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    LayerTestResult<T, 3> ret(tensorInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
    ret.outputExpected = input;

    return ret;
}