//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <test/TensorHelpers.hpp>

#include <backends/CpuTensorHandle.hpp>
#include <backends/WorkloadFactory.hpp>

#include <backends/test/QuantizeHelper.hpp>
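// Test implementations for the Splitter layer: SplitterTestCommon performs a
// two-stage split along the channel dimension, and CopyViaSplitterTestImpl
// verifies that a splitter with a single full-extent view acts as a copy.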
template <typename T>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                     float qScale = 0.0f,
                                                     int32_t qOffset = 0)
{
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;
    // NOTE: Compute Library imposes a restriction that the x and y dimensions (input width and height)
    //       cannot be split.
    //       For the reasons for this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

    // These dimensions take channel 0 of the original input,
    // and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // These dimensions take channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;
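    // For reference, the resulting shapes: the { 3, 6, 5 } input is split into
    // { 1, 6, 5 } (output 1) and { 2, 6, 5 } (output 2); output 2 is then split
    // into two { 1, 6, 5 } subtensors (outputs 3 and 4).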
    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }
    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);
    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));
    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));
    // Channels 1 and 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));
    // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));
    // Channel 1 of return 2.
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));
    // NOTE: As a corollary of the x/y splitting restriction, the x and y values of the view origins
    //       have to be zero. The coordinates are ordered as per the tensor infos above: channels,
    //       height (y), width (x). Note that under the hood the compute engine reverses these,
    //       i.e. its coordinate system is x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
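    // For example, window2 at origin {1, 0, 0} combined with outputTensorInfo2's
    // shape of { 2, 6, 5 } selects channels 1 and 2 of the input in full.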
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
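    // Where the backend supports sub-tensors, each output handle below is created
    // as a view into its parent tensor's memory rather than as a separate buffer,
    // so the split does not need to copy data.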
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);

    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);
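    // Note that outputHandle3 and outputHandle4 are created as sub-tensors of
    // outputHandle2 (which may itself be a sub-tensor of inputHandle), so the
    // second split can view the first split's output directly.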
    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);
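    // Each ViewOrigin is matched by index with the output added above:
    // window1 describes outputHandle1's view, window2 describes outputHandle2's.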
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);
    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    workload2->Execute();

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}
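// Illustrative usage (hypothetical; assumes a concrete factory such as
// armnn::RefWorkloadFactory is available in the including test file):
//
//   armnn::RefWorkloadFactory workloadFactory;
//   std::vector<LayerTestResult<float, 3>> results =
//       SplitterTestCommon<float>(workloadFactory);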
template <typename T>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        }));
    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
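    // A single view at origin { 0, 0, 0 } whose extent matches the whole tensor
    // makes the splitter act as a straight copy, which is what this test checks.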
    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    LayerTestResult<T, 3> ret(tensorInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
    ret.outputExpected = input;

    return ret;
}