// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT

#include "WorkloadTestUtils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>

#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/IBackendInternal.hpp>
#include <backendsCommon/WorkloadFactory.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <test/TensorHelpers.hpp>
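
// SplitterTestCommon splits a [3,6,5] input along the channel dimension into a one-channel view
// and a two-channel view, then splits the two-channel result again into two single-channel views,
// and checks each output against the corresponding slice of the input.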
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale = 0.0f,
    int32_t qOffset = 0)
{
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;

    // NOTE: Compute Library imposes a restriction that the x and y dimensions (input height and width)
    //       cannot be split.
    //       For the reasons behind this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.

    // To take channel 0 of the original input,
    // and channel 0 and channel 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // To take channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;

    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, ArmnnType);

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, ArmnnType);

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, ArmnnType);

    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if(armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }
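
    // Each LayerTestResult holds the actual output written by the workload and the expected
    // reference data it is compared against.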
    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);

    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));

    // Channels 1 & 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));

    // Channel 1 of return 2.
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));

    // NOTE: As a corollary of the restriction that the x and y dimensions cannot be split, the x and y
    //       values of the view origins have to be zero. The coordinates are as per the tensor info above:
    //       channels, height/y, width/x. Note that under the hood the compute engine reverses these,
    //       i.e. its coordinate system is x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);

    bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
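
    // Where the backend supports sub-tensors, each output handle is created as a sub-tensor (a view)
    // of its parent handle at the corresponding view origin; otherwise a standalone tensor handle is
    // allocated for it.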
    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);
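
    // The outputs of the second split are created from outputHandle2, so with sub-tensor support they
    // are sub-tensors of a sub-tensor of the original input.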
    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);

    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
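
    // The view origins are matched by index to the outputs added above, so they must be pushed
    // in the same order.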
    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());

    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    ExecuteWorkload(*workload2, memoryManager);

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());

    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}
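
// CopyViaSplitterTestImpl runs a Splitter with a single view that covers the whole input, which
// simply copies the tensor; the output is expected to match the input exactly.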
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    float qScale, int32_t qOffset)
{
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType);
    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        }));

    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);

    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
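
    // With a single view at origin {0, 0, 0} whose shape equals the input shape, the "split" output
    // is a view over (or a copy of) the entire input tensor.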
    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            workloadFactory.CreateTensorHandle(tensorInfo);

    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    ExecuteWorkload(*workload, memoryManager);

    LayerTestResult<T, 3> ret(tensorInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
    ret.outputExpected = input;

    return ret;
}