2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
7 #include <armnn/ArmNN.hpp>
8 #include <armnn/Tensor.hpp>
10 #include <backendsCommon/CpuTensorHandle.hpp>
11 #include <backendsCommon/WorkloadFactory.hpp>
12 #include <backendsCommon/test/QuantizeHelper.hpp>
14 #include <test/TensorHelpers.hpp>
17 std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
21 unsigned int inputWidth = 5;
22 unsigned int inputHeight = 6;
23 unsigned int inputChannels = 3;
25 // NOTE: Compute Library imposes a restriction that the x and y dimension (input height and width)
27 // For the reasons for this, see first comment on https://jira.arm.com/browse/IVGCVSW-1239
29 // This test has therefore been recast to split the channels, then split the resulting subtensor.
31 // To take channel 0 of original output
32 // and channel 0 and channel 1 of the split subtensor.
33 unsigned int outputWidth1 = inputWidth;
34 unsigned int outputHeight1 = inputHeight;
35 unsigned int outputChannels1 = 1;
37 // To take channel 1 and 2 of the original output.
38 unsigned int outputWidth2 = inputWidth;
39 unsigned int outputHeight2 = inputHeight;
40 unsigned int outputChannels2 = 2;
43 // Define the tensor descriptors.
44 armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());
46 // Outputs of the original split.
47 armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
48 armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());
50 // Outputs of the subsequent subtensor split.
51 armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
52 armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
54 // Set quantization parameters if the requested type is a quantized type.
55 // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
56 if(armnn::IsQuantizedType<T>())
58 inputTensorInfo.SetQuantizationScale(qScale);
59 inputTensorInfo.SetQuantizationOffset(qOffset);
60 outputTensorInfo1.SetQuantizationScale(qScale);
61 outputTensorInfo1.SetQuantizationOffset(qOffset);
62 outputTensorInfo2.SetQuantizationScale(qScale);
63 outputTensorInfo2.SetQuantizationOffset(qOffset);
64 outputTensorInfo3.SetQuantizationScale(qScale);
65 outputTensorInfo3.SetQuantizationOffset(qOffset);
66 outputTensorInfo4.SetQuantizationScale(qScale);
67 outputTensorInfo4.SetQuantizationOffset(qOffset);
70 LayerTestResult<T,3> ret1(outputTensorInfo1);
71 LayerTestResult<T,3> ret2(outputTensorInfo2);
72 LayerTestResult<T,3> ret3(outputTensorInfo3);
73 LayerTestResult<T,3> ret4(outputTensorInfo4);
75 auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
76 QuantizedVector<T>(qScale, qOffset, {
77 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
78 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
79 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
80 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
81 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
82 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
84 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
85 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
86 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
87 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
88 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
89 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
91 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
92 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
93 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
94 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
95 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
96 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
100 // Channel 0 of the original input.
101 ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
102 QuantizedVector<T>(qScale, qOffset, {
103 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
104 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
105 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
106 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
107 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
108 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
112 // Channel 1 & 2 of the original input.
113 ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
114 QuantizedVector<T>(qScale, qOffset, {
115 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
116 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
117 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
118 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
119 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
120 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
122 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
123 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
124 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
125 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
126 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
127 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
131 // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
132 ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
133 QuantizedVector<T>(qScale, qOffset, {
134 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
135 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
136 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
137 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
138 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
139 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
143 // Channel 1 of return 2.
144 ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
145 QuantizedVector<T>(qScale, qOffset, {
146 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
147 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
148 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
149 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
150 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
151 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
155 // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
156 // have to be zero, the co-ordinates are as per the tensor info above channels, height/y, width/x
157 // note that under the hood the compute engine reverses these i.e. its coordinate system is x, y, channels.
158 std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
159 armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);
161 std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
162 armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);
164 std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
165 armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
167 std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
168 armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
170 bool subTensorsSupported = workloadFactory.SupportsSubTensors();
172 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
174 std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
175 subTensorsSupported ?
176 workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
177 workloadFactory.CreateTensorHandle(outputTensorInfo1);
179 std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
180 subTensorsSupported ?
181 workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
182 workloadFactory.CreateTensorHandle(outputTensorInfo2);
184 std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
185 subTensorsSupported ?
186 workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
187 workloadFactory.CreateTensorHandle(outputTensorInfo3);
189 std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
190 subTensorsSupported ?
191 workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
192 workloadFactory.CreateTensorHandle(outputTensorInfo4);
194 // Do the first split
195 armnn::SplitterQueueDescriptor data;
196 armnn::WorkloadInfo info;
197 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
198 AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
199 AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
201 data.m_ViewOrigins.push_back(window1);
202 data.m_ViewOrigins.push_back(window2);
204 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
206 inputHandle->Allocate();
207 outputHandle1->Allocate();
208 outputHandle2->Allocate();
210 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
214 CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
215 CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
217 // Do the second split.
218 armnn::SplitterQueueDescriptor data2;
219 armnn::WorkloadInfo info2;
220 AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
221 AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
222 AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
224 data2.m_ViewOrigins.push_back(window3);
225 data2.m_ViewOrigins.push_back(window4);
227 std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
229 outputHandle3->Allocate();
230 outputHandle4->Allocate();
232 workload2->Execute();
234 CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
235 CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
237 std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
243 template <typename T>
244 LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
246 const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
247 auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
249 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
250 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
251 11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
252 16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
253 21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
254 26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
256 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
257 36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
258 41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
259 46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
260 51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
261 56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
263 61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
264 66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
265 71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
266 76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
267 81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
268 86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
271 std::vector<unsigned int> origin = { 0, 0, 0 };
272 armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
274 const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
276 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
278 std::unique_ptr<armnn::ITensorHandle> outputHandle =
279 subTensorsSupported ?
280 workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
281 workloadFactory.CreateTensorHandle(tensorInfo);
283 armnn::SplitterQueueDescriptor data;
284 armnn::WorkloadInfo info;
285 AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
286 AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
288 data.m_ViewOrigins.push_back(window);
290 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
292 inputHandle->Allocate();
293 outputHandle->Allocate();
295 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
299 LayerTestResult<T, 3> ret(tensorInfo);
300 CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
301 ret.outputExpected = input;