// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
#include <armnn/ArmNN.hpp>
#include <armnn/Tensor.hpp>
#include <backends/WorkloadInfo.hpp>

#include "test/TensorHelpers.hpp"

#include "backends/CpuTensorHandle.hpp"
#include "backends/WorkloadFactory.hpp"

#include "backends/test/QuantizeHelper.hpp"
template <typename T>
std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
                                                     float qScale,
                                                     int32_t qOffset)
{
    unsigned int inputWidth = 5;
    unsigned int inputHeight = 6;
    unsigned int inputChannels = 3;
    // NOTE: Compute Library imposes a restriction that the x and y dimensions (input height and width)
    //       cannot be split.
    //       For the reasons behind this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239.
    //
    // This test has therefore been recast to split the channels, then split the resulting subtensor.
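    //
    // Split structure (dimensions are channels, height, width):
    //     input   [3, 6, 5] --first split-->  output1 [1, 6, 5] (channel 0)
    //                                         output2 [2, 6, 5] (channels 1-2)
    //     output2 [2, 6, 5] --second split--> output3 [1, 6, 5] (channel 0 of output2)
    //                                         output4 [1, 6, 5] (channel 1 of output2)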
    // To take channel 0 of the original input,
    // and channels 0 and 1 of the split subtensor.
    unsigned int outputWidth1 = inputWidth;
    unsigned int outputHeight1 = inputHeight;
    unsigned int outputChannels1 = 1;

    // To take channels 1 and 2 of the original input.
    unsigned int outputWidth2 = inputWidth;
    unsigned int outputHeight2 = inputHeight;
    unsigned int outputChannels2 = 2;
    // Define the tensor descriptors.
    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());

    // Outputs of the original split.
    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());

    // Outputs of the subsequent subtensor split.
    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
    // Set quantization parameters if the requested type is a quantized type.
    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
    if (armnn::IsQuantizedType<T>())
    {
        inputTensorInfo.SetQuantizationScale(qScale);
        inputTensorInfo.SetQuantizationOffset(qOffset);
        outputTensorInfo1.SetQuantizationScale(qScale);
        outputTensorInfo1.SetQuantizationOffset(qOffset);
        outputTensorInfo2.SetQuantizationScale(qScale);
        outputTensorInfo2.SetQuantizationOffset(qOffset);
        outputTensorInfo3.SetQuantizationScale(qScale);
        outputTensorInfo3.SetQuantizationOffset(qOffset);
        outputTensorInfo4.SetQuantizationScale(qScale);
        outputTensorInfo4.SetQuantizationOffset(qOffset);
    }
    LayerTestResult<T,3> ret1(outputTensorInfo1);
    LayerTestResult<T,3> ret2(outputTensorInfo2);
    LayerTestResult<T,3> ret3(outputTensorInfo3);
    LayerTestResult<T,3> ret4(outputTensorInfo4);
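    // The input is a [3, 6, 5] (channels, height, width) tensor holding the values 1..90;
    // each channel is a contiguous 6x5 block of 30 values.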
    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));
    // Channel 0 of the original input.
    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
        })
    ));
    // Channels 1 and 2 of the original input.
    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));
    // Channel 0 of return 2 (i.e. channel 1 of the original input).
    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
        })
    ));
    // Channel 1 of return 2 (i.e. channel 2 of the original input).
    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
        QuantizedVector<T>(qScale, qOffset, {
            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        })
    ));
    // NOTE: As a corollary of the x/y splitting restriction, the x and y values of the view origins
    //       have to be zero. The coordinates follow the tensor info above: channels, height (y), width (x).
    //       Note that under the hood the compute engine reverses these, i.e. its coordinate system is
    //       x, y, channels.
    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of output[0].
    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);

    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; // Extent of the window is defined by size of output[1].
    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);

    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; // Extent of the window is defined by size of output[2].
    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);

    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; // Extent of the window is defined by size of output[3].
    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
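    // For example, wOrigin2 = {1, 0, 0} places the second view at channel 1, row 0, column 0
    // of the parent tensor, so output2 covers channels 1 and 2 in their entirety.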
    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
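    // When sub-tensors are supported, each output handle below is created as a view aliasing
    // the parent tensor's memory; otherwise standalone tensors are allocated and the splitter
    // workload copies each view out.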
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo1);

    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo2);
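    // The second-level output handles are carved out of outputHandle2 rather than the original
    // input, mirroring the nested split performed below.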
    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo3);

    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
            workloadFactory.CreateTensorHandle(outputTensorInfo4);
    // Do the first split.
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());

    data.m_ViewOrigins.push_back(window1);
    data.m_ViewOrigins.push_back(window2);
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);

    inputHandle->Allocate();
    outputHandle1->Allocate();
    outputHandle2->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();

    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
    // Do the second split.
    armnn::SplitterQueueDescriptor data2;
    armnn::WorkloadInfo info2;
    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());

    data2.m_ViewOrigins.push_back(window3);
    data2.m_ViewOrigins.push_back(window4);

    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);

    outputHandle3->Allocate();
    outputHandle4->Allocate();

    workload2->Execute();

    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4};

    return ret;
}
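// A hypothetical caller might instantiate the test for both float and quantized data,
// e.g. (the scale/offset values here are illustrative only):
//     std::vector<LayerTestResult<float, 3>> results = SplitterTestCommon<float>(workloadFactory, 0.0f, 0);
//     std::vector<LayerTestResult<uint8_t, 3>> qResults = SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);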
template <typename T>
LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
{
    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
        {
            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,

            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,

            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
        }));
    std::vector<unsigned int> origin = { 0, 0, 0 };
    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
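    // A single view at origin {0, 0, 0} with the full tensor shape turns the splitter into a
    // plain copy of the whole input.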
    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle =
        subTensorsSupported ?
            workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
            workloadFactory.CreateTensorHandle(tensorInfo);
    armnn::SplitterQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());

    data.m_ViewOrigins.push_back(window);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);

    workload->Execute();
    LayerTestResult<T, 3> ret(tensorInfo);
    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
    ret.outputExpected = input;

    return ret;
}
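// A hypothetical caller (illustrative values only):
//     LayerTestResult<float, 3> result = CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);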