Release 18.08
platform/upstream/armnn.git: src/armnn/backends/test/CreateWorkloadRef.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include "backends/RefWorkloadFactory.hpp"
#include "backends/RefWorkloads.hpp"
#include "backends/CpuTensorHandle.hpp"

#include "test/CreateWorkload.hpp"

namespace
{

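// Verifies that a workload with a single input and a single output was created
// with tensor handles carrying the expected TensorInfo. Note that
// boost::polymorphic_downcast behaves like static_cast in release builds but
// asserts via dynamic_cast in debug builds, so a wrong handle type fails loudly.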
template<typename Workload>
void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle  = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}

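// Same as CheckInputOutput, but for workloads with two inputs (e.g. addition
// and multiplication).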
template <typename Workload>
void CheckInputsOutput(std::unique_ptr<Workload> workload,
                       const TensorInfo&         inputInfo0,
                       const TensorInfo&         inputInfo1,
                       const TensorInfo&         outputInfo)
{
    auto queueDescriptor = workload->GetData();
    auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
    auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
}
} // anonymous namespace

BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)

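// Each test below builds a small graph via a Create*WorkloadTest helper from
// test/CreateWorkload.hpp, asks RefWorkloadFactory to create the corresponding
// reference (CPU) workload, and then checks the tensor handles recorded in the
// workload's queue descriptor.
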
template <typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateActivationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);

    // Checks that inputs and outputs are as we expect them (see definition of CreateActivationWorkloadTest).
    CheckInputOutput(std::move(workload),
        TensorInfo({ 1, 1 }, DataType),
        TensorInfo({ 1, 1 }, DataType));
}

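// For orientation, a minimal sketch of what a Create*WorkloadTest helper does.
// This is an assumption based on test/CreateWorkload.hpp; Connect,
// CreateTensorHandles and MakeAndCheckWorkload are helpers assumed to be
// defined there, not part of this file:
//
//     ActivationLayer* const layer = graph.AddLayer<ActivationLayer>(ActivationDescriptor(), "layer");
//     Layer* const input  = graph.AddLayer<InputLayer>(0, "input");
//     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
//     TensorInfo tensorInfo({ 1, 1 }, DataType);
//     Connect(input, layer, tensorInfo);
//     Connect(layer, output, tensorInfo);
//     CreateTensorHandles(graph, factory);
//     return MakeAndCheckWorkload<ActivationWorkloadType>(*layer, graph, factory);
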
BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
{
    RefCreateActivationWorkloadTest<RefActivationFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
{
    RefCreateActivationWorkloadTest<RefActivationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename AdditionWorkloadType, armnn::DataType DataType>
static void RefCreateAdditionWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateAdditionWorkloadTest<AdditionWorkloadType, DataType>(factory, graph);

    // Checks that inputs and outputs are as we expect them (see definition of CreateAdditionWorkloadTest).
    CheckInputsOutput(std::move(workload),
        TensorInfo({ 2, 3 }, DataType),
        TensorInfo({ 2, 3 }, DataType),
        TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
    RefCreateAdditionWorkloadTest<RefAdditionFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
{
    RefCreateAdditionWorkloadTest<RefAdditionUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
                    (factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
    CheckInputOutput(
        std::move(workload), TensorInfo({2, 3, 1, 1}, DataType::Float32), TensorInfo({2, 3, 1, 1}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them.
    CheckInputOutput(
        std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
}

BOOST_AUTO_TEST_CASE(CreateConvolution2dWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dFloat32Workload,
                                                    DataType::Float32>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({2, 3, 8, 16}, DataType::Float32),
                     TensorInfo({2, 2, 2, 10}, DataType::Float32));
}

BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolution2dWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload =
        CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dFloat32Workload>(factory, graph);

    // Checks that outputs and inputs are as we expect them
    // (see definition of CreateDepthwiseConvolution2dWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({2, 3, 8, 16}, DataType::Float32),
                     TensorInfo({2, 9, 2, 10}, DataType::Float32));
}

template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
static void RefCreateFullyConnectedWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
    float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0f;
    float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0f;
    CheckInputOutput(std::move(workload),
        TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
        TensorInfo({ 3, 7 }, DataType, outputQScale));
}

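// Note: TensorInfo's constructor takes an optional quantisation scale (and
// offset) after the data type, which is what inputsQScale/outputQScale above
// feed, e.g. TensorInfo({ 3, 7 }, DataType::QuantisedAsymm8, 2.0f). A scale of
// 0.0f is the value used for non-quantised types such as Float32.
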
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat32Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateFullyConnectedUint8Workload)
{
    RefCreateFullyConnectedWorkloadTest<RefFullyConnectedUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename MultiplicationWorkloadType, armnn::DataType DataType>
static void RefCreateMultiplicationWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateMultiplicationWorkloadTest<MultiplicationWorkloadType, DataType>(factory, graph);

    // Checks that inputs and outputs are as we expect them (see definition of CreateMultiplicationWorkloadTest).
    CheckInputsOutput(std::move(workload),
        TensorInfo({ 2, 3 }, DataType),
        TensorInfo({ 2, 3 }, DataType),
        TensorInfo({ 2, 3 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
{
    RefCreateMultiplicationWorkloadTest<RefMultiplicationFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
{
    RefCreateMultiplicationWorkloadTest<RefMultiplicationUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateNormalizationWorkload)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateNormalizationWorkloadTest<RefNormalizationFloat32Workload,
                                                    armnn::DataType::Float32>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
    CheckInputOutput(std::move(workload),
                     TensorInfo({3, 5, 5, 1}, DataType::Float32),
                     TensorInfo({3, 5, 5, 1}, DataType::Float32));
}

template <typename Pooling2dWorkloadType, armnn::DataType DataType>
static void RefCreatePooling2dWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({3, 2, 5, 5}, DataType),
        TensorInfo({3, 2, 2, 4}, DataType));
}

BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
{
    RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SoftmaxWorkloadType, armnn::DataType DataType>
static void RefCreateSoftmaxWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({4, 1}, DataType),
        TensorInfo({4, 1}, DataType));
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSoftmaxUint8Workload)
{
    RefCreateSoftmaxWorkloadTest<RefSoftmaxUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);

    // Checks that the input and outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
    // The { 5, 7, 7 } input is split along dimension 0 into views of size 1, 2 and 2.
    SplitterQueueDescriptor queueDescriptor = workload->GetData();
    auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));

    auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));

    auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));

    auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
}

BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
{
    RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename MergerWorkloadType, armnn::DataType DataType>
static void RefCreateSplitterMergerWorkloadTest()
{
    // Tests that it is possible to decide which output of the splitter layer
    // should be linked to which input of the merger layer. Here the 0th output
    // of the splitter is connected to the 1st input of the merger, and the 1st
    // output of the splitter is connected to the 0th input of the merger.
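    //
    // Wiring under test (deliberately crossed), matching the pointer checks below:
    //
    //     Splitter.out0 ----> Merger.in1
    //     Splitter.out1 ----> Merger.in0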

    Graph graph;
    RefWorkloadFactory factory;
    auto workloads = CreateSplitterMergerWorkloadTest<SplitterWorkloadType, MergerWorkloadType, DataType>
        (factory, graph);

    auto wlSplitter = std::move(workloads.first);
    auto wlMerger = std::move(workloads.second);

    // Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlMerger->GetData().m_Inputs[1]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(mIn0);
    BOOST_TEST(mIn1);

    bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerFloat32)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterFloat32Workload, RefMergerFloat32Workload, DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSplitterMergerUint8)
{
    RefCreateSplitterMergerWorkloadTest<RefSplitterUint8Workload, RefMergerUint8Workload, DataType::QuantisedAsymm8>();
}

template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
static void RefCreateSingleOutputMultipleInputsTest()
{
    // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
    // The splitter has two outputs, and each of those outputs is used by two different activation layers.
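    //
    // Fan-out under test, matching the pointer checks below:
    //
    //     Splitter.out0 ----> Activ0_0.in0
    //                   \---> Activ0_1.in0
    //     Splitter.out1 ----> Activ1_0.in0
    //                   \---> Activ1_1.in0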

    Graph graph;
    RefWorkloadFactory factory;
    std::unique_ptr<SplitterWorkloadType> wlSplitter;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
    std::unique_ptr<ActivationWorkloadType> wlActiv1_1;

    CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
        ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);

    armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
    armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
    armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
    armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);

    BOOST_TEST(sOut0);
    BOOST_TEST(sOut1);
    BOOST_TEST(activ0_0Im);
    BOOST_TEST(activ0_1Im);
    BOOST_TEST(activ1_0Im);
    BOOST_TEST(activ1_1Im);

    bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                             (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);

    BOOST_TEST(validDataPointers);
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationFloat32Workload,
        armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
{
    RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationUint8Workload,
        armnn::DataType::QuantisedAsymm8>();
}

template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
static void RefCreateResizeBilinearTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 2, 3, 4, 4 }, DataType),
        TensorInfo({ 2, 3, 2, 2 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
{
    RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
{
    RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateL2NormalizationWorkloadTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>
            (factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32),
        TensorInfo({ 5, 20, 50, 67 }, armnn::DataType::Float32));
}

template <typename ReshapeWorkloadType, armnn::DataType DataType>
static void RefCreateReshapeWorkloadTest()
{
    Graph graph;
    RefWorkloadFactory factory;
    auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);

    // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
    CheckInputOutput(
        std::move(workload),
        TensorInfo({ 4, 1 }, DataType),
        TensorInfo({ 1, 4 }, DataType));
}

BOOST_AUTO_TEST_CASE(CreateReshapeFloat32Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeFloat32Workload, armnn::DataType::Float32>();
}

BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
    RefCreateReshapeWorkloadTest<RefReshapeUint8Workload, armnn::DataType::QuantisedAsymm8>();
}

BOOST_AUTO_TEST_SUITE_END()
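
// The suite is registered with Boost.Test; assuming the standard armnn unit-test
// binary (UnitTests), an individual case can be run with, e.g.:
//     UnitTests --run_test=CreateWorkloadRef/CreateActivationFloat32Workload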