IVGCVSW-2467 Remove GetDataType<T> function
[platform/upstream/armnn.git] / src / backends / reference / test / RefEndToEndTests.cpp
1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include <backendsCommon/test/EndToEndTestImpl.hpp>
7 #include <backendsCommon/test/MergerTestImpl.hpp>
8 #include <backendsCommon/test/ArithmeticTestImpl.hpp>
9
10 #include <boost/test/unit_test.hpp>
11 #include <boost/test/execution_monitor.hpp>
12
13 BOOST_AUTO_TEST_SUITE(RefEndToEnd)
14
// Every test in this suite targets the reference (CpuRef) backend only.
std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};
16
BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
{
    // End-to-end check that a Float32 constant layer produces correct data on CpuRef.
    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
}
21
BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
{
    // End-to-end check that a quantised (uint8) constant layer produces correct data on CpuRef.
    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
}
26
27 BOOST_AUTO_TEST_CASE(Unsigned8)
28 {
29     using namespace armnn;
30
31     // Create runtime in which test will run
32     armnn::IRuntime::CreationOptions options;
33     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
34
35     // Builds up the structure of the network.
36     armnn::INetworkPtr net(INetwork::Create());
37
38     IConnectableLayer* input = net->AddInputLayer(0, "input");
39     IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
40     IConnectableLayer* output  = net->AddOutputLayer(0, "output");
41
42     input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
43     softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
44
45     // Sets the tensors in the network.
46     TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
47     inputTensorInfo.SetQuantizationOffset(100);
48     inputTensorInfo.SetQuantizationScale(10000.0f);
49     input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
50
51     TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
52     outputTensorInfo.SetQuantizationOffset(0);
53     outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
54     softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
55
56     // optimize the network
57     IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
58
59     // Loads it into the runtime.
60     NetworkId netId;
61     auto error = runtime->LoadNetwork(netId, std::move(optNet));
62     BOOST_TEST(error == Status::Success);
63
64     // Creates structures for input & output.
65     std::vector<uint8_t> inputData
66     {
67         1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
68     };
69     std::vector<uint8_t> outputData(5);
70
71     armnn::InputTensors inputTensors
72     {
73         {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
74     };
75     armnn::OutputTensors outputTensors
76     {
77         {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
78     };
79
80     // Does the inference.
81     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
82
83     // Checks the results.
84     BOOST_TEST(outputData[0] == 0);
85     BOOST_TEST(outputData[1] == 0);
86     BOOST_TEST(outputData[2] == 0);
87     BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
88     BOOST_TEST(outputData[4] == 0);
89 }
90
91 BOOST_AUTO_TEST_CASE(TrivialAdd)
92 {
93     // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.
94
95     using namespace armnn;
96
97     // Create runtime in which test will run
98     armnn::IRuntime::CreationOptions options;
99     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
100
101     // Builds up the structure of the network.
102     armnn::INetworkPtr net(INetwork::Create());
103
104     IConnectableLayer* input1 = net->AddInputLayer(0);
105     IConnectableLayer* input2 = net->AddInputLayer(1);
106     IConnectableLayer* add    = net->AddAdditionLayer();
107     IConnectableLayer* output = net->AddOutputLayer(0);
108
109     input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
110     input2->GetOutputSlot(0).Connect(add->GetInputSlot(1));
111     add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
112
113     // Sets the tensors in the network.
114     TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32);
115     input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
116     input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
117     add->GetOutputSlot(0).SetTensorInfo(tensorInfo);
118
119     // optimize the network
120     IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
121
122     // Loads it into the runtime.
123     NetworkId netId;
124     runtime->LoadNetwork(netId, std::move(optNet));
125
126     // Creates structures for input & output - matching android nn test.
127     std::vector<float> input1Data
128     {
129         1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
130     };
131     std::vector<float> input2Data
132     {
133         100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
134     };
135     std::vector<float> outputData(12);
136
137     InputTensors inputTensors
138     {
139         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
140         {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
141     };
142     OutputTensors outputTensors
143     {
144         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
145     };
146
147     // Does the inference.
148     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
149
150     // Checks the results
151     BOOST_TEST(outputData[0] == 101);
152     BOOST_TEST(outputData[1] == 202);
153     BOOST_TEST(outputData[2] == 303);
154     BOOST_TEST(outputData[3] == 404);
155     BOOST_TEST(outputData[4] == 505);
156     BOOST_TEST(outputData[5] == 606);
157     BOOST_TEST(outputData[6] == 707);
158     BOOST_TEST(outputData[7] == 808);
159     BOOST_TEST(outputData[8] == 909);
160     BOOST_TEST(outputData[9] == 1010);
161     BOOST_TEST(outputData[10] == 1111);
162     BOOST_TEST(outputData[11] == 1212);
163 }
164
165 BOOST_AUTO_TEST_CASE(MultipleOutputs)
166 {
167     using namespace armnn;
168
169     // Create runtime in which test will run
170     armnn::IRuntime::CreationOptions options;
171     armnn::IRuntimePtr  runtime(armnn::IRuntime::Create(options));
172
173     // Builds up the structure of the network.
174     INetworkPtr net(INetwork::Create());
175
176     IConnectableLayer* input = net->AddInputLayer(0);
177
178     // ReLu1
179     ActivationDescriptor activation1Descriptor;
180     activation1Descriptor.m_Function = ActivationFunction::BoundedReLu;
181     activation1Descriptor.m_A = 1.f;
182     activation1Descriptor.m_B = -1.f;
183     IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor);
184
185     // ReLu6
186     ActivationDescriptor activation2Descriptor;
187     activation2Descriptor.m_Function = ActivationFunction::BoundedReLu;
188     activation2Descriptor.m_A = 6.0f;
189     IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor);
190
191     // BoundedReLu(min=2, max=5)
192     ActivationDescriptor activation3Descriptor;
193     activation3Descriptor.m_Function = ActivationFunction::BoundedReLu;
194     activation3Descriptor.m_A = 5.0f;
195     activation3Descriptor.m_B = 2.0f;
196     IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor);
197
198     IConnectableLayer* output1 = net->AddOutputLayer(0);
199     IConnectableLayer* output2 = net->AddOutputLayer(1);
200     IConnectableLayer* output3 = net->AddOutputLayer(2);
201
202     input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0));
203     input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0));
204     input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0));
205
206     activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
207     activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
208     activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0));
209
210     // Sets the tensors in the network.
211     TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32);
212     input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
213     activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
214     activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
215     activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);
216
217     // optimize the network
218     IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
219
220     // Loads it into the runtime.
221     NetworkId netId;
222     runtime->LoadNetwork(netId, std::move(optNet));
223
224     // Creates structures for input & output.
225     const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };
226
227     std::vector<float> output1Data(inputData.size());
228     std::vector<float> output2Data(inputData.size());
229     std::vector<float> output3Data(inputData.size());
230
231     InputTensors inputTensors
232     {
233         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
234     };
235     OutputTensors outputTensors
236     {
237         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
238         {1,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
239         {2,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
240     };
241
242     // Does the inference.
243     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
244
245     // Checks the results.
246     BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
247     BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
248     BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
249 }
250
251 BOOST_AUTO_TEST_CASE(TrivialMin)
252 {
253     using namespace armnn;
254
255     // Create runtime in which test will run
256     armnn::IRuntime::CreationOptions options;
257     armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
258
259     // Builds up the structure of the network.
260     armnn::INetworkPtr net(INetwork::Create());
261
262     IConnectableLayer* input1 = net->AddInputLayer(0);
263     IConnectableLayer* input2 = net->AddInputLayer(1);
264     IConnectableLayer* min    = net->AddMinimumLayer();
265     IConnectableLayer* output = net->AddOutputLayer(0);
266
267     input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
268     input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
269     min->GetOutputSlot(0).Connect(output->GetInputSlot(0));
270
271     // Sets the tensors in the network.
272     TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
273     input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
274     input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
275     min->GetOutputSlot(0).SetTensorInfo(tensorInfo);
276
277     // optimize the network
278     IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());
279
280     // Loads it into the runtime.
281     NetworkId netId;
282     runtime->LoadNetwork(netId, std::move(optNet));
283
284     // Creates structures for input & output - matching android nn test.
285     std::vector<float> input1Data
286         {
287             1.0f, 2.0f, 3.0f, 4.0f
288         };
289     std::vector<float> input2Data
290         {
291             2.0f, 1.0f, 5.0f, 2.0f
292         };
293     std::vector<float> outputData(4);
294
295     InputTensors inputTensors
296         {
297             {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
298             {1,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input2Data.data())}
299         };
300     OutputTensors outputTensors
301         {
302             {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
303         };
304
305     // Does the inference.
306     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
307
308     // Checks the results
309     BOOST_TEST(outputData[0] == 1);
310     BOOST_TEST(outputData[1] == 1);
311     BOOST_TEST(outputData[2] == 3);
312     BOOST_TEST(outputData[3] == 2);
313 }
314
315 BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
316 {
317     const std::vector<float > expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
318                                                0, 0, 0, 0,  1, 1, 1, 1 });
319
320     ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
321 }
322
323 BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
324 {
325     const std::vector<float> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
326                                               0, 0, 0, 0,  0, 0, 0, 0 });
327
328     ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
329 }
330
331 BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
332 {
333     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
334                                                 0, 0, 0, 0,  1, 1, 1, 1 });
335
336     ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
337 }
338
339 BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
340 {
341     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
342                                                 0, 0, 0, 0,  0, 0, 0, 0 });
343
344     ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
345 }
346
347 BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
348 {
349     const std::vector<float > expectedOutput({ 1, 0, 1, 1, 0, 0,
350                                                0, 0, 0, 0, 0, 0 });
351
352     ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
353 }
354
355 BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
356 {
357     const std::vector<float> expectedOutput({ 0, 1, 0, 0, 0, 1,
358                                               1, 1, 1, 1, 1, 1 });
359
360     ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
361 }
362
363 BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
364 {
365     const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
366                                                  0, 0, 0, 0, 0, 0 });
367
368     ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
369 }
370
371 BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
372 {
373     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
374                                                 1, 1, 1, 1, 1, 1 });
375
376     ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
377 }
378
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
    // Merger (concatenation) along dimension 0, Float32, on CpuRef.
    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
383
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
{
    // Merger (concatenation) along dimension 0, quantised uint8, on CpuRef.
    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
388
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
{
    // Merger (concatenation) along dimension 1, Float32, on CpuRef.
    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
393
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
{
    // Merger (concatenation) along dimension 1, quantised uint8, on CpuRef.
    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
398
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
{
    // Merger (concatenation) along dimension 2, Float32, on CpuRef.
    MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
403
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
{
    // Merger (concatenation) along dimension 2, quantised uint8, on CpuRef.
    MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
408
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
{
    // Merger (concatenation) along dimension 3, Float32, on CpuRef.
    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}
413
BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
{
    // Merger (concatenation) along dimension 3, quantised uint8, on CpuRef.
    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}
418
419 BOOST_AUTO_TEST_SUITE_END()