//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <backendsCommon/test/EndToEndTestImpl.hpp>
#include <backendsCommon/test/MergerTestImpl.hpp>
#include <backendsCommon/test/ArithmeticTestImpl.hpp>

#include <boost/test/unit_test.hpp>
#include <boost/test/execution_monitor.hpp>

BOOST_AUTO_TEST_SUITE(RefEndToEnd)
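
// Every test in this suite runs against the reference backend (CpuRef).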
std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};

BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
{
    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
}

BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
{
    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
}

BOOST_AUTO_TEST_CASE(Unsigned8)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0, "input");
    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
    IConnectableLayer* output = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
    inputTensorInfo.SetQuantizationOffset(100);
    inputTensorInfo.SetQuantizationScale(10000.0f);
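    // With a quantisation scale this large, neighbouring quantised values decode to real values
    // roughly 10000 apart, which is what drives the softmax into saturation below.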
    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);

    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
    outputTensorInfo.SetQuantizationOffset(0);
    outputTensorInfo.SetQuantizationScale(1.0f/255.0f);
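    // Softmax outputs lie in [0, 1], so a scale of 1/255 with zero offset maps that range onto
    // the full uint8 range 0..255.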
    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    auto error = runtime->LoadNetwork(netId, std::move(optNet));
    BOOST_TEST(error == Status::Success);

    // Creates structures for input & output.
    std::vector<uint8_t> inputData
    {
        1, 10, 3, 200, 5 // Some inputs - one of which is sufficiently larger than the others to saturate softmax.
    };
    std::vector<uint8_t> outputData(5);

    armnn::InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    armnn::OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    BOOST_TEST(outputData[0] == 0);
    BOOST_TEST(outputData[1] == 0);
    BOOST_TEST(outputData[2] == 0);
    BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
    BOOST_TEST(outputData[4] == 0);
}

BOOST_AUTO_TEST_CASE(TrivialAdd)
{
    // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.

    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* add = net->AddAdditionLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(add->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(add->GetInputSlot(1));
    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({3, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    add->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f, 12.f
    };
    std::vector<float> input2Data
    {
        100.f, 200.f, 300.f, 400.f, 500.f, 600.f, 700.f, 800.f, 900.f, 1000.f, 1100.f, 1200.f
    };
    std::vector<float> outputData(12);

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
        {1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
    BOOST_TEST(outputData[0] == 101);
    BOOST_TEST(outputData[1] == 202);
    BOOST_TEST(outputData[2] == 303);
    BOOST_TEST(outputData[3] == 404);
    BOOST_TEST(outputData[4] == 505);
    BOOST_TEST(outputData[5] == 606);
    BOOST_TEST(outputData[6] == 707);
    BOOST_TEST(outputData[7] == 808);
    BOOST_TEST(outputData[8] == 909);
    BOOST_TEST(outputData[9] == 1010);
    BOOST_TEST(outputData[10] == 1111);
    BOOST_TEST(outputData[11] == 1212);
}

BOOST_AUTO_TEST_CASE(MultipleOutputs)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);
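
    // BoundedReLu(min=-1, max=1): for BoundedReLu, m_A is the upper bound and m_B the lower bound.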
    ActivationDescriptor activation1Descriptor;
    activation1Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation1Descriptor.m_A = 1.f;
    activation1Descriptor.m_B = -1.f;
    IConnectableLayer* activation1 = net->AddActivationLayer(activation1Descriptor);
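
    // BoundedReLu(min=0, max=6): m_B is left at its default of 0.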
    ActivationDescriptor activation2Descriptor;
    activation2Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation2Descriptor.m_A = 6.0f;
    IConnectableLayer* activation2 = net->AddActivationLayer(activation2Descriptor);

    // BoundedReLu(min=2, max=5)
    ActivationDescriptor activation3Descriptor;
    activation3Descriptor.m_Function = ActivationFunction::BoundedReLu;
    activation3Descriptor.m_A = 5.0f;
    activation3Descriptor.m_B = 2.0f;
    IConnectableLayer* activation3 = net->AddActivationLayer(activation3Descriptor);

    IConnectableLayer* output1 = net->AddOutputLayer(0);
    IConnectableLayer* output2 = net->AddOutputLayer(1);
    IConnectableLayer* output3 = net->AddOutputLayer(2);

    input->GetOutputSlot(0).Connect(activation1->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation2->GetInputSlot(0));
    input->GetOutputSlot(0).Connect(activation3->GetInputSlot(0));

    activation1->GetOutputSlot(0).Connect(output1->GetInputSlot(0));
    activation2->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
    activation3->GetOutputSlot(0).Connect(output3->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({ 10 }), DataType::Float32);
    input->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    activation3->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output.
    const std::vector<float> inputData{ 3.f, 5.f, 2.f, 3.f, 7.f, 0.f, -2.f, -1.f, 3.f, 3.f };

    std::vector<float> output1Data(inputData.size());
    std::vector<float> output2Data(inputData.size());
    std::vector<float> output3Data(inputData.size());

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), output1Data.data())},
        {1, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 1), output2Data.data())},
        {2, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 2), output3Data.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results.
    BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
    BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
    BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
}

BOOST_AUTO_TEST_CASE(TrivialMin)
{
    using namespace armnn;

    // Create runtime in which test will run
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Builds up the structure of the network.
    armnn::INetworkPtr net(INetwork::Create());

    IConnectableLayer* input1 = net->AddInputLayer(0);
    IConnectableLayer* input2 = net->AddInputLayer(1);
    IConnectableLayer* min = net->AddMinimumLayer();
    IConnectableLayer* output = net->AddOutputLayer(0);

    input1->GetOutputSlot(0).Connect(min->GetInputSlot(0));
    input2->GetOutputSlot(0).Connect(min->GetInputSlot(1));
    min->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // Sets the tensors in the network.
    TensorInfo tensorInfo(TensorShape({1, 1, 1, 4}), DataType::Float32);
    input1->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    input2->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    min->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    // optimize the network
    IOptimizedNetworkPtr optNet = Optimize(*net, defaultBackends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    // Creates structures for input & output - matching android nn test.
    std::vector<float> input1Data
    {
        1.0f, 2.0f, 3.0f, 4.0f
    };
    std::vector<float> input2Data
    {
        2.0f, 1.0f, 5.0f, 2.0f
    };
    std::vector<float> outputData(4);

    InputTensors inputTensors
    {
        {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
        {1, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
    };
    OutputTensors outputTensors
    {
        {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results
    BOOST_TEST(outputData[0] == 1);
    BOOST_TEST(outputData[1] == 1);
    BOOST_TEST(outputData[2] == 3);
    BOOST_TEST(outputData[3] == 2);
}
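
// The following tests drive the shared helpers in ArithmeticTestImpl.hpp. The Equal and Greater
// layers compare two tensors element-wise, writing 1 where the predicate holds and 0 elsewhere;
// the Broadcast variants additionally exercise shape broadcasting between the two inputs.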

BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
{
    const std::vector<float> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
                                              0, 0, 0, 0, 1, 1, 1, 1 });

    ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
{
    const std::vector<float> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                              0, 0, 0, 0, 0, 0, 0, 0 });

    ArithmeticSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1, 0, 0, 0, 0,
                                                0, 0, 0, 0, 1, 1, 1, 1 });

    ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0, 1, 1, 1, 1,
                                                0, 0, 0, 0, 0, 0, 0, 0 });

    ArithmeticSimpleEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
{
    const std::vector<float> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                              0, 0, 0, 0, 0, 0 }); // second row truncated in source; all-zero assumed

    ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Equal, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
{
    const std::vector<float> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                              1, 1, 1, 1, 1, 1 }); // second row truncated in source; all-one assumed

    ArithmeticBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends, LayerType::Greater, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                0, 0, 0, 0, 0, 0 }); // second row truncated in source; all-zero assumed

    ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Equal, expectedOutput);
}

BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
{
    const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                1, 1, 1, 1, 1, 1 }); // second row truncated in source; all-one assumed

    ArithmeticBroadcastEndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends, LayerType::Greater, expectedOutput);
}
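
// Merger is Arm NN's concatenation layer; the helpers below concatenate tensors along each of the
// four dimensions in turn, in both Float32 and QuantisedAsymm8.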

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Test)
{
    MergerDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim0Uint8Test)
{
    MergerDim0EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Test)
{
    MergerDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim1Uint8Test)
{
    MergerDim1EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Test)
{
    MergerDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim2Uint8Test)
{
    MergerDim2EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Test)
{
    MergerDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
}

BOOST_AUTO_TEST_CASE(RefMergerEndToEndDim3Uint8Test)
{
    MergerDim3EndToEnd<armnn::DataType::QuantisedAsymm8>(defaultBackends);
}

BOOST_AUTO_TEST_SUITE_END()