//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
#include <boost/test/unit_test.hpp>

#include "armnn/ArmNN.hpp"

#include "backends/RefWorkloadFactory.hpp"
#include "backends/ClWorkloadFactory.hpp"
#include "backends/NeonWorkloadFactory.hpp"

#include "GraphUtils.hpp"
19 bool AreAllLayerInputSlotsConnected(const armnn::IConnectableLayer& layer)
21 bool allConnected = true;
22 for (unsigned int i = 0; i < layer.GetNumInputSlots(); ++i)
24 const bool inputConnected = layer.GetInputSlot(i).GetConnection() != nullptr;
25 allConnected &= inputConnected;
32 BOOST_AUTO_TEST_SUITE(Network)
34 BOOST_AUTO_TEST_CASE(LayerGuids)
37 armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
38 armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
39 armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
41 BOOST_TEST(inputId != addId);
42 BOOST_TEST(addId != outputId);
43 BOOST_TEST(inputId != outputId);
46 BOOST_AUTO_TEST_CASE(SerializeToDot)
51 auto input = net.AddInputLayer(0);
52 auto add = net.AddAdditionLayer();
53 auto output = net.AddOutputLayer(0);
56 input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
57 input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
58 add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
60 armnn::TensorShape shape({4});
61 armnn::TensorInfo info(shape, armnn::DataType::Float32);
62 input->GetOutputSlot(0).SetTensorInfo(info);
63 add->GetOutputSlot(0).SetTensorInfo(info);
65 armnn::IRuntime::CreationOptions options;
66 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
68 std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
69 armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
71 std::ostringstream ss;
72 optimizedNet->SerializeToDot(ss);
74 auto inputId = input->GetGuid();
75 auto addId = add->GetGuid();
76 auto outputId = output->GetGuid();
78 std::stringstream expected;
80 "digraph Optimized {\n"
81 " node [shape=\"record\"];\n"
82 " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
83 " " << inputId << " [label=\"{Input}\"];\n"
84 " " << addId << " [label=\"{Addition}\"];\n"
85 " " << outputId << " [label=\"{Output}\"];\n"
86 " " << inputId << " -> " << addId << " [label=< [4] >];\n"
87 " " << inputId << " -> " << addId << " [label=< [4] >];\n"
88 " " << addId << " -> " << outputId << " [label=< [4] >];\n"
91 BOOST_TEST(ss.str() == expected.str());
94 BOOST_AUTO_TEST_CASE(NetworkBasic)
97 BOOST_TEST(net.PrintGraph() == armnn::Status::Success);
100 BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork)
103 armnn::INetwork& inet = net;
104 inet.AddInputLayer(0);
105 inet.AddAdditionLayer();
106 inet.AddActivationLayer(armnn::ActivationDescriptor());
107 inet.AddOutputLayer(0);
110 BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
113 net.AddInputLayer(0);
114 net.AddAdditionLayer();
115 net.AddActivationLayer(armnn::ActivationDescriptor());
116 net.AddOutputLayer(0);
119 BOOST_AUTO_TEST_CASE(NetworkModification)
123 armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
124 BOOST_TEST(inputLayer);
126 unsigned int dims[] = { 10,1,1,1 };
127 std::vector<float> convWeightsData(10);
128 armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), convWeightsData);
130 armnn::Convolution2dDescriptor convDesc2d;
131 armnn::IConnectableLayer* const convLayer = net.AddConvolution2dLayer(convDesc2d, weights, "conv layer");
132 BOOST_TEST(convLayer);
134 inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
136 armnn::FullyConnectedDescriptor fullyConnectedDesc;
137 armnn::IConnectableLayer* const fullyConnectedLayer = net.AddFullyConnectedLayer(fullyConnectedDesc,
140 BOOST_TEST(fullyConnectedLayer);
142 convLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
144 armnn::Pooling2dDescriptor pooling2dDesc;
145 armnn::IConnectableLayer* const poolingLayer = net.AddPooling2dLayer(pooling2dDesc, "pooling2d");
146 BOOST_TEST(poolingLayer);
148 fullyConnectedLayer->GetOutputSlot(0).Connect(poolingLayer->GetInputSlot(0));
150 armnn::ActivationDescriptor activationDesc;
151 armnn::IConnectableLayer* const activationLayer = net.AddActivationLayer(activationDesc, "activation");
152 BOOST_TEST(activationLayer);
154 poolingLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
156 armnn::NormalizationDescriptor normalizationDesc;
157 armnn::IConnectableLayer* const normalizationLayer = net.AddNormalizationLayer(normalizationDesc, "normalization");
158 BOOST_TEST(normalizationLayer);
160 activationLayer->GetOutputSlot(0).Connect(normalizationLayer->GetInputSlot(0));
162 armnn::SoftmaxDescriptor softmaxDesc;
163 armnn::IConnectableLayer* const softmaxLayer = net.AddSoftmaxLayer(softmaxDesc, "softmax");
164 BOOST_TEST(softmaxLayer);
166 normalizationLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
168 armnn::BatchNormalizationDescriptor batchNormDesc;
170 armnn::TensorInfo tensorInfo({ 1 }, armnn::DataType::Float32);
171 std::vector<float> data(tensorInfo.GetNumBytes() / sizeof(float));
172 armnn::ConstTensor invalidTensor(tensorInfo, data);
174 armnn::IConnectableLayer* const batchNormalizationLayer = net.AddBatchNormalizationLayer(batchNormDesc,
180 BOOST_TEST(batchNormalizationLayer);
182 softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
184 armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition");
185 BOOST_TEST(additionLayer);
187 batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
188 batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
190 armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication");
191 BOOST_TEST(multiplicationLayer);
193 additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
194 additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(1));
196 armnn::IConnectableLayer* const outputLayer = net.AddOutputLayer(0, "output layer");
197 BOOST_TEST(outputLayer);
199 multiplicationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
201 //Tests that all layers are present in the graph.
202 BOOST_TEST(net.GetGraph().GetNumLayers() == 11);
204 //Tests that the vertices exist and have correct names.
205 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "input layer"));
206 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "conv layer"));
207 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "fully connected"));
208 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "pooling2d"));
209 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "activation"));
210 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "normalization"));
211 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "softmax"));
212 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "batch norm"));
213 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "addition"));
214 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "multiplication"));
215 BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "output layer"));
217 auto checkOneOutputToOneInputConnection = []
218 (const armnn::IConnectableLayer* const srcLayer,
219 const armnn::IConnectableLayer* const tgtLayer,
220 int expectedSrcNumInputs = 1,
221 int expectedDstNumOutputs = 1)
223 BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
224 BOOST_TEST(srcLayer->GetNumOutputSlots() == 1);
225 BOOST_TEST(tgtLayer->GetNumInputSlots() == 1);
226 BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
228 BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 1);
229 BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0));
230 BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection());
232 auto checkOneOutputToTwoInputsConnections = []
233 (const armnn::IConnectableLayer* const srcLayer,
234 const armnn::IConnectableLayer* const tgtLayer,
235 int expectedSrcNumInputs,
236 int expectedDstNumOutputs = 1)
238 BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
239 BOOST_TEST(srcLayer->GetNumOutputSlots() == 1);
240 BOOST_TEST(tgtLayer->GetNumInputSlots() == 2);
241 BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
243 BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 2);
244 for (unsigned int i = 0; i < srcLayer->GetOutputSlot(0).GetNumConnections(); ++i)
246 BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(i) == &tgtLayer->GetInputSlot(i));
247 BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection());
251 BOOST_TEST(AreAllLayerInputSlotsConnected(*convLayer));
252 BOOST_TEST(AreAllLayerInputSlotsConnected(*fullyConnectedLayer));
253 BOOST_TEST(AreAllLayerInputSlotsConnected(*poolingLayer));
254 BOOST_TEST(AreAllLayerInputSlotsConnected(*activationLayer));
255 BOOST_TEST(AreAllLayerInputSlotsConnected(*normalizationLayer));
256 BOOST_TEST(AreAllLayerInputSlotsConnected(*softmaxLayer));
257 BOOST_TEST(AreAllLayerInputSlotsConnected(*batchNormalizationLayer));
258 BOOST_TEST(AreAllLayerInputSlotsConnected(*additionLayer));
259 BOOST_TEST(AreAllLayerInputSlotsConnected(*multiplicationLayer));
260 BOOST_TEST(AreAllLayerInputSlotsConnected(*outputLayer));
262 // Checks connectivity.
263 checkOneOutputToOneInputConnection(inputLayer, convLayer, 0);
264 checkOneOutputToOneInputConnection(convLayer, fullyConnectedLayer);
265 checkOneOutputToOneInputConnection(fullyConnectedLayer, poolingLayer);
266 checkOneOutputToOneInputConnection(poolingLayer, activationLayer);
267 checkOneOutputToOneInputConnection(activationLayer, normalizationLayer);
268 checkOneOutputToOneInputConnection(normalizationLayer, softmaxLayer);
269 checkOneOutputToOneInputConnection(softmaxLayer, batchNormalizationLayer);
270 checkOneOutputToTwoInputsConnections(batchNormalizationLayer, additionLayer, 1);
271 checkOneOutputToTwoInputsConnections(additionLayer, multiplicationLayer, 2);
272 checkOneOutputToOneInputConnection(multiplicationLayer, outputLayer, 2, 0);
275 BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMerger)
279 // Adds an input layer and an input tensor descriptor.
280 armnn::IConnectableLayer* inputLayer = net.AddInputLayer(0, "input layer");
281 BOOST_TEST(inputLayer);
283 // Adds a splitter layer.
284 armnn::ViewsDescriptor splitterDesc(2,4);
286 armnn::IConnectableLayer* splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
287 BOOST_TEST(splitterLayer);
289 inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
291 // Adds a softmax layer 1.
292 armnn::SoftmaxDescriptor softmaxDescriptor;
293 armnn::IConnectableLayer* softmaxLayer1 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
294 BOOST_TEST(softmaxLayer1);
296 splitterLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
298 // Adds a softmax layer 2.
299 armnn::IConnectableLayer* softmaxLayer2 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
300 BOOST_TEST(softmaxLayer2);
302 splitterLayer->GetOutputSlot(1).Connect(softmaxLayer2->GetInputSlot(0));
304 // Adds a merger layer.
305 armnn::OriginsDescriptor mergerDesc(2, 4);
307 armnn::IConnectableLayer* mergerLayer = net.AddMergerLayer(mergerDesc, "merger layer");
308 BOOST_TEST(mergerLayer);
310 softmaxLayer1->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(0));
311 softmaxLayer2->GetOutputSlot(0).Connect(mergerLayer->GetInputSlot(1));
313 // Adds an output layer.
314 armnn::IConnectableLayer* outputLayer = net.AddOutputLayer(0, "output layer");
315 BOOST_TEST(outputLayer);
317 mergerLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
319 BOOST_TEST(splitterLayer->GetNumOutputSlots() == 2);
320 BOOST_TEST(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0));
321 BOOST_TEST(&splitterLayer->GetOutputSlot(0) == softmaxLayer1->GetInputSlot(0).GetConnection());
322 BOOST_TEST(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0));
323 BOOST_TEST(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection());
325 BOOST_TEST(mergerLayer->GetNumInputSlots() == 2);
326 BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(0));
327 BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == mergerLayer->GetInputSlot(0).GetConnection());
328 BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &mergerLayer->GetInputSlot(1));
329 BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == mergerLayer->GetInputSlot(1).GetConnection());
332 BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
336 // Adds an input layer and an input tensor descriptor.
337 armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
340 // Adds a splitter layer.
341 armnn::ViewsDescriptor splitterDesc(2,4);
343 armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
344 BOOST_TEST(splitterLayer);
346 layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
348 // Adds a softmax layer 1.
349 armnn::SoftmaxDescriptor softmaxDescriptor;
350 armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
351 BOOST_TEST(softmax1Layer);
353 splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0));
355 // Adds a softmax layer 2.
356 armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
357 BOOST_TEST(softmax2Layer);
359 splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
361 // Adds addition layer.
362 layer = net.AddAdditionLayer("add layer");
365 softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
366 softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
368 // Adds an output layer.
369 armnn::IConnectableLayer* prevLayer = layer;
370 layer = net.AddOutputLayer(0, "output layer");
372 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
377 BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
381 // Adds an input layer and an input tensor descriptor.
382 armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
385 // Adds a splitter layer.
386 armnn::ViewsDescriptor splitterDesc(2,4);
387 armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
388 BOOST_TEST(splitterLayer);
390 layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
392 // Adds a softmax layer 1.
393 armnn::SoftmaxDescriptor softmaxDescriptor;
394 armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
395 BOOST_TEST(softmax1Layer);
397 splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0));
399 // Adds a softmax layer 2.
400 armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
401 BOOST_TEST(softmax2Layer);
403 splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
405 // Adds multiplication layer.
406 layer = net.AddMultiplicationLayer("multiplication layer");
409 softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
410 softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
412 // Adds an output layer.
413 armnn::IConnectableLayer* prevLayer = layer;
414 layer = net.AddOutputLayer(0, "output layer");
417 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
420 BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
422 const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
426 armnn::NormalizationDescriptor nmDesc;
427 armnn::ActivationDescriptor acDesc;
440 armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
441 layer->GetOutputSlot(0).SetTensorInfo(desc);
443 armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
445 layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
446 normLayer->GetOutputSlot(0).SetTensorInfo(desc);
448 layer = net.AddActivationLayer(acDesc, "ac");
450 normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
451 layer->GetOutputSlot(0).SetTensorInfo(desc);
453 armnn::IConnectableLayer* prevLayer = layer;
454 layer = net.AddMultiplicationLayer("ml");
456 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
457 normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
458 layer->GetOutputSlot(0).SetTensorInfo(desc);
461 armnn::SoftmaxDescriptor softmaxDescriptor;
462 layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
464 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
465 layer->GetOutputSlot(0).SetTensorInfo(desc);
468 layer = net.AddOutputLayer(0, "ot");
470 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
472 armnn::IRuntime::CreationOptions options;
473 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
475 std::vector<armnn::Compute> backends = { armnn::Compute::CpuRef };
476 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
477 static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph().AllocateDynamicBuffers();
480 // Validates workloads.
481 armnn::RefWorkloadFactory fact;
482 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
484 BOOST_CHECK_NO_THROW(
485 layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
489 #if ARMCOMPUTENEON_ENABLED
490 BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
492 // build up the structure of the network
493 armnn::INetworkPtr net(armnn::INetwork::Create());
495 armnn::IConnectableLayer* input = net->AddInputLayer(0);
497 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
499 input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
501 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
503 armnn::IRuntime::CreationOptions options;
504 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
506 std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
507 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
509 // validate workloads
510 armnn::NeonWorkloadFactory fact;
511 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
513 BOOST_CHECK_EQUAL(armnn::Compute::CpuAcc, layer->GetComputeDevice());
514 BOOST_CHECK_NO_THROW(
515 layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
518 #endif // ARMCOMPUTENEON_ENABLED
520 #if ARMCOMPUTECL_ENABLED
521 BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
523 // build up the structure of the network
524 armnn::INetworkPtr net(armnn::INetwork::Create());
526 armnn::IConnectableLayer* input = net->AddInputLayer(0);
528 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
530 input->GetOutputSlot(0).Connect(output->GetInputSlot(0));
532 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
534 armnn::IRuntime::CreationOptions options;
535 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
537 std::vector<armnn::Compute> backends = { armnn::Compute::GpuAcc };
538 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
540 // validate workloads
541 armnn::ClWorkloadFactory fact;
542 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
544 BOOST_CHECK_EQUAL(armnn::Compute::GpuAcc, layer->GetComputeDevice());
545 BOOST_CHECK_NO_THROW(
546 layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
549 #endif // ARMCOMPUTECL_ENABLED
551 BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
553 // build up the structure of the network
554 armnn::INetworkPtr net(armnn::INetwork::Create());
556 armnn::IConnectableLayer* input = net->AddInputLayer(0);
558 // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
559 armnn::NormalizationDescriptor descriptor;
560 armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
562 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
564 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
565 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
567 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
568 normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
570 armnn::IRuntime::CreationOptions options;
571 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
573 std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc };
574 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
575 BOOST_CHECK(!optNet);
578 BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
580 // build up the structure of the network
581 armnn::INetworkPtr net(armnn::INetwork::Create());
583 armnn::IConnectableLayer* input = net->AddInputLayer(0);
585 // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
586 armnn::NormalizationDescriptor descriptor;
587 armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
589 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
591 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
592 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
594 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
595 normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
597 armnn::IRuntime::CreationOptions options;
598 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
600 std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
601 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
602 BOOST_REQUIRE(optNet);
604 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
606 // If NEON is enabled, Input and Output layers are supported by CpuAcc,
607 // the other layers are supported by CpuRef.
608 // If NEON is not enabled, all layers are supported by CpuRef.
609 #if ARMCOMPUTENEON_ENABLED
610 if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
612 BOOST_CHECK_EQUAL(armnn::Compute::CpuAcc, layer->GetComputeDevice());
614 else if (layer->GetType() == armnn::LayerType::Normalization)
616 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
619 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
624 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
626 const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
630 armnn::NormalizationDescriptor nmDesc;
631 armnn::ActivationDescriptor acDesc;
644 armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
645 layer->GetOutputSlot(0).SetTensorInfo(desc);
647 armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
649 layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
650 normLayer->GetOutputSlot(0).SetTensorInfo(desc);
652 layer = net.AddActivationLayer(acDesc, "ac");
654 normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
655 layer->GetOutputSlot(0).SetTensorInfo(desc);
657 armnn::IConnectableLayer* prevLayer = layer;
658 layer = net.AddMultiplicationLayer("ml");
660 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
661 normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
662 layer->GetOutputSlot(0).SetTensorInfo(desc);
665 armnn::SoftmaxDescriptor softmaxDescriptor;
666 layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
668 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
669 layer->GetOutputSlot(0).SetTensorInfo(desc);
672 layer = net.AddOutputLayer(0, "ot");
674 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
676 armnn::IRuntime::CreationOptions options;
677 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
679 std::vector<armnn::Compute> backends = { armnn::Compute::Undefined };
681 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
682 BOOST_CHECK(!optNet);
686 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
688 const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
692 armnn::NormalizationDescriptor nmDesc;
693 armnn::ActivationDescriptor acDesc;
706 armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
707 layer->GetOutputSlot(0).SetTensorInfo(desc);
709 armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
711 layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
712 normLayer->GetOutputSlot(0).SetTensorInfo(desc);
714 layer = net.AddActivationLayer(acDesc, "ac");
716 normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
717 layer->GetOutputSlot(0).SetTensorInfo(desc);
719 armnn::IConnectableLayer* prevLayer = layer;
720 layer = net.AddMultiplicationLayer("ml");
722 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
723 normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
724 layer->GetOutputSlot(0).SetTensorInfo(desc);
727 armnn::SoftmaxDescriptor softmaxDescriptor;
728 layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
730 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
731 layer->GetOutputSlot(0).SetTensorInfo(desc);
734 layer = net.AddOutputLayer(0, "ot");
736 prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
738 armnn::IRuntime::CreationOptions options;
739 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
741 std::vector<armnn::Compute> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
743 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
746 // validate workloads
747 armnn::RefWorkloadFactory fact;
748 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
750 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
751 BOOST_CHECK_NO_THROW(
752 layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
755 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
757 // build up the structure of the network
758 armnn::INetworkPtr net(armnn::INetwork::Create());
760 armnn::IConnectableLayer* input = net->AddInputLayer(0);
762 // This layer configuration isn't supported by CpuAcc but it allows to fallback to CpuRef.
763 armnn::NormalizationDescriptor descriptor;
764 armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
766 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
768 input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
769 normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
771 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
772 normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
774 armnn::IRuntime::CreationOptions options;
775 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
777 std::vector<armnn::Compute> backends = { armnn::Compute::CpuAcc,
778 armnn::Compute::GpuAcc,
779 armnn::Compute::CpuRef };
781 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
782 BOOST_REQUIRE(optNet);
784 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
786 // If NEON is enabled, Input and Output layers are supported by CpuAcc,
787 // the other layers are supported by CpuRef.
788 // If only CL is enabled, Input and Output layers are supported by GpuAcc,
789 // the other layers are supported by CpuRef.
790 // If neither NEON, nor CL is enabled, all layers are supported by CpuRef.
791 #if ARMCOMPUTENEON_ENABLED
792 if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
794 BOOST_CHECK_EQUAL(armnn::Compute::CpuAcc, layer->GetComputeDevice());
796 else if (layer->GetType() == armnn::LayerType::Normalization)
798 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
800 #elif ARMCOMPUTECL_ENABLED
801 if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
803 BOOST_CHECK_EQUAL(armnn::Compute::GpuAcc, layer->GetComputeDevice());
805 else if (layer->GetType() == armnn::LayerType::Normalization)
807 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
810 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
815 BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
817 // Create runtime in which test will run
818 armnn::IRuntime::CreationOptions options;
819 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
821 std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
823 // build up the structure of the network
824 armnn::INetworkPtr net(armnn::INetwork::Create());
826 armnn::IConnectableLayer* input = net->AddInputLayer(0);
828 armnn::PermuteDescriptor descriptor({0, 2, 3, 1});
829 armnn::IConnectableLayer* permute = net->AddPermuteLayer(descriptor);
831 armnn::IConnectableLayer* output = net->AddOutputLayer(0);
833 input->GetOutputSlot(0).Connect(permute->GetInputSlot(0));
834 permute->GetOutputSlot(0).Connect(output->GetInputSlot(0));
836 input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
837 permute->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 4, 1, 4 }, armnn::DataType::Float32));
839 // optimize the network
840 armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
842 for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
844 BOOST_CHECK_EQUAL(armnn::Compute::CpuRef, layer->GetComputeDevice());
848 BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnCpuRef)
850 // Test to check when FP16 Turbo mode set
851 // it converts the FP32 network to FP16 Network
852 // add FP32ToFP16 conversion layer after the InputLayer
853 // add FP16ToFP32 conversion layer after the OutputLayer
854 // checks the other layers if they are supported in FP16
855 // if they are not put the conversion layers before and after
856 // if they are not supported in FP16 use FP32 instead
857 // if there are inverse conversion layers remove them with optimization
858 // at the moment FloorLayer is not supported in FP16 so it rolls back to FP32
859 // and inverse conversion layers are removed by the optimizer
863 auto input = net.AddInputLayer(0);
864 auto floor = net.AddFloorLayer();
865 auto output = net.AddOutputLayer(0);
868 input->GetOutputSlot(0).Connect(floor->GetInputSlot(0));
869 floor->GetOutputSlot(0).Connect(output->GetInputSlot(0));
871 armnn::TensorShape shape({4});
872 armnn::TensorInfo info(shape, armnn::DataType::Float32);
873 input->GetOutputSlot(0).SetTensorInfo(info);
874 floor->GetOutputSlot(0).SetTensorInfo(info);
876 armnn::IRuntime::CreationOptions options;
877 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
879 std::vector<armnn::Compute> backends = {armnn::Compute::CpuRef};
881 armnn::OptimizerOptions optimizerOptions;
882 optimizerOptions.m_ReduceFp32ToFp16 = true;
884 armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
887 std::ostringstream ss;
888 optimizedNet->SerializeToDot(ss);
890 auto inputId = input->GetGuid();
891 auto floorId = floor->GetGuid();
892 auto outputId = output->GetGuid();
894 std::stringstream expected;
896 "digraph Optimized {\n"
897 " node [shape=\"record\"];\n"
898 " edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
899 " " << inputId << " [label=\"{Input}\"];\n"
900 " " << floorId << " [label=\"{Floor}\"];\n"
901 " " << outputId << " [label=\"{Output}\"];\n"
902 " " << inputId << " -> " << floorId << " [label=< [4] >];\n"
903 " " << floorId << " -> " << outputId << " [label=< [4] >];\n"
906 BOOST_TEST(ss.str() == expected.str());
909 #if ARMCOMPUTECL_ENABLED
910 BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
912 // Test to check when Fp16 Turbo mode set
913 // it converts the Fp32 network to Fp16 Network
914 // add Fp32ToFp16 conversion layer after the InputLayer
915 // add Fp16ToFp32 conversion layer after the OutputLayer
916 // checks the other layers if they are supported in Fp16
917 // if they are not put the conversion layers before and after
918 // if they are not supported in Fp16 use Fp32 instead
919 // if there are inverse conversion layers remove them with optimization
920 // at the moment FloorLayer is not supported in Fp16 so it rolls back to Fp32
921 // and inverse conversion layers are removed by the optimizer
925 auto input = net.AddInputLayer(0, "input layer");
927 armnn::ActivationDescriptor activation1Descriptor;
928 activation1Descriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
929 activation1Descriptor.m_A = 1.f;
930 activation1Descriptor.m_B = -1.f;
931 auto activation = net.AddActivationLayer(activation1Descriptor, "activation layer");
932 auto output = net.AddOutputLayer(0, "output layer");
935 input->GetOutputSlot(0).Connect(activation->GetInputSlot(0));
936 activation->GetOutputSlot(0).Connect(output->GetInputSlot(0));
938 armnn::TensorShape shape({4});
939 armnn::TensorInfo info(shape, armnn::DataType::Float32);
940 input->GetOutputSlot(0).SetTensorInfo(info);
941 activation->GetOutputSlot(0).SetTensorInfo(info);
943 armnn::IRuntime::CreationOptions options;
944 armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
946 std::vector<armnn::Compute> backends = {armnn::Compute::GpuAcc};
948 armnn::OptimizerOptions optimizerOptions;
949 optimizerOptions.m_ReduceFp32ToFp16 = true;
951 armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec(),
954 const armnn::Graph& graph = static_cast<armnn::OptimizedNetwork*>(optimizedNet.get())->GetGraph();
956 // Tests that all layers are present in the graph.
957 BOOST_TEST(graph.GetNumLayers() == 5);
959 // Tests that the vertices exist and have correct names.
960 BOOST_TEST(GraphHasNamedLayer(graph, "input layer"));
961 BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
962 BOOST_TEST(GraphHasNamedLayer(graph, "activation layer"));
963 BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
964 BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
968 BOOST_AUTO_TEST_SUITE_END()