// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
6 #include <test/CreateWorkload.hpp>
8 #include <backendsCommon/CpuTensorHandle.hpp>
9 #include <reference/RefWorkloadFactory.hpp>
10 #include <reference/workloads/RefWorkloads.hpp>
15 template<typename Workload>
16 void CheckInputOutput(std::unique_ptr<Workload> workload, const TensorInfo& inputInfo, const TensorInfo& outputInfo)
18 auto queueDescriptor = workload->GetData();
19 auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
20 auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
21 BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
22 BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
25 template <typename Workload>
26 void CheckInputsOutput(std::unique_ptr<Workload> workload,
27 const TensorInfo& inputInfo0,
28 const TensorInfo& inputInfo1,
29 const TensorInfo& outputInfo)
31 auto queueDescriptor = workload->GetData();
32 auto inputHandle0 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
33 auto inputHandle1 = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[1]);
34 auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
35 BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
36 BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
37 BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
41 BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
43 template <typename ActivationWorkloadType, armnn::DataType DataType>
44 static void RefCreateActivationWorkloadTest()
47 RefWorkloadFactory factory;
48 auto workload = CreateActivationWorkloadTest<ActivationWorkloadType, DataType>(factory, graph);
50 // Checks that outputs are as we expect them (see definition of CreateActivationWorkloadTest).
51 CheckInputOutput(std::move(workload),
52 TensorInfo({ 1, 1 }, DataType),
53 TensorInfo({ 1, 1 }, DataType));
56 BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
58 RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
61 BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
63 RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QuantisedAsymm8>();
66 template <typename WorkloadType,
67 typename DescriptorType,
69 armnn::DataType DataType>
70 static void RefCreateElementwiseWorkloadTest()
73 RefWorkloadFactory factory;
74 auto workload = CreateElementwiseWorkloadTest<WorkloadType, DescriptorType, LayerType, DataType>(
77 CheckInputsOutput(std::move(workload),
78 TensorInfo({ 2, 3 }, DataType),
79 TensorInfo({ 2, 3 }, DataType),
80 TensorInfo({ 2, 3 }, DataType));
83 BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
85 RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
86 AdditionQueueDescriptor,
88 armnn::DataType::Float32>();
91 BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
93 RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
94 AdditionQueueDescriptor,
96 armnn::DataType::QuantisedAsymm8>();
99 BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
101 RefCreateElementwiseWorkloadTest<RefAdditionWorkload,
102 AdditionQueueDescriptor,
104 armnn::DataType::QuantisedSymm16>();
107 BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
109 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
110 SubtractionQueueDescriptor,
112 armnn::DataType::Float32>();
115 BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
117 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
118 SubtractionQueueDescriptor,
120 armnn::DataType::QuantisedAsymm8>();
123 BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
125 RefCreateElementwiseWorkloadTest<RefSubtractionWorkload,
126 SubtractionQueueDescriptor,
128 armnn::DataType::QuantisedSymm16>();
131 BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
133 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
134 MultiplicationQueueDescriptor,
136 armnn::DataType::Float32>();
139 BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
141 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
142 MultiplicationQueueDescriptor,
144 armnn::DataType::QuantisedAsymm8>();
147 BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
149 RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload,
150 MultiplicationQueueDescriptor,
152 armnn::DataType::QuantisedSymm16>();
155 BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkload)
157 RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
158 DivisionQueueDescriptor,
160 armnn::DataType::Float32>();
163 BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
165 RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
166 DivisionQueueDescriptor,
168 armnn::DataType::QuantisedAsymm8>();
171 BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
173 RefCreateElementwiseWorkloadTest<RefDivisionWorkload,
174 DivisionQueueDescriptor,
176 armnn::DataType::QuantisedSymm16>();
179 template <typename BatchNormalizationWorkloadType, armnn::DataType DataType>
180 static void RefCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
183 RefWorkloadFactory factory;
185 CreateBatchNormalizationWorkloadTest<BatchNormalizationWorkloadType, DataType>(factory, graph, dataLayout);
187 TensorShape inputShape;
188 TensorShape outputShape;
192 case DataLayout::NHWC:
193 inputShape = { 2, 4, 4, 3 };
194 outputShape = { 2, 4, 4, 3 };
196 case DataLayout::NCHW:
198 inputShape = { 2, 3, 4, 4 };
199 outputShape = { 2, 3, 4, 4 };
203 // Checks that outputs and inputs are as we expect them (see definition of CreateBatchNormalizationWorkloadTest).
204 CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
207 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
209 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload,armnn::DataType::Float32>
213 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
215 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationFloat32Workload, armnn::DataType::Float32>
219 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
221 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
225 BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
227 RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationUint8Workload, armnn::DataType::QuantisedAsymm8>
231 BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
234 RefWorkloadFactory factory;
235 auto workload = CreateConvertFp16ToFp32WorkloadTest<RefConvertFp16ToFp32Workload>(factory, graph);
237 // Checks that outputs and inputs are as we expect them
239 std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
242 BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
245 RefWorkloadFactory factory;
246 auto workload = CreateConvertFp32ToFp16WorkloadTest<RefConvertFp32ToFp16Workload>(factory, graph);
248 // Checks that outputs and inputs are as we expect them
250 std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float32), TensorInfo({1, 3, 2, 3}, DataType::Float16));
253 static void RefCreateConvolution2dWorkloadTest(DataLayout dataLayout = DataLayout::NCHW)
256 RefWorkloadFactory factory;
257 auto workload = CreateConvolution2dWorkloadTest<RefConvolution2dWorkload, DataType::Float32>
258 (factory, graph, dataLayout);
260 std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW) ?
261 std::initializer_list<unsigned int>({2, 3, 8, 16}) : std::initializer_list<unsigned int>({2, 8, 16, 3});
262 std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW) ?
263 std::initializer_list<unsigned int>({2, 2, 2, 10}) : std::initializer_list<unsigned int>({2, 2, 10, 2});
265 // Checks that outputs and inputs are as we expect them (see definition of CreateConvolution2dWorkloadTest).
266 CheckInputOutput(std::move(workload),
267 TensorInfo(inputShape, DataType::Float32),
268 TensorInfo(outputShape, DataType::Float32));
271 BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
273 RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
276 BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
278 RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
281 static void RefCreateDepthwiseConvolutionWorkloadTest(DataLayout dataLayout)
284 RefWorkloadFactory factory;
285 auto workload = CreateDepthwiseConvolution2dWorkloadTest<RefDepthwiseConvolution2dWorkload, DataType::Float32>
286 (factory, graph, dataLayout);
288 std::initializer_list<unsigned int> inputShape = (dataLayout == DataLayout::NCHW)
289 ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
290 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
291 std::initializer_list<unsigned int> outputShape = (dataLayout == DataLayout::NCHW)
292 ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
293 : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
294 // Checks that inputs/outputs are as we expect them (see definition of CreateDepthwiseConvolution2dWorkloadTest).
295 CheckInputOutput(std::move(workload),
296 TensorInfo(inputShape, DataType::Float32),
297 TensorInfo(outputShape, DataType::Float32));
300 BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
302 RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
305 template <typename FullyConnectedWorkloadType, armnn::DataType DataType>
306 static void RefCreateFullyConnectedWorkloadTest()
309 RefWorkloadFactory factory;
310 auto workload = CreateFullyConnectedWorkloadTest<FullyConnectedWorkloadType, DataType>(factory, graph);
312 // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
313 float inputsQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 1.0f : 0.0;
314 float outputQScale = DataType == armnn::DataType::QuantisedAsymm8 ? 2.0f : 0.0;
315 CheckInputOutput(std::move(workload),
316 TensorInfo({ 3, 1, 4, 5 }, DataType, inputsQScale),
317 TensorInfo({ 3, 7 }, DataType, outputQScale));
320 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
322 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
325 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
327 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedAsymm8>();
330 BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm16)
332 RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QuantisedSymm16>();
335 template <typename NormalizationWorkloadType, armnn::DataType DataType>
336 static void RefCreateNormalizationWorkloadTest(DataLayout dataLayout)
339 RefWorkloadFactory factory;
340 auto workload = CreateNormalizationWorkloadTest<NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
342 TensorShape inputShape;
343 TensorShape outputShape;
347 case DataLayout::NHWC:
348 inputShape = { 3, 1, 5, 5 };
349 outputShape = { 3, 1, 5, 5 };
351 case DataLayout::NCHW:
353 inputShape = { 3, 5, 5, 1 };
354 outputShape = { 3, 5, 5, 1 };
358 // Checks that outputs and inputs are as we expect them (see definition of CreateNormalizationWorkloadTest).
359 CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
362 BOOST_AUTO_TEST_CASE(CreateRefNormalizationNchwWorkload)
364 RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
367 BOOST_AUTO_TEST_CASE(CreateRefNormalizationNhwcWorkload)
369 RefCreateNormalizationWorkloadTest<RefNormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
372 template <typename Pooling2dWorkloadType, armnn::DataType DataType>
373 static void RefCreatePooling2dWorkloadTest(DataLayout dataLayout)
376 RefWorkloadFactory factory;
377 auto workload = CreatePooling2dWorkloadTest<Pooling2dWorkloadType, DataType>(factory, graph, dataLayout);
379 TensorShape inputShape;
380 TensorShape outputShape;
384 case DataLayout::NHWC:
385 inputShape = { 3, 5, 5, 2 };
386 outputShape = { 3, 2, 4, 2 };
388 case DataLayout::NCHW:
390 inputShape = { 3, 2, 5, 5 };
391 outputShape = { 3, 2, 2, 4 };
394 // Checks that outputs and inputs are as we expect them (see definition of CreatePooling2dWorkloadTest).
395 CheckInputOutput(std::move(workload),
396 TensorInfo(inputShape, DataType),
397 TensorInfo(outputShape, DataType));
400 BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
402 RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
405 BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
407 RefCreatePooling2dWorkloadTest<RefPooling2dFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
410 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
412 RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
415 BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
417 RefCreatePooling2dWorkloadTest<RefPooling2dUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NHWC);
420 template <typename SoftmaxWorkloadType, armnn::DataType DataType>
421 static void RefCreateSoftmaxWorkloadTest()
424 RefWorkloadFactory factory;
425 auto workload = CreateSoftmaxWorkloadTest<SoftmaxWorkloadType, DataType>(factory, graph);
427 // Checks that outputs and inputs are as we expect them (see definition of CreateSoftmaxWorkloadTest).
430 TensorInfo({4, 1}, DataType),
431 TensorInfo({4, 1}, DataType));
434 BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
436 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
439 BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
441 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedAsymm8>();
444 BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
446 RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QuantisedSymm16>();
449 template <typename SplitterWorkloadType, armnn::DataType DataType>
450 static void RefCreateSplitterWorkloadTest()
453 RefWorkloadFactory factory;
454 auto workload = CreateSplitterWorkloadTest<SplitterWorkloadType, DataType>(factory, graph);
456 // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
457 SplitterQueueDescriptor queueDescriptor = workload->GetData();
458 auto inputHandle = boost::polymorphic_downcast<ConstCpuTensorHandle*>(queueDescriptor.m_Inputs[0]);
459 BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
461 auto outputHandle0 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
462 BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
464 auto outputHandle1 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[1]);
465 BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
467 auto outputHandle2 = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[2]);
468 BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
471 BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
473 RefCreateSplitterWorkloadTest<RefSplitterFloat32Workload, armnn::DataType::Float32>();
476 BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
478 RefCreateSplitterWorkloadTest<RefSplitterUint8Workload, armnn::DataType::QuantisedAsymm8>();
481 template <typename SplitterWorkloadType, typename ConcatWorkloadType, armnn::DataType DataType>
482 static void RefCreateSplitterConcatWorkloadTest()
484 // Tests that it is possible to decide which output of the splitter layer
485 // should be lined to which input of the concat layer.
486 // We tested that is is possible to specify 0th output
487 // of the splitter to be the 1st input to the concat and the 1st output of the splitter to be 0th input
491 RefWorkloadFactory factory;
492 auto workloads = CreateSplitterConcatWorkloadTest<SplitterWorkloadType, ConcatWorkloadType, DataType>
495 auto wlSplitter = std::move(workloads.first);
496 auto wlConcat = std::move(workloads.second);
498 //Checks that the index of inputs/outputs matches what we declared on InputDescriptor construction.
499 armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
500 armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
501 armnn::CpuTensorHandle* mIn0 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
502 armnn::CpuTensorHandle* mIn1 = dynamic_cast<armnn::CpuTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
509 bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
511 BOOST_TEST(validDataPointers);
514 BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
516 RefCreateSplitterConcatWorkloadTest<RefSplitterFloat32Workload, RefConcatWorkload, DataType::Float32>();
519 BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
521 RefCreateSplitterConcatWorkloadTest<RefSplitterUint8Workload, RefConcatWorkload, DataType::QuantisedAsymm8>();
524 template <typename SplitterWorkloadType, typename ActivationWorkloadType, armnn::DataType DataType>
525 static void RefCreateSingleOutputMultipleInputsTest()
527 // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
528 // We created a splitter with two outputs. That each of those outputs is used by two different activation layers.
531 RefWorkloadFactory factory;
532 std::unique_ptr<SplitterWorkloadType> wlSplitter;
533 std::unique_ptr<ActivationWorkloadType> wlActiv0_0;
534 std::unique_ptr<ActivationWorkloadType> wlActiv0_1;
535 std::unique_ptr<ActivationWorkloadType> wlActiv1_0;
536 std::unique_ptr<ActivationWorkloadType> wlActiv1_1;
538 CreateSplitterMultipleInputsOneOutputWorkloadTest<SplitterWorkloadType,
539 ActivationWorkloadType, DataType>(factory, graph, wlSplitter, wlActiv0_0, wlActiv0_1, wlActiv1_0, wlActiv1_1);
541 armnn::CpuTensorHandle* sOut0 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[0]);
542 armnn::CpuTensorHandle* sOut1 = dynamic_cast<armnn::CpuTensorHandle*>(wlSplitter->GetData().m_Outputs[1]);
543 armnn::CpuTensorHandle* activ0_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_0->GetData().m_Inputs[0]);
544 armnn::CpuTensorHandle* activ0_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv0_1->GetData().m_Inputs[0]);
545 armnn::CpuTensorHandle* activ1_0Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_0->GetData().m_Inputs[0]);
546 armnn::CpuTensorHandle* activ1_1Im = dynamic_cast<armnn::CpuTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
551 BOOST_TEST(activ0_0Im);
552 BOOST_TEST(activ0_1Im);
553 BOOST_TEST(activ1_0Im);
554 BOOST_TEST(activ1_1Im);
556 bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
557 (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
559 BOOST_TEST(validDataPointers);
562 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
564 RefCreateSingleOutputMultipleInputsTest<RefSplitterFloat32Workload, RefActivationWorkload,
565 armnn::DataType::Float32>();
568 BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
570 RefCreateSingleOutputMultipleInputsTest<RefSplitterUint8Workload, RefActivationWorkload,
571 armnn::DataType::QuantisedAsymm8>();
574 template <typename ResizeBilinearWorkloadType, armnn::DataType DataType>
575 static void RefCreateResizeBilinearTest(DataLayout dataLayout)
578 RefWorkloadFactory factory;
579 auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
581 TensorShape inputShape;
582 TensorShape outputShape;
586 case DataLayout::NHWC:
587 inputShape = { 2, 4, 4, 3 };
588 outputShape = { 2, 2, 2, 3 };
590 case DataLayout::NCHW:
592 inputShape = { 2, 3, 4, 4 };
593 outputShape = { 2, 3, 2, 2 };
596 // Checks that outputs and inputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
597 CheckInputOutput(std::move(workload),
598 TensorInfo(inputShape, DataType),
599 TensorInfo(outputShape, DataType));
602 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
604 RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
607 BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
609 RefCreateResizeBilinearTest<RefResizeBilinearUint8Workload, armnn::DataType::QuantisedAsymm8>(DataLayout::NCHW);
612 BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
614 RefCreateResizeBilinearTest<RefResizeBilinearFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
617 template <typename L2NormalizationWorkloadType, armnn::DataType DataType>
618 static void RefCreateL2NormalizationTest(DataLayout dataLayout)
621 RefWorkloadFactory factory;
623 CreateL2NormalizationWorkloadTest<L2NormalizationWorkloadType, DataType>(factory, graph, dataLayout);
625 TensorShape inputShape;
626 TensorShape outputShape;
630 case DataLayout::NHWC:
631 inputShape = { 5, 50, 67, 20 };
632 outputShape = { 5, 50, 67, 20 };
634 case DataLayout::NCHW:
636 inputShape = { 5, 20, 50, 67 };
637 outputShape = { 5, 20, 50, 67 };
641 // Checks that outputs and inputs are as we expect them (see definition of CreateL2NormalizationWorkloadTest).
642 CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
645 BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
647 RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NCHW);
650 BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
652 RefCreateL2NormalizationTest<RefL2NormalizationFloat32Workload, armnn::DataType::Float32>(DataLayout::NHWC);
655 template <typename ReshapeWorkloadType, armnn::DataType DataType>
656 static void RefCreateReshapeWorkloadTest()
659 RefWorkloadFactory factory;
660 auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
662 // Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
665 TensorInfo({ 4, 1 }, DataType),
666 TensorInfo({ 1, 4 }, DataType));
669 BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
671 RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
674 BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
676 RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedAsymm8>();
679 BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
681 RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QuantisedSymm16>();
684 template <typename ConcatWorkloadType, armnn::DataType DataType>
685 static void RefCreateConcatWorkloadTest(const armnn::TensorShape& outputShape,
686 unsigned int concatAxis)
689 RefWorkloadFactory factory;
690 auto workload = CreateConcatWorkloadTest<ConcatWorkloadType, DataType>(factory, graph, outputShape, concatAxis);
692 CheckInputsOutput(std::move(workload),
693 TensorInfo({ 2, 3, 2, 5 }, DataType),
694 TensorInfo({ 2, 3, 2, 5 }, DataType),
695 TensorInfo(outputShape, DataType));
698 BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
700 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
703 BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
705 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 4, 3, 2, 5 }, 0);
708 BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
710 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedSymm16>({ 4, 3, 2, 5 }, 0);
713 BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
715 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
718 BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
720 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 6, 2, 5 }, 1);
723 BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
725 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
728 BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
730 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 4, 5 }, 2);
733 BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
735 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
738 BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
740 RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 }, 3);
743 template <typename ConstantWorkloadType, armnn::DataType DataType>
744 static void RefCreateConstantWorkloadTest(const armnn::TensorShape& outputShape)
747 RefWorkloadFactory factory;
748 auto workload = CreateConstantWorkloadTest<ConstantWorkloadType, DataType>(factory, graph, outputShape);
750 // Check output is as expected
751 auto queueDescriptor = workload->GetData();
752 auto outputHandle = boost::polymorphic_downcast<CpuTensorHandle*>(queueDescriptor.m_Outputs[0]);
753 BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
756 BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
758 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedAsymm8>({ 2, 3, 2, 10 });
761 BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
763 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QuantisedSymm16>({ 2, 3, 2, 10 });
766 BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
768 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
771 BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
773 RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
776 BOOST_AUTO_TEST_SUITE_END()