IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] src/backends/cl/test/Fp16SupportTest.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/ArmNN.hpp>
#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
#include <Half.hpp>

#include <Graph.hpp>
#include <Optimizer.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <backendsCommon/test/QuantizeHelper.hpp>

#include <boost/core/ignore_unused.hpp>
#include <boost/test/unit_test.hpp>

#include <set>

using namespace armnn;

BOOST_AUTO_TEST_SUITE(Fp16Support)

BOOST_AUTO_TEST_CASE(Fp16DataTypeSupport)
{
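    // Builds a minimal graph (two inputs -> addition -> output), tags each output slot as Float16
    // and checks that the Float16 data type is reported back from the tensor info.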
    Graph graph;

    Layer* const inputLayer1 = graph.AddLayer<InputLayer>(1, "input1");
    Layer* const inputLayer2 = graph.AddLayer<InputLayer>(2, "input2");

    Layer* const additionLayer = graph.AddLayer<AdditionLayer>("addition");
    Layer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");

    TensorInfo fp16TensorInfo({1, 2, 3, 5}, armnn::DataType::Float16);

    inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
    inputLayer2->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
    additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    inputLayer1->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
    inputLayer2->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
    additionLayer->GetOutputSlot().SetTensorInfo(fp16TensorInfo);

    BOOST_CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
    BOOST_CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
    BOOST_CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
}

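// Runs a four-element Float16 addition network end to end on the GpuAcc backend and checks the output values.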
BOOST_AUTO_TEST_CASE(Fp16AdditionTest)
{
    using namespace half_float::literal;

    // Creates the runtime in which the test will run.
    IRuntime::CreationOptions options;
    IRuntimePtr runtime(IRuntime::Create(options));

    // Builds up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* inputLayer1 = net->AddInputLayer(0);
    IConnectableLayer* inputLayer2 = net->AddInputLayer(1);
    IConnectableLayer* additionLayer = net->AddAdditionLayer();
    IConnectableLayer* outputLayer = net->AddOutputLayer(0);

    inputLayer1->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
    inputLayer2->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
    additionLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));

    // Changes the tensors to Float16.
    TensorInfo fp16TensorInfo(TensorShape({4}), DataType::Float16);
    inputLayer1->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
    inputLayer2->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);
    additionLayer->GetOutputSlot(0).SetTensorInfo(fp16TensorInfo);

    // Optimizes the network for the GpuAcc backend.
    std::vector<BackendId> backends = {Compute::GpuAcc};
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Loads it into the runtime.
    NetworkId netId;
    runtime->LoadNetwork(netId, std::move(optNet));

    std::vector<Half> input1Data
    {
        1.0_h, 2.0_h, 3.0_h, 4.0_h
    };

    std::vector<Half> input2Data
    {
        100.0_h, 200.0_h, 300.0_h, 400.0_h
    };

    InputTensors inputTensors
    {
        {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), input1Data.data())},
        {1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), input2Data.data())}
    };

    std::vector<Half> outputData(input1Data.size());
    OutputTensors outputTensors
    {
        {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
    };

    // Does the inference.
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);

    // Checks the results: element-wise addition of the two inputs.
    BOOST_TEST(outputData == std::vector<Half>({ 101.0_h, 202.0_h, 303.0_h, 404.0_h }));
}

BOOST_AUTO_TEST_SUITE_END()