IVGCVSW-1946: Remove armnn/src from the include paths
[platform/upstream/armnn.git] src/armnn/test/RuntimeTests.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include <armnn/Descriptors.hpp>
#include <armnn/IRuntime.hpp>
#include <armnn/INetwork.hpp>
#include <Runtime.hpp>
#include <armnn/TypesUtils.hpp>

#include <HeapProfiling.hpp>
#include <LeakChecking.hpp>

#ifdef WITH_VALGRIND
#include <valgrind/memcheck.h>
#endif

#include <boost/test/unit_test.hpp>

namespace armnn
{

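// Test-only hook, declared a friend of Runtime so it can touch the private m_LoadedNetworks
// member: pre-reserving the container keeps its first allocation out of the before/after
// leak-check deltas measured in the tests below.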
void RuntimeLoadedNetworksReserve(armnn::Runtime* runtime)
{
    runtime->m_LoadedNetworks.reserve(1);
}

} // namespace armnn

BOOST_AUTO_TEST_SUITE(Runtime)

BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
{
    // Build two mock networks and load them into the runtime.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Mock network 1.
    armnn::NetworkId networkIdentifier1 = 1;
    armnn::INetworkPtr mockNetwork1(armnn::INetwork::Create());
    mockNetwork1->AddInputLayer(0, "test layer");
    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    runtime->LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime->GetDeviceSpec()));

    // Mock network 2.
    armnn::NetworkId networkIdentifier2 = 2;
    armnn::INetworkPtr mockNetwork2(armnn::INetwork::Create());
    mockNetwork2->AddInputLayer(0, "test layer");
    runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, backends, runtime->GetDeviceSpec()));

    // Unload the first network by its NetworkId.
    BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);

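    // A second unload of the same NetworkId should fail, as the network is already gone.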
    BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
}

// Note: in the current builds we don't do valgrind- and gperftools-based leak checking at the
//       same time, so in practice WITH_VALGRIND and ARMNN_LEAK_CHECKING_ENABLED are mutually
//       exclusive. The valgrind tests can stay for x86 builds, but on hikey Valgrind is just
//       way too slow to be integrated into the CI system.
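//       To exercise the gperftools-based checks below, the unit tests have to run with heap
//       checking enabled, e.g. (assuming the same UnitTests binary as in the Valgrind note
//       further down): HEAPCHECK=draconian armnn/test/UnitTests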

#ifdef ARMNN_LEAK_CHECKING_ENABLED

struct DisableGlobalLeakChecking
{
    DisableGlobalLeakChecking()
    {
        ARMNN_LOCAL_LEAK_CHECKING_ONLY();
    }
};

BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);

BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
{
    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
    {
        ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Outer");
        {
            ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Inner");
            BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE() == true);
            // The allocation is still live at the end of the inner scope, so the scoped
            // checker counts it as a leak.
            std::unique_ptr<char[]> dummyAllocation(new char[1000]);
            BOOST_CHECK_MESSAGE(ARMNN_NO_LEAKS_IN_SCOPE() == false,
                "A leak of 1000 bytes is expected here. "
                "Please make sure the environment variable HEAPCHECK=draconian is set!");
            BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
            BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
        }
        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
    }
}

#endif // ARMNN_LEAK_CHECKING_ENABLED

// Note: this part of the code is due to be removed when we fully trust the gperftools-based results.
#ifdef WITH_VALGRIND
// Run with the following command to get all the amazing output (in the devenv/build folder) :)
// valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
{
    // From the Valgrind documentation:

    // "Definitely lost" means that no pointer to the block can be found. The block is classified
    // as "lost", because the programmer could not possibly have freed it at program exit, since
    // no pointer to it exists.
    unsigned long leakedBefore = 0;
    unsigned long leakedAfter = 0;

    // "Still reachable" means a start-pointer or chain of start-pointers to the block is found.
    // Since the block is still pointed at, the programmer could, at least in principle, have freed
    // it before program exit. We want to test this in case memory is not freed as early as it
    // could have been.
    unsigned long reachableBefore = 0;
    unsigned long reachableAfter = 0;

    // Needed as out params but we don't test them.
    unsigned long dubious = 0;
    unsigned long suppressed = 0;

    armnn::NetworkId networkIdentifier1 = 1;

    // Ensure that the runtime is large enough before checking for memory leaks.
    // Otherwise, when loading the network, it will automatically reserve memory that won't be
    // released until destruction.
    armnn::IRuntime::CreationOptions options;
    armnn::Runtime runtime(options);
    armnn::RuntimeLoadedNetworksReserve(&runtime);

    // Checks for leaks before we load the network and records them so that we can see the delta
    // after unloading.
    VALGRIND_DO_QUICK_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedBefore, dubious, reachableBefore, suppressed);

    // Builds a mock network and loads it into the runtime.
    {
        unsigned int inputShape[] = {1, 7, 1, 1};
        armnn::TensorInfo inputTensorInfo(4, inputShape, armnn::DataType::Float32);

        std::unique_ptr<armnn::Network> mockNetwork1 = std::make_unique<armnn::Network>();
        mockNetwork1->AddInputLayer(0, "test layer");

        std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
        runtime.LoadNetwork(networkIdentifier1, Optimize(*mockNetwork1, backends, runtime.GetDeviceSpec()));
    }

    runtime.UnloadNetwork(networkIdentifier1);

    VALGRIND_DO_ADDED_LEAK_CHECK;
    VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);

    // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
    BOOST_TEST(leakedBefore    == leakedAfter);
    BOOST_TEST(reachableBefore == reachableAfter);

    // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to its parameters:
    // they are assigned to but never read afterwards, which would otherwise cause an unused-variable warning.
    boost::ignore_unused(dubious);
    boost::ignore_unused(suppressed);
}
#endif // WITH_VALGRIND

BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
{
    using namespace armnn;

    // Create the runtime in which the test will run.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Build up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc; it should fall back to CpuRef.
    NormalizationDescriptor descriptor;
    IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    // Optimize the network.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Load it into the runtime. It should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}

BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
{
    using namespace armnn;

    // Create the runtime in which the test will run.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Build up the structure of the network.
    INetworkPtr net(INetwork::Create());

    IConnectableLayer* input = net->AddInputLayer(0);

    // This layer configuration isn't supported by CpuAcc; it should fall back to CpuRef.
    NormalizationDescriptor descriptor;
    IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);

    IConnectableLayer* output = net->AddOutputLayer(0);

    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    input->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));
    normalize->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 1, 1, 4, 4 }, DataType::Float32));

    // Allow fallback to CpuRef.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
    // Optimize the network.
    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());

    // Load it into the runtime. It should succeed.
    armnn::NetworkId netId;
    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
}

BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
{
    // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
    using namespace armnn;

    // Create the runtime in which the test will run.
    armnn::IRuntime::CreationOptions options;
    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));

    // Build up the structure of the network.
    INetworkPtr net(INetwork::Create());
    armnn::IConnectableLayer* input   = net->AddInputLayer(0, "input");
    armnn::IConnectableLayer* softmax = net->AddSoftmaxLayer(armnn::SoftmaxDescriptor(), "softmax");
    armnn::IConnectableLayer* output  = net->AddOutputLayer(0, "output");

    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));

    // The input tensor is quantized with a valid scale...
    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
                                                            armnn::DataType::QuantisedAsymm8,
                                                            1.0f / 255,
                                                            0));

    // ...but the softmax output deliberately has no quantization scale set.
    softmax->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo(armnn::TensorShape({ 1, 5 }),
                                                              armnn::DataType::QuantisedAsymm8));

    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
    std::vector<std::string> errMessages;
    armnn::IOptimizedNetworkPtr optNet = Optimize(*net,
                                                  backends,
                                                  runtime->GetDeviceSpec(),
                                                  OptimizerOptions(),
                                                  errMessages);

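    // Optimize should fail, reporting exactly one error about the missing scale and
    // returning a null optimized network.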
    BOOST_TEST(errMessages.size() == 1);
    BOOST_TEST(errMessages[0] ==
        "ERROR: output 0 of layer Softmax (softmax) is of type "
        "Quantized 8 bit but its scale parameter has not been set");
    BOOST_TEST(!optNet);
}

BOOST_AUTO_TEST_SUITE_END()