2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
6 #include <boost/test/unit_test.hpp>
7 #include <boost/cast.hpp>
9 #include <backends/WorkloadData.hpp>
14 #include <backends/CpuTensorHandle.hpp>
15 #include <backends/cl/ClWorkloadFactory.hpp>
17 using namespace armnn;
20 // connects two layers
21 void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
23 from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
24 from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
27 /////////////////////////////////////////////////////////////////////////////////////////////
28 // The following test are created specifically to test ReleaseConstantData() method in the Layer
29 // They build very simple graphs including the layer will be checked.
30 // Checks weights and biases before the method called and after.
31 /////////////////////////////////////////////////////////////////////////////////////////////
33 BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
35 BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
38 ClWorkloadFactory factory;
40 // create the layer we're testing
41 BatchNormalizationDescriptor layerDesc;
42 layerDesc.m_Eps = 0.05f;
43 BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
45 armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
46 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
47 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
48 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
49 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
50 layer->m_Mean->Allocate();
51 layer->m_Variance->Allocate();
52 layer->m_Beta->Allocate();
53 layer->m_Gamma->Allocate();
55 // create extra layers
56 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
57 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
60 armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
61 Connect(input, layer, tensorInfo);
62 Connect(layer, output, tensorInfo);
64 // check the constants that they are not NULL
65 BOOST_CHECK(layer->m_Mean != nullptr);
66 BOOST_CHECK(layer->m_Variance != nullptr);
67 BOOST_CHECK(layer->m_Beta != nullptr);
68 BOOST_CHECK(layer->m_Gamma != nullptr);
70 // free up the constants..
71 layer->ReleaseConstantData();
73 // check the constants that they are NULL now
74 BOOST_CHECK(layer->m_Mean == nullptr);
75 BOOST_CHECK(layer->m_Variance == nullptr);
76 BOOST_CHECK(layer->m_Beta == nullptr);
77 BOOST_CHECK(layer->m_Gamma == nullptr);
// Verifies that Convolution2dLayer::ReleaseConstantData() frees the weight and
// bias tensors after they have been allocated.
// NOTE(review): the opening brace, the 'Graph graph;' declaration ('graph' was
// used but never declared in the damaged source) and the closing brace were
// missing; restored here so the test compiles.
BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
{
    Graph graph;
    ClWorkloadFactory factory;  // presumably kept for parity with other backend tests — unused here

    // create the layer we're testing
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
                                                                         armnn::DataType::Float32));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
            (TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // create extra layers
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    // connect the network up: input -> conv -> output
    Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
    Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));

    // check the constants that they are not NULL
    BOOST_CHECK(layer->m_Weight != nullptr);
    BOOST_CHECK(layer->m_Bias != nullptr);

    // free up the constants..
    layer->ReleaseConstantData();

    // check the constants that they are NULL now
    BOOST_CHECK(layer->m_Weight == nullptr);
    BOOST_CHECK(layer->m_Bias == nullptr);
}
127 BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
130 ClWorkloadFactory factory;
132 // create the layer we're testing
133 DepthwiseConvolution2dDescriptor layerDesc;
134 layerDesc.m_PadLeft = 3;
135 layerDesc.m_PadRight = 3;
136 layerDesc.m_PadTop = 1;
137 layerDesc.m_PadBottom = 1;
138 layerDesc.m_StrideX = 2;
139 layerDesc.m_StrideY = 4;
140 layerDesc.m_BiasEnabled = true;
142 DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
144 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
145 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
146 layer->m_Weight->Allocate();
147 layer->m_Bias->Allocate();
149 // create extra layers
150 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
151 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
154 Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
155 Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
157 // check the constants that they are not NULL
158 BOOST_CHECK(layer->m_Weight != nullptr);
159 BOOST_CHECK(layer->m_Bias != nullptr);
161 // free up the constants..
162 layer->ReleaseConstantData();
164 // check the constants that they are NULL now
165 BOOST_CHECK(layer->m_Weight == nullptr);
166 BOOST_CHECK(layer->m_Bias == nullptr);
169 BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
172 ClWorkloadFactory factory;
174 // create the layer we're testing
175 FullyConnectedDescriptor layerDesc;
176 layerDesc.m_BiasEnabled = true;
177 layerDesc.m_TransposeWeightMatrix = true;
179 FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
181 float inputsQScale = 1.0f;
182 float outputQScale = 2.0f;
184 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
185 DataType::QuantisedAsymm8, inputsQScale, 0));
186 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
187 GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
188 layer->m_Weight->Allocate();
189 layer->m_Bias->Allocate();
191 // create extra layers
192 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
193 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
196 Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
197 Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
199 // check the constants that they are not NULL
200 BOOST_CHECK(layer->m_Weight != nullptr);
201 BOOST_CHECK(layer->m_Bias != nullptr);
203 // free up the constants..
204 layer->ReleaseConstantData();
206 // check the constants that they are NULL now
207 BOOST_CHECK(layer->m_Weight == nullptr);
208 BOOST_CHECK(layer->m_Bias == nullptr);
211 BOOST_AUTO_TEST_SUITE_END()