2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
8 #include <backendsCommon/CpuTensorHandle.hpp>
9 #include <backendsCommon/WorkloadData.hpp>
11 #include <cl/ClWorkloadFactory.hpp>
13 #include <boost/cast.hpp>
14 #include <boost/test/unit_test.hpp>
18 using namespace armnn;
21 // connects two layers
22 void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
24 from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
25 from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
28 /////////////////////////////////////////////////////////////////////////////////////////////
// The following tests are created specifically to exercise the ReleaseConstantData() method in the Layer class.
// They build very simple graphs that include the layer to be checked.
// Weights and biases are verified both before and after the method is called.
32 /////////////////////////////////////////////////////////////////////////////////////////////
34 BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
36 BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
39 ClWorkloadFactory factory;
41 // create the layer we're testing
42 BatchNormalizationDescriptor layerDesc;
43 layerDesc.m_Eps = 0.05f;
44 BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
46 armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
47 layer->m_Mean = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
48 layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
49 layer->m_Beta = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
50 layer->m_Gamma = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
51 layer->m_Mean->Allocate();
52 layer->m_Variance->Allocate();
53 layer->m_Beta->Allocate();
54 layer->m_Gamma->Allocate();
56 // create extra layers
57 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
58 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
61 armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
62 Connect(input, layer, tensorInfo);
63 Connect(layer, output, tensorInfo);
65 // check the constants that they are not NULL
66 BOOST_CHECK(layer->m_Mean != nullptr);
67 BOOST_CHECK(layer->m_Variance != nullptr);
68 BOOST_CHECK(layer->m_Beta != nullptr);
69 BOOST_CHECK(layer->m_Gamma != nullptr);
71 // free up the constants..
72 layer->ReleaseConstantData();
74 // check the constants that they are NULL now
75 BOOST_CHECK(layer->m_Mean == nullptr);
76 BOOST_CHECK(layer->m_Variance == nullptr);
77 BOOST_CHECK(layer->m_Beta == nullptr);
78 BOOST_CHECK(layer->m_Gamma == nullptr);
BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
{
    Graph graph;
    ClWorkloadFactory factory;

    // Create the layer we're testing.
    Convolution2dDescriptor layerDesc;
    layerDesc.m_PadLeft = 3;
    layerDesc.m_PadRight = 3;
    layerDesc.m_PadTop = 1;
    layerDesc.m_PadBottom = 1;
    layerDesc.m_StrideX = 2;
    layerDesc.m_StrideY = 4;
    layerDesc.m_BiasEnabled = true;

    Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");

    // Allocate the weight and bias constants the layer owns.
    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
                                                                         armnn::DataType::Float32));
    layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>
        (TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));

    layer->m_Weight->Allocate();
    layer->m_Bias->Allocate();

    // Create extra layers and wire up a minimal input -> layer -> output graph.
    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");

    Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
    Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));

    // Check the constants are not NULL before release.
    BOOST_CHECK(layer->m_Weight != nullptr);
    BOOST_CHECK(layer->m_Bias != nullptr);

    // Free up the constants.
    layer->ReleaseConstantData();

    // Check the constants are NULL now.
    BOOST_CHECK(layer->m_Weight == nullptr);
    BOOST_CHECK(layer->m_Bias == nullptr);
}
128 BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
131 ClWorkloadFactory factory;
133 // create the layer we're testing
134 DepthwiseConvolution2dDescriptor layerDesc;
135 layerDesc.m_PadLeft = 3;
136 layerDesc.m_PadRight = 3;
137 layerDesc.m_PadTop = 1;
138 layerDesc.m_PadBottom = 1;
139 layerDesc.m_StrideX = 2;
140 layerDesc.m_StrideY = 4;
141 layerDesc.m_BiasEnabled = true;
143 DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
145 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
146 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
147 layer->m_Weight->Allocate();
148 layer->m_Bias->Allocate();
150 // create extra layers
151 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
152 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
155 Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
156 Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
158 // check the constants that they are not NULL
159 BOOST_CHECK(layer->m_Weight != nullptr);
160 BOOST_CHECK(layer->m_Bias != nullptr);
162 // free up the constants..
163 layer->ReleaseConstantData();
165 // check the constants that they are NULL now
166 BOOST_CHECK(layer->m_Weight == nullptr);
167 BOOST_CHECK(layer->m_Bias == nullptr);
170 BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
173 ClWorkloadFactory factory;
175 // create the layer we're testing
176 FullyConnectedDescriptor layerDesc;
177 layerDesc.m_BiasEnabled = true;
178 layerDesc.m_TransposeWeightMatrix = true;
180 FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
182 float inputsQScale = 1.0f;
183 float outputQScale = 2.0f;
185 layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
186 DataType::QuantisedAsymm8, inputsQScale, 0));
187 layer->m_Bias = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
188 GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
189 layer->m_Weight->Allocate();
190 layer->m_Bias->Allocate();
192 // create extra layers
193 Layer* const input = graph.AddLayer<InputLayer>(0, "input");
194 Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
197 Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
198 Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
200 // check the constants that they are not NULL
201 BOOST_CHECK(layer->m_Weight != nullptr);
202 BOOST_CHECK(layer->m_Bias != nullptr);
204 // free up the constants..
205 layer->ReleaseConstantData();
207 // check the constants that they are NULL now
208 BOOST_CHECK(layer->m_Weight == nullptr);
209 BOOST_CHECK(layer->m_Bias == nullptr);
212 BOOST_AUTO_TEST_SUITE_END()