2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
6 #include <armnn/Exceptions.hpp>
7 #include <armnn/LayerSupport.hpp>
8 #include "armnn/Types.hpp"
10 #include <backendsCommon/CpuTensorHandle.hpp>
11 #include <backendsCommon/WorkloadFactory.hpp>
// Runs a Normalization workload on a fixed [2,1,2,2] NCHW input and checks the
// backend's output against expected values computed on the host for the
// requested channel/method combination.
//
// NOTE(review): this chunk of the file appears truncated -- opening braces, the
// input data literals, the alpha/beta/kappa declarations, workload->Execute()
// and the switch statement headers are not visible here. Each gap is flagged
// below; verify against the full file.
LayerTestResult<float,4> SimpleNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                     armnn::NormalizationAlgorithmChannel normChannel,
                                                     armnn::NormalizationAlgorithmMethod normMethod)
    // Input dimensions (NCHW layout).
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    // Normalization does not change the tensor shape, so output == input dims.
    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // NOTE(review): the input data literals are not visible in this chunk.
    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({

    // Window size for the normalization; 3 is large enough that a 3x3 kernel
    // covers the whole 2x2 spatial plane at every position (see Within case).
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: one input, one output, plus normalization parameters.
    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;  // NOTE(review): alpha/beta/kappa declarations not visible in this chunk
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;

    // The passthrough handle aliases ret.outputExpected, so a reference workload
    // built from refData would write its result directly into outputExpected.
    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    // NOTE(review): workload->Execute() is presumably invoked here -- not visible in this chunk.

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Host-side computation of the expected output.
    // NOTE(review): the enclosing switch (normMethod) header is not visible here.
    case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        // NOTE(review): the nested switch (normChannel) header is not visible here.
        case armnn::NormalizationAlgorithmChannel::Within:
            // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
            // Therefore, all output values should equal the inputs, but divided by:
            // pow((kappa + (accumulatedScale * alpha)), beta)
            // ...where accumulatedScale is the sum of every element squared.
            float divisor[inputNum];
            for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
                // Sum of squares of the whole 2x2 spatial plane for batch i.
                float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
                                         input[i][0][0][1]*input[i][0][0][1] +
                                         input[i][0][1][0]*input[i][0][1][0] +
                                         input[i][0][1][1]*input[i][0][1][1];
                divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
            // Every element of batch i is divided by that batch's common divisor.
            ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
                                                      std::vector<float>({input[0][0][0][0]/divisor[0],
                                                                          input[0][0][0][1]/divisor[0],
                                                                          input[0][0][1][0]/divisor[0],
                                                                          input[0][0][1][1]/divisor[0],
                                                                          input[1][0][0][0]/divisor[1],
                                                                          input[1][0][0][1]/divisor[1],
                                                                          input[1][0][1][0]/divisor[1],
                                                                          input[1][0][1][1]/divisor[1]}));
        case armnn::NormalizationAlgorithmChannel::Across:
            // When normalising across channels, all output values should equal the inputs, but multiplied by:
            // pow((kappa + (accumulatedScale * alpha)), -beta)
            // ...where accumulatedScale is the sum of the inputs for adjacent channels for this element squared
            // ...where adjacent channels means within half the normSize for the channel
            // The test data has only one channel, so this is simplified below.
            std::vector<float> outputVector;
            for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
                for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
                    for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
                        // Single channel: only the element itself contributes to its window.
                        float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
                        float scale = powf((kappa + accumulatedScale * alpha), -beta);
                        outputVector.push_back(input[n][0][h][w] * scale);
            ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
        // Unsupported channel type for the host reference computation.
        throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                            "only Across and Within are supported");
    case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        // Only LocalBrightness has a host reference implementation.
        throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                            "only LocalBrightness is supported");
// NHWC variant of the simple normalization test: runs a Normalization workload
// on a fixed [2,2,2,1] NHWC input and checks the backend's output against a
// hard-coded expected vector (Across-channel, LocalBrightness only).
//
// NOTE(review): this chunk of the file appears truncated -- opening braces, the
// input data literals, the alpha/beta/kappa declarations, workload->Execute()
// and the switch statement headers are not visible here. Each gap is flagged
// below; verify against the full file.
LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                         armnn::NormalizationAlgorithmChannel normChannel,
                                                         armnn::NormalizationAlgorithmMethod normMethod)
    // Input dimensions.
    const unsigned int inputHeight = 2;
    const unsigned int inputWidth = 2;
    const unsigned int inputChannels = 1;
    const unsigned int inputNum = 2;

    // Normalization does not change the tensor shape, so output == input dims.
    unsigned int outputHeight = inputHeight;
    unsigned int outputWidth = inputWidth;
    unsigned int outputChannels = inputChannels;
    unsigned int outputNum = inputNum;

    // Note the NHWC ordering here, unlike the NCHW test above.
    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };

    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // NOTE(review): the input data literals are not visible in this chunk.
    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({

    // Window size for the normalization.
    uint32_t normSize = 3;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    // Describe the workload: one input, one output, plus normalization parameters.
    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;  // NOTE(review): alpha/beta/kappa declarations not visible in this chunk
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;
    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;

    // The passthrough handle aliases ret.outputExpected, so a reference workload
    // built from refData would write its result directly into outputExpected.
    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);

    inputHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    // NOTE(review): workload->Execute() is presumably invoked here -- not visible in this chunk.

    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());

    // Select the hard-coded expected output for the supported method/channel combo.
    // NOTE(review): the enclosing switch (normMethod) header is not visible here.
    case armnn::NormalizationAlgorithmMethod::LocalBrightness:
        // NOTE(review): the nested switch (normChannel) header is not visible here.
        case armnn::NormalizationAlgorithmChannel::Across:
            // Precomputed expectation; the values are consistent with x/(kappa + alpha*x^2)
            // per element for inputs 1..8 -- presumably alpha = beta = kappa = 1 (verify
            // against the elided declarations above).
            std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
                                               0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
            ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
        // Only cross-map (Across) normalization is covered for NHWC.
        throw armnn::UnimplementedException("Unsupported normalisation channel type, "
                                            "Only Cross-map is supported for NHWC layout");
    case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
        // Only LocalBrightness has an expected-output table here.
        throw armnn::UnimplementedException("Unsupported normalisation method type, "
                                            "only LocalBrightness is supported");
// Runs the same Normalization workload on two workload factories (the backend
// under test and a reference backend) over a random [5,3,32,24] input, and
// captures both outputs in `ret` for comparison by the caller.
//
// NOTE(review): this chunk of the file appears truncated -- the function's
// opening brace, the early-out on ret.supported, workload->Execute(), the
// return statement and the closing brace are not visible here; the definition
// also runs past the end of the visible chunk. Verify against the full file.
LayerTestResult<float,4> CompareNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                      armnn::IWorkloadFactory& refWorkloadFactory,
                                                      armnn::NormalizationAlgorithmChannel normChannel,
                                                      armnn::NormalizationAlgorithmMethod normMethod)
    // A larger input than the simple tests, to exercise the implementation more thoroughly.
    constexpr unsigned int inputNum = 5;
    constexpr unsigned int inputChannels = 3;
    constexpr unsigned int inputHeight = 32;
    constexpr unsigned int inputWidth = 24;

    // Normalization does not change the tensor shape, so output == input dims.
    constexpr unsigned int outputNum = inputNum;
    constexpr unsigned int outputChannels = inputChannels;
    constexpr unsigned int outputHeight = inputHeight;
    constexpr unsigned int outputWidth = inputWidth;

    armnn::TensorInfo inputTensorInfo;
    armnn::TensorInfo outputTensorInfo;

    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};

    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);

    LayerTestResult<float,4> ret(outputTensorInfo);

    // Fixed seed keeps the random input (and therefore the test) deterministic.
    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);

    // Normalization parameters shared by both the tested and reference workloads.
    constexpr float alpha = 1.f;
    constexpr float beta = 1.f;
    constexpr float kappa = 1.f;
    constexpr uint32_t normSize = 5;

    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::NormalizationQueueDescriptor data;
    armnn::WorkloadInfo info;
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
    data.m_Parameters.m_NormChannelType = normChannel;
    data.m_Parameters.m_NormMethodType = normMethod;
    data.m_Parameters.m_NormSize = normSize;
    data.m_Parameters.m_Alpha = alpha;
    data.m_Parameters.m_Beta = beta;
    data.m_Parameters.m_K = kappa;

    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);

    // The reference descriptor reuses the same parameters but points at the
    // reference factory's tensor handles.
    armnn::NormalizationQueueDescriptor refData = data;
    armnn::WorkloadInfo refInfo = info;
    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());

    // Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
    armnn::BackendId backend = workloadFactory.GetBackendId();
    const size_t reasonIfUnsupportedMaxLen = 255;
    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
    // NOTE(review): the early return taken when ret.supported is false is not visible in this chunk.

    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);

    outputHandleRef->Allocate();
    inputHandleRef->Allocate();

    inputHandle->Allocate();
    outputHandle->Allocate();

    // Both backends receive identical input data.
    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);

    workloadFactory.Finalize();
    // NOTE(review): workload->Execute() is presumably invoked here -- not visible in this chunk.
    refWorkloadFactory.Finalize();
    workloadRef->Execute();

    // Tested-backend result goes to ret.output, reference result to ret.outputExpected;
    // the caller compares the two.
    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());