2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
7 #include "ActivationFixture.hpp"
8 #include "QuantizeHelper.hpp"
10 #include <armnn/ArmNN.hpp>
11 #include <armnn/Tensor.hpp>
12 #include <armnn/TypesUtils.hpp>
14 #include <backendsCommon/CpuTensorHandle.hpp>
15 #include <backendsCommon/IBackendInternal.hpp>
16 #include <backendsCommon/WorkloadFactory.hpp>
18 #include <test/TensorHelpers.hpp>
22 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
23 LayerTestResult<T, 4> BoundedReLuTestCommon(
24 armnn::IWorkloadFactory& workloadFactory,
25 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
32 const std::vector<T>& inputData,
33 const std::vector<T>& outputExpectedData,
34 unsigned int inputWidth,
35 unsigned int inputHeight,
36 unsigned int inputChannels,
37 unsigned int inputBatchSize)
39 unsigned int outputWidth = inputWidth;
40 unsigned int outputHeight = inputHeight;
41 unsigned int outputChannels = inputChannels;
42 unsigned int outputBatchSize = inputBatchSize;
44 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
46 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
48 if(armnn::IsQuantizedType<T>())
50 inputTensorInfo.SetQuantizationScale(inputScale);
51 inputTensorInfo.SetQuantizationOffset(inputOffset);
53 outputTensorInfo.SetQuantizationScale(outputScale);
54 outputTensorInfo.SetQuantizationOffset(outputOffset);
57 LayerTestResult<T, 4> result(inputTensorInfo);
59 auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
61 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
62 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
64 // Setup bounded ReLu.
65 armnn::ActivationQueueDescriptor descriptor;
66 armnn::WorkloadInfo workloadInfo;
67 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
68 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
70 descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
71 descriptor.m_Parameters.m_A = upperBound;
72 descriptor.m_Parameters.m_B = lowerBound;
74 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
76 inputHandle->Allocate();
77 outputHandle->Allocate();
79 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
83 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
85 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
90 LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(
91 armnn::IWorkloadFactory& workloadFactory,
92 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
94 unsigned int inputWidth = 4u;
95 unsigned int inputHeight = 5u;
96 unsigned int inputChannels = 1u;
97 unsigned int inputBatchSize = 1;
99 std::vector<float> input = std::vector<float>{
100 -2.0f, 0.1f, 0.5f, 1.25f,
101 0.786f, 0.9875f, -1.5f, 0.384f,
102 1.0001f, 3.5f, 7.5f, 0.896f,
103 2.126f, 2.0f, 0.3f, 0.15f,
104 0.999f, 1.2f, 0.89f, 6.1f,
107 // Calculated manually.
108 std::vector<float> output = std::vector<float>{
109 -1.0f, 0.1f, 0.5f, 1.0f,
110 0.786f, 0.9875f, -1.0f, 0.384f,
111 1.0f, 1.0f, 1.0f, 0.896f,
112 1.0f, 1.0f, 0.3f, 0.15f,
113 0.999f, 1.0f, 0.89f, 1.0f,
116 return BoundedReLuTestCommon<armnn::DataType::Float32>(
117 workloadFactory, memoryManager, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
118 inputWidth, inputHeight, inputChannels, inputBatchSize);
121 LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(
122 armnn::IWorkloadFactory& workloadFactory,
123 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
125 unsigned int inputWidth = 4u;
126 unsigned int inputHeight = 5u;
127 unsigned int inputChannels = 1u;
128 unsigned int inputBatchSize = 1;
130 std::vector<float> input = std::vector<float>{
131 -1.0f, 0.1f, 0.5f, 6.25f,
132 0.786f, 5.9875f, -0.5f, 0.384f,
133 6.0001f, 3.5f, 7.5f, 0.896f,
134 2.126f, 12.0f, 0.3f, 0.15f,
135 0.999f, 1.2f, 0.89f, 6.1f,
138 // Calculated manually.
139 std::vector<float> output = std::vector<float>{
140 0.0f, 0.1f, 0.5f, 6.0f,
141 0.786f, 5.9875f, 0.0f, 0.384f,
142 6.0f, 3.5f, 6.0f, 0.896f,
143 2.126f, 6.0f, 0.3f, 0.15f,
144 0.999f, 1.2f, 0.89f, 6.0f,
147 return BoundedReLuTestCommon<armnn::DataType::Float32>(
148 workloadFactory, memoryManager, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
149 inputWidth, inputHeight, inputChannels, inputBatchSize);
// Quantised (asymmetric uint8) bounded ReLu with only the upper bound
// active: real-value clamp range is [0, 6].
LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
// Single 1-channel batch of 2 rows x 3 columns.
unsigned int inputWidth = 3u;
unsigned int inputHeight = 2u;
unsigned int inputChannels = 1u;
unsigned int inputBatchSize = 1;
// NOTE(review): the initializer lists below appear truncated in this copy of
// the file — the quantised input/output payload bytes are missing. Restore
// them from the upstream source before building; do not guess values here.
std::vector<uint8_t> input = std::vector<uint8_t>{
// Calculated manually.
std::vector<uint8_t> output = std::vector<uint8_t>{
// Input quantisation spans 12 real units with zero-point 63; output
// quantisation spans exactly the clamped [0, 6] range with zero-point 0.
float inputScale = 12.0f / 255.0f;
int32_t inputOffset = 63;
float outputScale = 6.0f / 255.0f;
int32_t outputOffset = 0;
return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
    workloadFactory, memoryManager, 6.0f, 0.0f,
    inputScale, inputOffset, outputScale, outputOffset,
    input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
// Quantised (asymmetric uint8) bounded ReLu with both bounds active:
// real-value clamp range is [-1, 1].
LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
// Single 1-channel batch of 2 rows x 3 columns.
unsigned int inputWidth = 3u;
unsigned int inputHeight = 2u;
unsigned int inputChannels = 1u;
unsigned int inputBatchSize = 1;
// NOTE(review): the initializer lists below appear truncated in this copy of
// the file — the quantised input/output payload bytes are missing. Restore
// them from the upstream source before building; do not guess values here.
std::vector<uint8_t> input = std::vector<uint8_t>{
// Calculated manually.
std::vector<uint8_t> output = std::vector<uint8_t>{
// Zero-point 112, scale 0.0125 — the same quantisation is reused for the
// output so quantised inputs inside the bounds pass through unchanged.
int32_t inputOffset = 112;
float inputScale = 0.0125f;
return BoundedReLuTestCommon<armnn::DataType::QuantisedAsymm8>(
    workloadFactory, memoryManager, 1.0f, -1.0f,
    inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
    input, output, inputWidth, inputHeight, inputChannels, inputBatchSize);
215 struct BoundedReLuRandomInputTestTraits
217 constexpr static unsigned int inputHeight = 31u;
218 constexpr static unsigned int inputWidth = 19u;
219 constexpr static unsigned int inputChannels = 4u;
220 constexpr static unsigned int inputBatchSize = 2;
222 constexpr static unsigned int outputHeight = inputHeight;
223 constexpr static unsigned int outputWidth = inputWidth;
224 constexpr static unsigned int outputChannels = inputChannels;
225 constexpr static unsigned int outputBatchSize = inputBatchSize;
227 static armnn::TensorInfo GetInputTensorInfo()
229 return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
230 armnn::DataType::Float32);
233 static armnn::TensorInfo GetOutputTensorInfo()
235 return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
236 armnn::DataType::Float32);
240 boost::multi_array<float, 4> BoundedReLuRandomInputTest(
241 armnn::IWorkloadFactory& workloadFactory,
242 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
245 const armnn::ActivationDescriptor& activationDescriptor)
247 const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
248 const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
250 boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));
252 // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
253 // range [lowerBound, upperBound].
254 auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
256 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
257 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
259 // Set up bounded ReLu.
260 armnn::ActivationQueueDescriptor descriptor;
261 armnn::WorkloadInfo workloadInfo;
262 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
263 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
264 descriptor.m_Parameters = activationDescriptor;
266 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
268 inputHandle->Allocate();
269 outputHandle->Allocate();
271 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
275 CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());
282 LayerTestResult<float, 4> CompareBoundedReLuTest(
283 armnn::IWorkloadFactory& workloadFactory,
284 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
285 armnn::IWorkloadFactory& refWorkloadFactory,
289 LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
291 armnn::ActivationDescriptor activationDescriptor;
292 activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
293 activationDescriptor.m_A = upperBound;
294 activationDescriptor.m_B = lowerBound;
296 result.output = BoundedReLuRandomInputTest(
297 workloadFactory, memoryManager, 0.0f, upperBound, activationDescriptor);
298 result.outputExpected = BoundedReLuRandomInputTest(
299 refWorkloadFactory, nullptr, 0.0f, upperBound, activationDescriptor);
304 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
305 LayerTestResult<T,4> ConstantLinearActivationTestCommon(
306 armnn::IWorkloadFactory& workloadFactory,
307 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
311 unsigned int inputHeight = 20;
312 unsigned int inputWidth = 17;
313 unsigned int inputChannels = 3;
314 unsigned int batchSize = 5;
316 armnn::TensorInfo inputTensorInfo;
317 armnn::TensorInfo outputTensorInfo;
319 unsigned int shape[] = {batchSize, inputChannels, inputHeight, inputWidth};
321 inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
322 outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
324 // Set quantization parameters if the requested type is a quantized type.
325 if(armnn::IsQuantizedType<T>())
327 inputTensorInfo.SetQuantizationScale(qScale);
328 inputTensorInfo.SetQuantizationOffset(qOffset);
329 outputTensorInfo.SetQuantizationScale(qScale);
330 outputTensorInfo.SetQuantizationOffset(qOffset);
333 LayerTestResult<T, 4> ret(outputTensorInfo);
335 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
336 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
338 // Do linear activation that should leave the tensor unchanged.
339 armnn::ActivationQueueDescriptor data;
340 armnn::WorkloadInfo info;
341 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
342 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
343 data.m_Parameters.m_A = 1.0f;
344 data.m_Parameters.m_B = 0.0f;
345 data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
347 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
349 inputHandle->Allocate();
350 outputHandle->Allocate();
352 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
353 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
357 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
359 // Ensure output equals input.
360 ret.outputExpected = input;
365 LayerTestResult<float, 4> ConstantLinearActivationTest(
366 armnn::IWorkloadFactory& workloadFactory,
367 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
369 return ConstantLinearActivationTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager);
372 LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(
373 armnn::IWorkloadFactory& workloadFactory,
374 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
376 return ConstantLinearActivationTestCommon<armnn::DataType::QuantisedAsymm8>(
377 workloadFactory, memoryManager, 4.0f, 3);
380 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
381 LayerTestResult<T, 4> SimpleActivationTest(
382 armnn::IWorkloadFactory& workloadFactory,
383 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
384 armnn::ActivationFunction activationFunction,
385 float activationParameterA,
386 float activationParameterB,
389 const std::vector<float>& inputData,
390 const std::vector<float>& outputExpectedData)
392 constexpr static unsigned int inputWidth = 16u;
393 constexpr static unsigned int inputHeight = 1u;
394 constexpr static unsigned int inputChannels = 1u;
395 constexpr static unsigned int inputBatchSize = 1u;
397 constexpr static unsigned int outputWidth = inputWidth;
398 constexpr static unsigned int outputHeight = inputHeight;
399 constexpr static unsigned int outputChannels = inputChannels;
400 constexpr static unsigned int outputBatchSize = inputBatchSize;
402 armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth }, ArmnnType);
403 armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth }, ArmnnType);
405 // Set quantization parameters if the requested type is a quantized type.
406 if(armnn::IsQuantizedType<T>())
408 inputTensorInfo.SetQuantizationScale(qScale);
409 inputTensorInfo.SetQuantizationOffset(qOffset);
410 outputTensorInfo.SetQuantizationScale(qScale);
411 outputTensorInfo.SetQuantizationOffset(qOffset);
414 LayerTestResult<T, 4> result(inputTensorInfo);
416 auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
418 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
419 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
421 // Setup bounded ReLu.
422 armnn::ActivationQueueDescriptor descriptor;
423 armnn::WorkloadInfo workloadInfo;
424 AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
425 AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
427 descriptor.m_Parameters.m_Function = activationFunction;
428 descriptor.m_Parameters.m_A = activationParameterA;
429 descriptor.m_Parameters.m_B = activationParameterB;
431 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
433 inputHandle->Allocate();
434 outputHandle->Allocate();
436 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
440 CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
442 // Calculated manually.
443 result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
448 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
449 LayerTestResult<T, 4> SimpleSigmoidTestCommon(
450 armnn::IWorkloadFactory& workloadFactory,
451 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
455 std::vector<float> inputData = {
456 -0.1f, -0.2f, -0.3f, -0.4f,
457 0.1f, 0.2f, 0.3f, 0.4f,
458 -1.0f, -2.0f, -3.0f, -4.0f,
459 1.0f, 2.0f, 3.0f, 4.0f
462 // Calculate output values for input.
463 auto f = [](float value)
465 return 1.0f / (1.0f + std::exp(-value));
467 std::vector<float> outputExpectedData(inputData.size());
468 std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
470 return SimpleActivationTest<ArmnnType>(workloadFactory,
472 armnn::ActivationFunction::Sigmoid,
481 LayerTestResult<float, 4> SimpleSigmoidTest(
482 armnn::IWorkloadFactory& workloadFactory,
483 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
485 return SimpleSigmoidTestCommon<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
488 LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(
489 armnn::IWorkloadFactory& workloadFactory,
490 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
492 return SimpleSigmoidTestCommon<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.1f, 50);
495 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
496 LayerTestResult<T,4> CompareActivationTestImpl(
497 armnn::IWorkloadFactory& workloadFactory,
498 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
499 armnn::IWorkloadFactory& refWorkloadFactory,
500 armnn::ActivationFunction f,
501 unsigned int batchSize = 5,
505 unsigned int width = 17;
506 unsigned int height = 29;
507 unsigned int channels = 2;
512 armnn::TensorInfo inputTensorInfo;
513 armnn::TensorInfo outputTensorInfo;
515 unsigned int shape[] = {batchSize, channels, height, width};
517 inputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
518 outputTensorInfo = armnn::TensorInfo(4, shape, ArmnnType);
520 // Set quantization parameters if the requested type is a quantized type.
521 if(armnn::IsQuantizedType<T>())
523 inputTensorInfo.SetQuantizationScale(qScale);
524 inputTensorInfo.SetQuantizationOffset(qOffset);
525 outputTensorInfo.SetQuantizationScale(qScale);
526 outputTensorInfo.SetQuantizationOffset(qOffset);
529 float minVal = -10.f;
530 if (f == armnn::ActivationFunction::Sqrt)
535 boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
538 LayerTestResult<T,4> ret(outputTensorInfo);
539 auto boostArrayExtents = boost::extents
540 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
541 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
542 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
543 [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
544 ret.output.resize(boostArrayExtents);
545 ret.outputExpected.resize(boostArrayExtents);
548 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
549 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
551 std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
552 std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
554 armnn::ActivationQueueDescriptor data;
555 armnn::WorkloadInfo info;
556 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
557 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
558 data.m_Parameters.m_A = a;
559 data.m_Parameters.m_B = b;
560 data.m_Parameters.m_Function = f;
562 armnn::ActivationQueueDescriptor refData = data;
563 armnn::WorkloadInfo refInfo = info;
564 SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
565 SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
567 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
568 BOOST_ASSERT(workload != nullptr);
569 std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
570 BOOST_ASSERT(workloadRef != nullptr);
572 inputHandle->Allocate();
573 outputHandle->Allocate();
574 inputHandleRef->Allocate();
575 outputHandleRef->Allocate();
577 CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
578 CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
581 workloadRef->Execute();
583 CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
584 CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
589 LayerTestResult<float,4> CompareActivationTest(
590 armnn::IWorkloadFactory& workloadFactory,
591 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
592 armnn::IWorkloadFactory& refWorkloadFactory,
593 armnn::ActivationFunction f,
594 unsigned int batchSize)
596 return CompareActivationTestImpl<armnn::DataType::Float32>(
597 workloadFactory, memoryManager, refWorkloadFactory, f, batchSize);
600 LayerTestResult<uint8_t,4> CompareActivationUint8Test(
601 armnn::IWorkloadFactory& workloadFactory,
602 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
603 armnn::IWorkloadFactory& refWorkloadFactory,
604 armnn::ActivationFunction f)
606 return CompareActivationTestImpl<armnn::DataType::QuantisedAsymm8>(
607 workloadFactory, memoryManager, refWorkloadFactory, f, 5, 0.1f, 50);