IVGCVSW-1946: Remove armnn/src from the include paths
src/backends/neon/workloads/NeonNormalizationFloatWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonNormalizationFloatWorkload.hpp"
#include <neon/NeonLayerSupport.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

using namespace armnn::armcomputetensorutils;

namespace armnn
{

namespace
{

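// Checks whether the NEON backend can run normalization with the given
// descriptor: only the LocalBrightness method is implemented, and the
// normalization window size must be odd. On failure, the reason is written
// to reasonIfUnsupported when a destination string was provided.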
bool IsNeonNormalizationDescriptorSupported(const NormalizationDescriptor& parameters,
                                            Optional<std::string&> reasonIfUnsupported)
{
    if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
    {
        if (reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = "Unsupported normalization method type; only LocalBrightness is supported.";
        }
        return false;
    }
    if (parameters.m_NormSize % 2 == 0)
    {
        if (reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
        }
        return false;
    }

    return true;
}

} // anonymous namespace

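// Static validation entry point: checks a prospective configuration without
// allocating any resources, by building ACL tensor infos for the given data
// layout and delegating to NENormalizationLayer::validate.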
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const NormalizationDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    arm_compute::NormalizationLayerInfo normalizationInfo = BuildArmComputeNormalizationLayerInfo(descriptor);

    return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}

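// The constructor validates the queue descriptor, rejects configurations this
// backend cannot run, and configures the underlying NENormalizationLayer once;
// Execute() then only has to run the already-configured ACL function.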
NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
                                                   const WorkloadInfo& info,
                                                   std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
    , m_NormalizationLayer(memoryManager)
{
    m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
    std::string reasonIfUnsupported;
    if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUnsupported)))
    {
        throw UnimplementedException(reasonIfUnsupported);
    }

    // Input and output tensors must have the same shape; all four dimensions
    // are compared explicitly, so 4D tensors are assumed here.
    if (info.m_InputTensorInfos[0].GetShape()[0] != info.m_OutputTensorInfos[0].GetShape()[0]
        || info.m_InputTensorInfos[0].GetShape()[1] != info.m_OutputTensorInfos[0].GetShape()[1]
        || info.m_InputTensorInfos[0].GetShape()[2] != info.m_OutputTensorInfos[0].GetShape()[2]
        || info.m_InputTensorInfos[0].GetShape()[3] != info.m_OutputTensorInfos[0].GetShape()[3])
    {
        throw InvalidArgumentException("Normalization requires input and output tensors to have equal shapes.");
    }

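    // Retrieve the underlying ACL tensors from the backend tensor handles and
    // propagate the workload's data layout (NCHW or NHWC) to ACL.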
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

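    // Translate the Arm NN descriptor into ACL's NormalizationLayerInfo. The
    // trailing 'false' is ACL's is_scaled flag: alpha is applied as given
    // rather than being divided by the normalization window size.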
    const arm_compute::NormType normType =
        ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType);
    arm_compute::NormalizationLayerInfo normalizationInfo(normType,
                                                          m_Data.m_Parameters.m_NormSize,
                                                          m_Data.m_Parameters.m_Alpha,
                                                          m_Data.m_Parameters.m_Beta,
                                                          m_Data.m_Parameters.m_K,
                                                          false);

    m_NormalizationLayer.configure(&input, &output, normalizationInfo);
}

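// Runs the configured NENormalizationLayer; the scoped macro records the
// execution time under the NEON profiling instrumentation.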
void NeonNormalizationFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloatWorkload_Execute");
    m_NormalizationLayer.run();
}

} // namespace armnn