IVGCVSW-1946: Remove armnn/src from the include paths
platform/upstream/armnn.git / src/backends/neon/workloads/NeonActivationWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonActivationWorkload.hpp"
#include <aclCommon/ArmComputeUtils.hpp>

namespace armnn
{

arm_compute::Status NeonActivationWorkloadValidate(const TensorInfo& input,
                                                   const TensorInfo& output,
                                                   const ActivationDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    const arm_compute::ActivationLayerInfo activationLayerInfo =
        ConvertActivationDescriptorToAclActivationLayerInfo(descriptor);

    if (input.GetDataType() == DataType::QuantisedAsymm8 &&
        activationLayerInfo.activation() == arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC)
    {
        return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
                                   "Neon: Logistic Activations unsupported with QAsymm8 data type."};
    }

    return arm_compute::NEActivationLayer::validate(&aclInput,
                                                    &aclOutput,
                                                    activationLayerInfo);
}

NeonActivationWorkload::NeonActivationWorkload(const ActivationQueueDescriptor& descriptor,
                                               const WorkloadInfo& info)
    : BaseWorkload<ActivationQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonActivationWorkload", 1, 1);

    const arm_compute::ActivationLayerInfo activationLayerInfo =
        ConvertActivationDescriptorToAclActivationLayerInfo(m_Data.m_Parameters);

    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_ActivationLayer.configure(&input, &output, activationLayerInfo);
}

void NeonActivationWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonActivationWorkload_Execute");
    m_ActivationLayer.run();
}

} //namespace armnn
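
For context, a minimal sketch of how the validate helper above might be driven from layer-support code, assuming a Float32 tensor pair and a plain ReLu activation. The shapes, the descriptor values, and the function name IsReLuSupportedOnNeonExample are illustrative assumptions and are not part of the Arm NN sources.

// Illustrative only -- not part of NeonActivationWorkload.cpp.
#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/Types.hpp>

#include "NeonActivationWorkload.hpp"

bool IsReLuSupportedOnNeonExample()
{
    using namespace armnn;

    // Describe a small Float32 tensor; any valid shape would do here.
    unsigned int dimensions[] = { 1, 16 };
    const TensorInfo input(TensorShape(2, dimensions), DataType::Float32);
    const TensorInfo output(TensorShape(2, dimensions), DataType::Float32);

    // Request a plain ReLu activation.
    ActivationDescriptor descriptor;
    descriptor.m_Function = ActivationFunction::ReLu;

    // NeonActivationWorkloadValidate forwards to NEActivationLayer::validate
    // unless it hits the QAsymm8/Logistic early-out seen above.
    const arm_compute::Status status =
        NeonActivationWorkloadValidate(input, output, descriptor);
    return status.error_code() == arm_compute::ErrorCode::OK;
}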