IVGCVSW-1946: Remove armnn/src from the include paths
src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonFullyConnectedWorkload.hpp"

// For InitializeArmComputeTensorData and ARMNN_SCOPED_PROFILING_EVENT_NEON.
#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

// For boost::polymorphic_downcast.
#include <boost/cast.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& weights,
                                                       const TensorInfo& biases,
                                                       const FullyConnectedDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
    const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);

    // Biases are optional; only build and pass an ACL TensorInfo when they are enabled.
    arm_compute::TensorInfo aclBiases;
    arm_compute::TensorInfo* optionalAclBiases = nullptr;
    if (descriptor.m_BiasEnabled)
    {
        aclBiases = BuildArmComputeTensorInfo(biases);
        optionalAclBiases = &aclBiases;
    }

    const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor);

    return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
                                                        &aclWeights,
                                                        optionalAclBiases,
                                                        &aclOutput,
                                                        fullyConnectedLayerInfo);
}
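
// Illustrative use (a sketch, not part of this file): a backend layer-support
// check can forward its TensorInfos to this helper and inspect the returned
// status, along the lines of:
//
//     const arm_compute::Status aclStatus =
//         NeonFullyConnectedWorkloadValidate(input, output, weights, biases, descriptor);
//     const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);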

NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
    , m_FullyConnectedLayer(memoryManager)
{
    m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1);

    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
    }

    // Construct the Compute Library layer; the transpose flag is the only
    // descriptor setting it needs here, as bias presence is conveyed by the
    // (possibly null) biases tensor.
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
    m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);

    // Allocate backing memory and copy in the constant data; the helper
    // dispatches on the tensor handle's data type internally, so a single
    // call covers all supported types.
    InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);

    if (m_BiasesTensor)
    {
        InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
    }

    // Force Compute Library to perform the necessary copying and reshaping now,
    // after which the staging tensors are no longer needed and can be freed.
    m_FullyConnectedLayer.prepare();
    FreeUnusedTensors();
}

void NeonFullyConnectedWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFullyConnectedWorkload_Execute");
    m_FullyConnectedLayer.run();
}
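
// Illustrative call sequence (a sketch; the workload factory and graph
// plumbing that normally creates and drives the workload is omitted):
//
//     NeonFullyConnectedWorkload workload(queueDescriptor, workloadInfo, memoryManager);
//     workload.Execute(); // runs the configured NEFullyConnectedLayer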

void NeonFullyConnectedWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_WeightsTensor);
    FreeTensorIfUnused(m_BiasesTensor);
}

} //namespace armnn