//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "NeonFullyConnectedFloat32Workload.hpp"

#include "backends/ArmComputeTensorUtils.hpp"
#include "backends/ArmComputeUtils.hpp"
#include "backends/CpuTensorHandle.hpp"

namespace armnn
{
using namespace armcomputetensorutils;
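
// Pre-flight check: asks the Compute Library whether NEFullyConnectedLayer can
// handle these tensor shapes and this descriptor, without configuring anything.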
arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                       const TensorInfo& output,
                                                       const TensorInfo& weights,
                                                       const TensorInfo& biases,
                                                       const FullyConnectedDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
    const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
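
    // The bias is optional in the Compute Library; a null TensorInfo pointer
    // means "no bias", so only build one when the descriptor enables it.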
    arm_compute::TensorInfo aclBiases;
    arm_compute::TensorInfo *optionalAclBiases = nullptr;
    if (descriptor.m_BiasEnabled)
    {
        aclBiases = BuildArmComputeTensorInfo(biases);
        optionalAclBiases = &aclBiases;
    }

    const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor);

    return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
                                                        &aclWeights,
                                                        optionalAclBiases,
                                                        &aclOutput,
                                                        fullyConnectedLayerInfo);
}

NeonFullyConnectedFloat32Workload::NeonFullyConnectedFloat32Workload(const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : FloatWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
    , m_FullyConnectedLayer(memoryManager)
{
    m_Data.ValidateInputsOutputs("NeonFullyConnectedFloat32Workload", 1, 1);
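
    // The input/output tensors are owned by their handles; we only borrow
    // references to the underlying Compute Library tensors here.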
    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
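
    // Weights (and biases below) arrive as constant ArmNN tensors, so the
    // workload keeps its own Compute Library staging tensors for them.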
    m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
    BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
    }
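
    // Configure the layer itself; m_TransposeWeightMatrix tells the Compute
    // Library whether the weights need transposing before the matrix multiply.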
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = m_Data.m_Parameters.m_TransposeWeightMatrix;
    m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
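
    // Copy the constant weight (and, if present, bias) data into the staging
    // tensors; the "ForFloatTypes" helper handles the float data types (F16/F32).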
    InitializeArmComputeTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);

    if (m_BiasesTensor)
    {
        InitializeArmComputeTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
    }

    // Force the Compute Library to perform the necessary copying and reshaping now,
    // after which the staging tensors are no longer needed and can be freed.
    m_FullyConnectedLayer.prepare();
    FreeUnusedTensors();
}
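
// All expensive setup happens in the constructor; Execute() just runs the
// already-prepared Compute Library function.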
void NeonFullyConnectedFloat32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFullyConnectedFloat32Workload_Execute");
    m_FullyConnectedLayer.run();
}
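
// Releases the staging tensors' memory, but only where the Compute Library no
// longer marks the tensor as used (prepare() keeps its own reshaped copies).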
void NeonFullyConnectedFloat32Workload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_WeightsTensor);
    FreeTensorIfUnused(m_BiasesTensor);
}

} //namespace armnn