src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.cpp \
src/armnn/backends/ClWorkloads/ClDivisionFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp \
- src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.cpp \
+ src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.cpp \
src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClMergerFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp
src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.cpp
src/armnn/backends/ClWorkloads/ClFloorFloatWorkload.hpp
- src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.cpp
- src/armnn/backends/ClWorkloads/ClFullyConnectedFloatWorkload.hpp
+ src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.cpp
+ src/armnn/backends/ClWorkloads/ClFullyConnectedWorkload.hpp
src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.cpp
src/armnn/backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp
src/armnn/backends/ClWorkloads/ClLstmFloatWorkload.cpp
#include "ClWorkloads/ClDivisionFloatWorkload.hpp"
#include "ClWorkloads/ClL2NormalizationFloatWorkload.hpp"
#include "ClWorkloads/ClMultiplicationFloatWorkload.hpp"
-#include "ClWorkloads/ClFullyConnectedFloatWorkload.hpp"
+#include "ClWorkloads/ClFullyConnectedWorkload.hpp"
#include "ClWorkloads/ClPooling2dBaseWorkload.hpp"
#include "ClWorkloads/ClPermuteWorkload.hpp"
#include "ClWorkloads/ClNormalizationFloatWorkload.hpp"
const FullyConnectedDescriptor& descriptor,
std::string* reasonIfUnsupported)
{
- // At the moment U8 is unsupported
- if (input.GetDataType() == DataType::QuantisedAsymm8)
- {
- return false;
- }
FORWARD_WORKLOAD_VALIDATE_FUNC(ClFullyConnectedWorkloadValidate,
reasonIfUnsupported,
input,
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateFullyConnected(
const FullyConnectedQueueDescriptor& descriptor, const WorkloadInfo& info) const
{
- return MakeWorkload<ClFullyConnectedFloatWorkload, NullWorkload>(descriptor, info,
- m_MemoryManager.GetIntraLayerManager());
+ return MakeWorkload<ClFullyConnectedWorkload, ClFullyConnectedWorkload>(descriptor, info,
+ m_MemoryManager.GetIntraLayerManager());
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreatePermute(const PermuteQueueDescriptor& descriptor,
#include "backends/ClWorkloads/ClDepthwiseConvolutionUint8Workload.hpp"
#include "backends/ClWorkloads/ClDivisionFloatWorkload.hpp"
#include "backends/ClWorkloads/ClFloorFloatWorkload.hpp"
-#include "backends/ClWorkloads/ClFullyConnectedFloatWorkload.hpp"
+#include "backends/ClWorkloads/ClFullyConnectedWorkload.hpp"
#include "backends/ClWorkloads/ClL2NormalizationFloatWorkload.hpp"
#include "backends/ClWorkloads/ClLstmFloatWorkload.hpp"
#include "backends/ClWorkloads/ClMergerFloatWorkload.hpp"
// SPDX-License-Identifier: MIT
//
-#include "ClFullyConnectedFloatWorkload.hpp"
+#include "ClFullyConnectedWorkload.hpp"
#include "backends/ClTensorHandle.hpp"
#include "backends/CpuTensorHandle.hpp"
#include "backends/ArmComputeTensorUtils.hpp"
fullyConnectedLayerInfo);
}
-ClFullyConnectedFloatWorkload::ClFullyConnectedFloatWorkload(const FullyConnectedQueueDescriptor& descriptor,
+ClFullyConnectedWorkload::ClFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
- : FloatWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
+ : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
, m_FullyConnectedLayer(memoryManager)
{
m_WeightsTensor = std::make_unique<arm_compute::CLTensor>();
BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
}
- m_Data.ValidateInputsOutputs("ClFullyConnectedFloatWorkload", 1, 1);
+ m_Data.ValidateInputsOutputs("ClFullyConnectedWorkload", 1, 1);
arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
// Allocate
- InitializeArmComputeClTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);
+ if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QuantisedAsymm8)
+ {
+ InitialiseArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight->GetConstTensor<uint8_t>());
+ }
+ else
+ {
+ InitializeArmComputeClTensorDataForFloatTypes(*m_WeightsTensor, m_Data.m_Weight);
+ }
if (m_BiasesTensor)
{
- InitializeArmComputeClTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
+ if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
+ {
+ InitialiseArmComputeClTensorData(*m_BiasesTensor, m_Data.m_Bias->GetConstTensor<int32_t>());
+ }
+ else
+ {
+ InitializeArmComputeClTensorDataForFloatTypes(*m_BiasesTensor, m_Data.m_Bias);
+ }
}
// Force Compute Library to perform the necessary copying and reshaping, after which
FreeUnusedTensors();
}
-void ClFullyConnectedFloatWorkload::Execute() const
+void ClFullyConnectedWorkload::Execute() const
{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedFloatWorkload_Execute");
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
m_FullyConnectedLayer.run();
}
-void ClFullyConnectedFloatWorkload::FreeUnusedTensors()
+void ClFullyConnectedWorkload::FreeUnusedTensors()
{
FreeTensorIfUnused(m_WeightsTensor);
FreeTensorIfUnused(m_BiasesTensor);
const TensorInfo& biases,
const FullyConnectedDescriptor& descriptor);
-class ClFullyConnectedFloatWorkload : public armnn::FloatWorkload<armnn::FullyConnectedQueueDescriptor>
+class ClFullyConnectedWorkload : public armnn::BaseWorkload<armnn::FullyConnectedQueueDescriptor>
{
public:
- ClFullyConnectedFloatWorkload(const armnn::FullyConnectedQueueDescriptor& descriptor,
- const armnn::WorkloadInfo& info,
- std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
+ ClFullyConnectedWorkload(const armnn::FullyConnectedQueueDescriptor& descriptor,
+ const armnn::WorkloadInfo& info,
+ std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
- using armnn::FloatWorkload<armnn::FullyConnectedQueueDescriptor>::m_Data;
+ using armnn::BaseWorkload<armnn::FullyConnectedQueueDescriptor>::m_Data;
void Execute() const override;
private:
// Float32 fully-connected coverage: plain, biased, and transposed-weights variants.
ARMNN_AUTO_TEST_CASE(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
ARMNN_AUTO_TEST_CASE(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
// Uint8 coverage (with and without bias) — enabled by the unified ClFullyConnectedWorkload.
ARMNN_AUTO_TEST_CASE(FullyConnectedUint8, FullyConnectedUint8Test, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedBiasedUint8, FullyConnectedUint8Test, true)
// Larger-shape stress cases, normal and transposed.
ARMNN_AUTO_TEST_CASE(FullyConnectedLarge, FullyConnectedLargeTest, false)
ARMNN_AUTO_TEST_CASE(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
// Verifies the factory produces a ClFullyConnectedWorkload for Float32 inputs
// (the class is no longer float-specific after the rename).
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
}
// Same factory check as the Float32 case above, but for Float16 inputs.
BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
{
    ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
}
template <typename NormalizationWorkloadType, typename armnn::DataType DataType>