BOOST_TEST(workload.get() == boost::polymorphic_downcast<Workload*>(workload.get()),
"Cannot convert to derived class");
std::string reasonIfUnsupported;
- layer.SetBackendId(factory.GetCompute());
+ layer.SetBackendId(factory.GetBackendId());
BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported));
return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
}
public:
virtual ~IWorkloadFactory() { }
- virtual Compute GetCompute() const = 0;
+ /// Identifies the backend this factory creates workloads for.
+ /// Replaces the old Compute-enum accessor; the returned reference must
+ /// outlive the factory (implementations return a static instance).
+ virtual const BackendId& GetBackendId() const = 0;
/// Informs the memory manager that the network is finalized and ready for execution.
virtual void Finalize() { }
// SPDX-License-Identifier: MIT
//
#include "ClWorkloadFactory.hpp"
+#include "ClBackendId.hpp"
#include <armnn/Exceptions.hpp>
#include <armnn/Utils.hpp>
namespace armnn
{
+namespace
+{
+// File-local, immutable identifier naming the GPU (OpenCL) backend.
+// Shared by IsLayerSupported() and GetBackendId() so both report the same id.
+// NOTE(review): assumes ClBackendId() yields the canonical GpuAcc id -- confirm in ClBackendId.hpp.
+static const BackendId s_Id{ClBackendId()};
+}
+
bool ClWorkloadFactory::IsLayerSupported(const Layer& layer,
Optional<DataType> dataType,
std::string& outReasonIfUnsupported)
{
- return IWorkloadFactory::IsLayerSupported(Compute::GpuAcc, layer, dataType, outReasonIfUnsupported);
+ // Dispatch through the shared BackendId rather than the hard-coded Compute enum.
+ return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
+}
+
+// Returns this factory's backend id; the reference is valid for the program
+// lifetime because s_Id has static storage duration.
+const BackendId& ClWorkloadFactory::GetBackendId() const
+{
+ return s_Id;
}
#ifdef ARMCOMPUTECL_ENABLED
public:
ClWorkloadFactory();
- virtual Compute GetCompute() const override { return Compute::GpuAcc; }
+ const BackendId& GetBackendId() const override;
static bool IsLayerSupported(const Layer& layer,
Optional<DataType> dataType,
// SPDX-License-Identifier: MIT
//
#include "NeonWorkloadFactory.hpp"
+#include "NeonBackendId.hpp"
#include <armnn/Utils.hpp>
#include <backends/CpuTensorHandle.hpp>
#include <Layer.hpp>
namespace armnn
{
+namespace
+{
+// File-local, immutable identifier naming the Neon (CpuAcc) backend.
+// Shared by IsLayerSupported() and GetBackendId() so both report the same id.
+// NOTE(review): assumes NeonBackendId() yields the canonical CpuAcc id -- confirm in NeonBackendId.hpp.
+static const BackendId s_Id{NeonBackendId()};
+}
+
bool NeonWorkloadFactory::IsLayerSupported(const Layer& layer,
Optional<DataType> dataType,
std::string& outReasonIfUnsupported)
{
- return IWorkloadFactory::IsLayerSupported(Compute::CpuAcc, layer, dataType, outReasonIfUnsupported);
+ // Dispatch through the shared BackendId rather than the hard-coded Compute enum.
+ return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
+}
+
+// Returns this factory's backend id; the reference is valid for the program
+// lifetime because s_Id has static storage duration.
+const BackendId& NeonWorkloadFactory::GetBackendId() const
+{
+ return s_Id;
}
#ifdef ARMCOMPUTENEON_ENABLED
public:
NeonWorkloadFactory();
- virtual Compute GetCompute() const override { return Compute::CpuAcc; }
+ const BackendId& GetBackendId() const override;
static bool IsLayerSupported(const Layer& layer,
Optional<DataType> dataType,
#include <backends/MemCopyWorkload.hpp>
#include <backends/MakeWorkloadHelper.hpp>
#include "RefWorkloadFactory.hpp"
+#include "RefBackendId.hpp"
#include "workloads/RefWorkloads.hpp"
#include "Layer.hpp"
namespace armnn
{
+namespace
+{
+// File-local, immutable identifier naming the CPU reference backend.
+// NOTE(review): assumes RefBackendId() yields the canonical CpuRef id -- confirm in RefBackendId.hpp.
+static const BackendId s_Id{RefBackendId()};
+}
+
template <typename F32Workload, typename U8Workload, typename QueueDescriptorType>
std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
const WorkloadInfo& info) const
{
}
+// Returns this factory's backend id; the reference is valid for the program
+// lifetime because s_Id has static storage duration.
+const BackendId& RefWorkloadFactory::GetBackendId() const
+{
+ return s_Id;
+}
+
bool RefWorkloadFactory::IsLayerSupported(const Layer& layer,
Optional<DataType> dataType,
std::string& outReasonIfUnsupported)
{
- return IWorkloadFactory::IsLayerSupported(Compute::CpuRef, layer, dataType, outReasonIfUnsupported);
+ // Dispatch through the shared BackendId rather than the hard-coded Compute enum.
+ return IWorkloadFactory::IsLayerSupported(s_Id, layer, dataType, outReasonIfUnsupported);
}
std::unique_ptr<ITensorHandle> RefWorkloadFactory::CreateTensorHandle(const TensorInfo& tensorInfo) const
explicit RefWorkloadFactory();
virtual ~RefWorkloadFactory() {}
- virtual Compute GetCompute() const override { return Compute::CpuRef; }
+ const BackendId& GetBackendId() const override;
static bool IsLayerSupported(const Layer& layer,
Optional<DataType> dataType,
SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
// Don't execute if Normalization is not supported for the method and channel types, as an exception will be raised.
- armnn::Compute compute = workloadFactory.GetCompute();
+ armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- ret.supported = armnn::IsNormalizationSupported(compute, inputTensorInfo, outputTensorInfo, data.m_Parameters,
+ ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
reasonIfUnsupported, reasonIfUnsupportedMaxLen);
if (!ret.supported)
{
AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
// Don't execute if Pooling is not supported, as an exception will be raised.
- armnn::Compute compute = workloadFactory.GetCompute();
+ armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- result.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo,
+ result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
queueDescriptor.m_Parameters,
reasonIfUnsupported, reasonIfUnsupportedMaxLen);
if (!result.supported)
std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
// Don't execute if Pooling is not supported, as an exception will be raised.
- armnn::Compute compute = workloadFactory.GetCompute();
+ armnn::BackendId backend = workloadFactory.GetBackendId();
const size_t reasonIfUnsupportedMaxLen = 255;
char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
- comparisonResult.supported = armnn::IsPooling2dSupported(compute, inputTensorInfo, outputTensorInfo,
+ comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
data.m_Parameters,
reasonIfUnsupported, reasonIfUnsupportedMaxLen);
if (!comparisonResult.supported)