src/armnn/backends/ArmComputeTensorUtils.cpp \
src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp \
- src/armnn/backends/ClWorkloads/ClAdditionBaseWorkload.cpp \
- src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.cpp \
- src/armnn/backends/ClWorkloads/ClAdditionUint8Workload.cpp \
- src/armnn/backends/ClWorkloads/ClSubtractionBaseWorkload.cpp \
- src/armnn/backends/ClWorkloads/ClSubtractionFloatWorkload.cpp \
- src/armnn/backends/ClWorkloads/ClSubtractionUint8Workload.cpp \
+ src/armnn/backends/ClWorkloads/ClAdditionWorkload.cpp \
+ src/armnn/backends/ClWorkloads/ClSubtractionWorkload.cpp \
src/armnn/backends/ClWorkloads/ClBaseConstantWorkload.cpp \
src/armnn/backends/ClWorkloads/ClBatchNormalizationFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClConstantFloatWorkload.cpp \
src/armnn/backends/ClWorkloads/ClActivationFloatWorkload.hpp
src/armnn/backends/ClWorkloads/ClActivationUint8Workload.cpp
src/armnn/backends/ClWorkloads/ClActivationUint8Workload.hpp
- src/armnn/backends/ClWorkloads/ClAdditionBaseWorkload.cpp
- src/armnn/backends/ClWorkloads/ClAdditionBaseWorkload.hpp
- src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.cpp
- src/armnn/backends/ClWorkloads/ClAdditionFloatWorkload.hpp
- src/armnn/backends/ClWorkloads/ClAdditionUint8Workload.cpp
- src/armnn/backends/ClWorkloads/ClAdditionUint8Workload.hpp
- src/armnn/backends/ClWorkloads/ClSubtractionBaseWorkload.cpp
- src/armnn/backends/ClWorkloads/ClSubtractionBaseWorkload.hpp
- src/armnn/backends/ClWorkloads/ClSubtractionFloatWorkload.cpp
- src/armnn/backends/ClWorkloads/ClSubtractionFloatWorkload.hpp
- src/armnn/backends/ClWorkloads/ClSubtractionUint8Workload.cpp
- src/armnn/backends/ClWorkloads/ClSubtractionUint8Workload.hpp
+ src/armnn/backends/ClWorkloads/ClAdditionWorkload.cpp
+ src/armnn/backends/ClWorkloads/ClAdditionWorkload.hpp
+ src/armnn/backends/ClWorkloads/ClSubtractionWorkload.cpp
+ src/armnn/backends/ClWorkloads/ClSubtractionWorkload.hpp
src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.cpp
src/armnn/backends/ClWorkloads/ClConvertFp16ToFp32Workload.hpp
src/armnn/backends/ClWorkloads/ClConvertFp32ToFp16Workload.cpp
#include <boost/core/ignore_unused.hpp>
#ifdef ARMCOMPUTECL_ENABLED
-#include "ClWorkloads/ClAdditionFloatWorkload.hpp"
+#include "ClWorkloads/ClAdditionWorkload.hpp"
#include "ClWorkloads/ClActivationFloatWorkload.hpp"
#include "ClWorkloads/ClBatchNormalizationFloatWorkload.hpp"
#include "ClWorkloads/ClConvertFp16ToFp32Workload.hpp"
#include "ClWorkloads/ClPermuteWorkload.hpp"
#include "ClWorkloads/ClNormalizationFloatWorkload.hpp"
#include "ClWorkloads/ClSoftmaxBaseWorkload.hpp"
-#include "ClWorkloads/ClSubtractionFloatWorkload.hpp"
+#include "ClWorkloads/ClSubtractionWorkload.hpp"
#include "ClWorkloads/ClLstmFloatWorkload.hpp"
#endif
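
For context: this include block is only compiled when ARMCOMPUTECL_ENABLED is defined, and it now pulls in the merged ClAdditionWorkload.hpp / ClSubtractionWorkload.hpp so that the ClAdditionValidate and ClSubtractionValidate helpers declared there remain visible to the support queries in this file. Below is a minimal sketch of such a caller, for illustration only; IsAdditionSupportedSketch and its fallback message are invented names, not the actual ArmNN layer-support API.

// Hypothetical caller, for illustration only -- IsAdditionSupportedSketch is
// an invented name, not the real ArmNN layer-support entry point.
#include <string>
#include "armnn/Tensor.hpp"                     // armnn::TensorInfo
#ifdef ARMCOMPUTECL_ENABLED
#include "ClWorkloads/ClAdditionWorkload.hpp"   // declares armnn::ClAdditionValidate
#endif

bool IsAdditionSupportedSketch(const armnn::TensorInfo& input0,
                               const armnn::TensorInfo& input1,
                               const armnn::TensorInfo& output,
                               std::string* reasonIfUnsupported)
{
#ifdef ARMCOMPUTECL_ENABLED
    // Delegate the decision to the Arm Compute Library validate hook.
    return armnn::ClAdditionValidate(input0, input1, output, reasonIfUnsupported);
#else
    if (reasonIfUnsupported)
    {
        *reasonIfUnsupported = "ARMCOMPUTECL_ENABLED is not defined";
    }
    return false;
#endif
}
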
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateAddition(const AdditionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<ClAdditionFloatWorkload, ClAdditionUint8Workload>(descriptor, info);
+ return MakeWorkload<ClAdditionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
+ ClAdditionWorkload<armnn::DataType::QuantisedAsymm8>>(descriptor, info);
}
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateMultiplication(
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateSubtraction(const SubtractionQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<ClSubtractionFloatWorkload, ClSubtractionUint8Workload>(descriptor, info);
+ return MakeWorkload<ClSubtractionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
+ ClSubtractionWorkload<armnn::DataType::QuantisedAsymm8>>(descriptor, info);
}
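
With the Float and Uint8 classes folded into one template, the factory now names the two supported instantiations directly: the first template argument of MakeWorkload covers Float16/Float32 tensors and the second covers QuantisedAsymm8. The standalone sketch below illustrates that kind of data-type dispatch; every name in it (MakeWorkloadSketch, SketchInfo, the dummy workloads) is invented and only mirrors the shape of the real helper.

// Standalone illustration of data-type-based workload dispatch. All names
// here are invented; the real MakeWorkload helper in ArmNN differs in detail.
#include <iostream>
#include <memory>
#include <stdexcept>

enum class DataType { Float16, Float32, QuantisedAsymm8 };

struct SketchInfo { DataType m_InputType; };
struct AdditionDescriptor {};

struct IWorkload
{
    virtual ~IWorkload() = default;
    virtual void Execute() const = 0;
};

struct FloatAdditionSketch : IWorkload
{
    FloatAdditionSketch(const AdditionDescriptor&, const SketchInfo&) {}
    void Execute() const override { std::cout << "float addition\n"; }
};

struct Uint8AdditionSketch : IWorkload
{
    Uint8AdditionSketch(const AdditionDescriptor&, const SketchInfo&) {}
    void Execute() const override { std::cout << "uint8 addition\n"; }
};

// Chooses between the float and the quantised workload type from the tensor
// data type, mirroring MakeWorkload<FloatWorkload, Uint8Workload>(...) above.
template <typename FloatWorkload, typename Uint8Workload, typename Descriptor>
std::unique_ptr<IWorkload> MakeWorkloadSketch(const Descriptor& descriptor, const SketchInfo& info)
{
    switch (info.m_InputType)
    {
        case DataType::Float16:
        case DataType::Float32:
            return std::make_unique<FloatWorkload>(descriptor, info);
        case DataType::QuantisedAsymm8:
            return std::make_unique<Uint8Workload>(descriptor, info);
    }
    throw std::invalid_argument("unsupported data type");
}

int main()
{
    AdditionDescriptor descriptor;
    MakeWorkloadSketch<FloatAdditionSketch, Uint8AdditionSketch>(descriptor, {DataType::Float32})->Execute();
    MakeWorkloadSketch<FloatAdditionSketch, Uint8AdditionSketch>(descriptor, {DataType::QuantisedAsymm8})->Execute();
    return 0;
}
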
std::unique_ptr<armnn::IWorkload> ClWorkloadFactory::CreateBatchNormalization(
#pragma once
#include "backends/ClWorkloads/ClActivationFloatWorkload.hpp"
#include "backends/ClWorkloads/ClActivationUint8Workload.hpp"
-#include "backends/ClWorkloads/ClAdditionFloatWorkload.hpp"
-#include "backends/ClWorkloads/ClAdditionUint8Workload.hpp"
+#include "backends/ClWorkloads/ClAdditionWorkload.hpp"
#include "backends/ClWorkloads/ClBaseConstantWorkload.hpp"
#include "backends/ClWorkloads/ClBaseMergerWorkload.hpp"
#include "backends/ClWorkloads/ClBatchNormalizationFloatWorkload.hpp"
#include "backends/ClWorkloads/ClSoftmaxUint8Workload.hpp"
#include "backends/ClWorkloads/ClSplitterFloatWorkload.hpp"
#include "backends/ClWorkloads/ClSplitterUint8Workload.hpp"
-#include "backends/ClWorkloads/ClSubtractionFloatWorkload.hpp"
-#include "backends/ClWorkloads/ClSubtractionUint8Workload.hpp"
+#include "backends/ClWorkloads/ClSubtractionWorkload.hpp"
#include "backends/ClWorkloads/ClConvertFp16ToFp32Workload.hpp"
#include "backends/ClWorkloads/ClConvertFp32ToFp16Workload.hpp"
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClAdditionBaseWorkload.hpp"
-
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
-
-template <armnn::DataType... T>
-ClAdditionBaseWorkload<T...>::ClAdditionBaseWorkload(const AdditionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : TypedWorkload<AdditionQueueDescriptor, T...>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("ClAdditionBaseWorkload", 2, 1);
-
- arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
-}
-
-template <armnn::DataType... T>
-void ClAdditionBaseWorkload<T...>::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionBaseWorkload_Execute");
- m_Layer.run();
-}
-
-bool ClAdditionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
-{
- const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
- const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- const arm_compute::Status aclStatus = arm_compute::CLArithmeticAddition::validate(&aclInput0Info,
- &aclInput1Info,
- &aclOutputInfo,
- g_AclConvertPolicy);
-
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
-}
-
-} //namespace armnn
-
-template class armnn::ClAdditionBaseWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
-template class armnn::ClAdditionBaseWorkload<armnn::DataType::QuantisedAsymm8>;
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-template <armnn::DataType... dataTypes>
-class ClAdditionBaseWorkload : public TypedWorkload<AdditionQueueDescriptor, dataTypes...>
-{
-public:
- ClAdditionBaseWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLArithmeticAddition m_Layer;
-};
-
-bool ClAdditionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClAdditionFloatWorkload.hpp"
-
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-void ClAdditionFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionFloatWorkload_Execute");
- ClAdditionBaseWorkload::Execute();
-}
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClAdditionBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClAdditionFloatWorkload : public ClAdditionBaseWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- using ClAdditionBaseWorkload<DataType::Float16, DataType::Float32>::ClAdditionBaseWorkload;
- void Execute() const override;
-};
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClAdditionUint8Workload.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-void ClAdditionUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionUint8Workload_Execute");
- ClAdditionBaseWorkload::Execute();
-}
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClAdditionBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClAdditionUint8Workload : public ClAdditionBaseWorkload<DataType::QuantisedAsymm8>
-{
-public:
- using ClAdditionBaseWorkload<DataType::QuantisedAsymm8>::ClAdditionBaseWorkload;
- void Execute() const override;
-};
-
-} //namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClAdditionWorkload.hpp"
+
+#include "backends/ClTensorHandle.hpp"
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/ArmComputeTensorUtils.hpp"
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
+
+template <armnn::DataType... T>
+ClAdditionWorkload<T...>::ClAdditionWorkload(const AdditionQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : TypedWorkload<AdditionQueueDescriptor, T...>(descriptor, info)
+{
+ this->m_Data.ValidateInputsOutputs("ClAdditionWorkload", 2, 1);
+
+ arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
+ m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
+}
+
+template <armnn::DataType... T>
+void ClAdditionWorkload<T...>::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClAdditionWorkload_Execute");
+ m_Layer.run();
+}
+
+bool ClAdditionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported)
+{
+ const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ const arm_compute::Status aclStatus = arm_compute::CLArithmeticAddition::validate(&aclInput0Info,
+ &aclInput1Info,
+ &aclOutputInfo,
+ g_AclConvertPolicy);
+
+ const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
+ if (!supported && reasonIfUnsupported)
+ {
+ *reasonIfUnsupported = aclStatus.error_description();
+ }
+
+ return supported;
+}
+
+} //namespace armnn
+
+template class armnn::ClAdditionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
+template class armnn::ClAdditionWorkload<armnn::DataType::QuantisedAsymm8>;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backends/ClWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+template <armnn::DataType... dataTypes>
+class ClAdditionWorkload : public TypedWorkload<AdditionQueueDescriptor, dataTypes...>
+{
+public:
+ ClAdditionWorkload(const AdditionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ void Execute() const override;
+
+private:
+ mutable arm_compute::CLArithmeticAddition m_Layer;
+};
+
+bool ClAdditionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported);
+} //namespace armnn
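
Note that the member functions of the ClAdditionWorkload template are defined in ClAdditionWorkload.cpp rather than in this header, which is why that source file ends with explicit instantiations for the Float16/Float32 and QuantisedAsymm8 packs: only those two specialisations are emitted, and any other DataType pack would compile against the header but fail at link time. A self-contained sketch of that pattern, using invented names (Accumulator, accumulator.hpp/.cpp), follows.

// Sketch of "template declared in a header, defined in a .cpp, explicitly
// instantiated"; all names are invented for illustration.

// accumulator.hpp
template <typename T>
class Accumulator
{
public:
    explicit Accumulator(T initial);
    void Add(T value);
    T Total() const;
private:
    T m_Total;
};

// accumulator.cpp
// #include "accumulator.hpp"
template <typename T>
Accumulator<T>::Accumulator(T initial) : m_Total(initial) {}

template <typename T>
void Accumulator<T>::Add(T value) { m_Total += value; }

template <typename T>
T Accumulator<T>::Total() const { return m_Total; }

// Only these instantiations are emitted, mirroring the two explicit
// ClAdditionWorkload instantiations at the end of ClAdditionWorkload.cpp.
// Accumulator<double>, for example, would compile but not link.
template class Accumulator<float>;
template class Accumulator<int>;
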
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSubtractionBaseWorkload.hpp"
-
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
-
-template <armnn::DataType... T>
-ClSubtractionBaseWorkload<T...>::ClSubtractionBaseWorkload(const SubtractionQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : TypedWorkload<SubtractionQueueDescriptor, T...>(descriptor, info)
-{
- this->m_Data.ValidateInputsOutputs("ClSubtractionBaseWorkload", 2, 1);
-
- arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
- arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
- m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
-}
-
-template <armnn::DataType... T>
-void ClSubtractionBaseWorkload<T...>::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionBaseWorkload_Execute");
- m_Layer.run();
-}
-
-bool ClSubtractionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported)
-{
- const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
- const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
- const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-
- const arm_compute::Status aclStatus = arm_compute::CLArithmeticSubtraction::validate(&aclInput0Info,
- &aclInput1Info,
- &aclOutputInfo,
- g_AclConvertPolicy);
-
- const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
- if (!supported && reasonIfUnsupported)
- {
- *reasonIfUnsupported = aclStatus.error_description();
- }
-
- return supported;
-}
-
-} //namespace armnn
-
-template class armnn::ClSubtractionBaseWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
-template class armnn::ClSubtractionBaseWorkload<armnn::DataType::QuantisedAsymm8>;
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "backends/ClWorkloadUtils.hpp"
-
-namespace armnn
-{
-
-template <armnn::DataType... dataTypes>
-class ClSubtractionBaseWorkload : public TypedWorkload<SubtractionQueueDescriptor, dataTypes...>
-{
-public:
- ClSubtractionBaseWorkload(const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- void Execute() const override;
-
-private:
- mutable arm_compute::CLArithmeticSubtraction m_Layer;
-};
-
-bool ClSubtractionValidate(const TensorInfo& input0,
- const TensorInfo& input1,
- const TensorInfo& output,
- std::string* reasonIfUnsupported);
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSubtractionFloatWorkload.hpp"
-
-#include "backends/ClTensorHandle.hpp"
-#include "backends/CpuTensorHandle.hpp"
-#include "backends/ArmComputeTensorUtils.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-void ClSubtractionFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionFloatWorkload_Execute");
- ClSubtractionBaseWorkload::Execute();
-}
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClSubtractionBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClSubtractionFloatWorkload : public ClSubtractionBaseWorkload<DataType::Float16, DataType::Float32>
-{
-public:
- using ClSubtractionBaseWorkload<DataType::Float16, DataType::Float32>::ClSubtractionBaseWorkload;
- void Execute() const override;
-};
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ClSubtractionUint8Workload.hpp"
-
-namespace armnn
-{
-using namespace armcomputetensorutils;
-
-void ClSubtractionUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionUint8Workload_Execute");
- ClSubtractionBaseWorkload::Execute();
-}
-
-} //namespace armnn
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "ClSubtractionBaseWorkload.hpp"
-
-namespace armnn
-{
-
-class ClSubtractionUint8Workload : public ClSubtractionBaseWorkload<DataType::QuantisedAsymm8>
-{
-public:
- using ClSubtractionBaseWorkload<DataType::QuantisedAsymm8>::ClSubtractionBaseWorkload;
- void Execute() const override;
-};
-
-} //namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClSubtractionWorkload.hpp"
+
+#include "backends/ClTensorHandle.hpp"
+#include "backends/CpuTensorHandle.hpp"
+#include "backends/ArmComputeTensorUtils.hpp"
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+static constexpr arm_compute::ConvertPolicy g_AclConvertPolicy = arm_compute::ConvertPolicy::SATURATE;
+
+template <armnn::DataType... T>
+ClSubtractionWorkload<T...>::ClSubtractionWorkload(const SubtractionQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : TypedWorkload<SubtractionQueueDescriptor, T...>(descriptor, info)
+{
+ this->m_Data.ValidateInputsOutputs("ClSubtractionWorkload", 2, 1);
+
+ arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[1])->GetTensor();
+ arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
+ m_Layer.configure(&input0, &input1, &output, g_AclConvertPolicy);
+}
+
+template <armnn::DataType... T>
+void ClSubtractionWorkload<T...>::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClSubtractionWorkload_Execute");
+ m_Layer.run();
+}
+
+bool ClSubtractionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported)
+{
+ const arm_compute::TensorInfo aclInput0Info = BuildArmComputeTensorInfo(input0);
+ const arm_compute::TensorInfo aclInput1Info = BuildArmComputeTensorInfo(input1);
+ const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
+
+ const arm_compute::Status aclStatus = arm_compute::CLArithmeticSubtraction::validate(&aclInput0Info,
+ &aclInput1Info,
+ &aclOutputInfo,
+ g_AclConvertPolicy);
+
+ const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
+ if (!supported && reasonIfUnsupported)
+ {
+ *reasonIfUnsupported = aclStatus.error_description();
+ }
+
+ return supported;
+}
+
+} //namespace armnn
+
+template class armnn::ClSubtractionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
+template class armnn::ClSubtractionWorkload<armnn::DataType::QuantisedAsymm8>;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backends/ClWorkloadUtils.hpp"
+
+namespace armnn
+{
+
+template <armnn::DataType... dataTypes>
+class ClSubtractionWorkload : public TypedWorkload<SubtractionQueueDescriptor, dataTypes...>
+{
+public:
+ ClSubtractionWorkload(const SubtractionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ void Execute() const override;
+
+private:
+ mutable arm_compute::CLArithmeticSubtraction m_Layer;
+};
+
+bool ClSubtractionValidate(const TensorInfo& input0,
+ const TensorInfo& input1,
+ const TensorInfo& output,
+ std::string* reasonIfUnsupported);
+} //namespace armnn
BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
{
- ClCreateArithmethicWorkloadTest<ClAdditionFloatWorkload,
+ ClCreateArithmethicWorkloadTest<ClAdditionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
AdditionQueueDescriptor,
AdditionLayer,
armnn::DataType::Float32>();
BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
{
- ClCreateArithmethicWorkloadTest<ClAdditionFloatWorkload,
+ ClCreateArithmethicWorkloadTest<ClAdditionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
AdditionQueueDescriptor,
AdditionLayer,
armnn::DataType::Float16>();
BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
{
- ClCreateArithmethicWorkloadTest<ClSubtractionFloatWorkload,
+ ClCreateArithmethicWorkloadTest<ClSubtractionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::Float32>();
BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
{
- ClCreateArithmethicWorkloadTest<ClSubtractionFloatWorkload,
+ ClCreateArithmethicWorkloadTest<ClSubtractionWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
SubtractionQueueDescriptor,
SubtractionLayer,
armnn::DataType::Float16>();