#include "ClWorkloads/ClL2NormalizationFloatWorkload.hpp"
#include "ClWorkloads/ClMultiplicationFloatWorkload.hpp"
#include "ClWorkloads/ClFullyConnectedWorkload.hpp"
+#include "ClWorkloads/ClPadWorkload.hpp"
#include "ClWorkloads/ClPooling2dBaseWorkload.hpp"
#include "ClWorkloads/ClPermuteWorkload.hpp"
#include "ClWorkloads/ClNormalizationFloatWorkload.hpp"
&TrueFunc<>);
}
+// Reports whether a Pad layer with these tensors/descriptor is supported on
+// the CL backend by forwarding to the ACL-backed ClPadValidate check.
+// On failure, reasonIfUnsupported (if non-null) receives the reason.
+bool IsPadSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ return FORWARD_CL_LAYER_SUPPORT_FUNC(ClPadValidate(input, output, descriptor, reasonIfUnsupported));
+}
+
bool IsPermuteSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
bool IsOutputSupportedCl(const TensorInfo& output,
std::string* reasonIfUnsupported = nullptr);
+bool IsPadSupportedCl(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ std::string* reasonIfUnsupported = nullptr);
+
bool IsPermuteSupportedCl(const TensorInfo& input,
const TensorInfo& output,
const PermuteDescriptor& descriptor,
std::unique_ptr<IWorkload> ClWorkloadFactory::CreatePad(const PadQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+ // NOTE(review): replaces the former NullWorkload stubs; MakeWorkload's two
+ // template args presumably select the float vs. quantised-uint8 workload
+ // based on the tensor data type — confirm against MakeWorkload's contract.
+ return MakeWorkload<ClPadWorkload<armnn::DataType::Float16, armnn::DataType::Float32>,
+ ClPadWorkload<armnn::DataType::QuantisedAsymm8>>(descriptor, info);
}
void ClWorkloadFactory::Finalize()
#include "backends/ClWorkloads/ClMergerUint8Workload.hpp"
#include "backends/ClWorkloads/ClMultiplicationFloatWorkload.hpp"
#include "backends/ClWorkloads/ClNormalizationFloatWorkload.hpp"
+#include "backends/ClWorkloads/ClPadWorkload.hpp"
#include "backends/ClWorkloads/ClPermuteWorkload.hpp"
#include "backends/ClWorkloads/ClPooling2dFloatWorkload.hpp"
#include "backends/ClWorkloads/ClPooling2dUint8Workload.hpp"
ClMultiplicationFloatWorkload.hpp
ClNormalizationFloatWorkload.cpp
ClNormalizationFloatWorkload.hpp
+ ClPadWorkload.cpp
+ ClPadWorkload.hpp
ClPermuteWorkload.cpp
ClPermuteWorkload.hpp
ClPooling2dBaseWorkload.cpp
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClPadWorkload.hpp"
+
+#include "backends/ClTensorHandle.hpp"
+#include "backends/aclCommon/ArmComputeTensorUtils.hpp"
+#include "ClWorkloadUtils.hpp"
+#include <arm_compute/core/Types.h>
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+// Constructs the workload: validates the descriptor has exactly one input and
+// one output, then configures the ACL CLPadLayer with those tensors and the
+// padding list taken from the layer parameters.
+template <armnn::DataType... T>
+ClPadWorkload<T...>::ClPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info)
+: TypedWorkload<PadQueueDescriptor, T...>(descriptor, info)
+{
+ this->m_Data.ValidateInputsOutputs("ClPadWorkload", 1, 1);
+
+ // Unwrap the armnn tensor handles to the underlying ACL CL tensors.
+ arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTensor();
+ arm_compute::PaddingList padList = static_cast<arm_compute::PaddingList>(descriptor.m_Parameters.m_PadList);
+
+ m_Layer.configure(&input, &output, padList);
+}
+
+// Runs the CLPadLayer configured in the constructor; the scoped event makes
+// the execution visible to armnn's CL profiling.
+template <armnn::DataType... T>
+void ClPadWorkload<T...>::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_CL("ClPadWorkload_Execute");
+ m_Layer.run();
+}
+
+// Queries Arm Compute Library whether CLPadLayer can handle this
+// input/output/padding combination. Returns true when ACL reports OK;
+// otherwise returns false and, if reasonIfUnsupported is non-null, stores
+// ACL's error description there.
+bool ClPadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ std::string* reasonIfUnsupported)
+{
+ // Translate the armnn tensor descriptions into ACL equivalents.
+ const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
+ const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
+ const arm_compute::PaddingList paddingList = static_cast<arm_compute::PaddingList>(descriptor.m_PadList);
+
+ const arm_compute::Status status = arm_compute::CLPadLayer::validate(&aclInput, &aclOutput, paddingList);
+
+ if (status.error_code() == arm_compute::ErrorCode::OK)
+ {
+ return true;
+ }
+
+ if (reasonIfUnsupported != nullptr)
+ {
+ *reasonIfUnsupported = status.error_description();
+ }
+
+ return false;
+}
+
+} // namespace armnn
+
+// Explicit instantiations for the two type sets dispatched by
+// ClWorkloadFactory::CreatePad (float16/float32 and quantised-asymm-uint8).
+template class armnn::ClPadWorkload<armnn::DataType::Float16, armnn::DataType::Float32>;
+template class armnn::ClPadWorkload<armnn::DataType::QuantisedAsymm8>;
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "backends/WorkloadData.hpp"
+#include "backends/Workload.hpp"
+#include <arm_compute/runtime/CL/functions/CLPadLayer.h>
+
+namespace armnn {
+
+// CL-backend workload executing a Pad layer via Arm Compute Library's
+// CLPadLayer. dataTypes lists the armnn data types this instantiation
+// accepts (see the explicit instantiations in ClPadWorkload.cpp).
+template <armnn::DataType... dataTypes>
+class ClPadWorkload : public TypedWorkload<PadQueueDescriptor, dataTypes...>
+{
+public:
+ ClPadWorkload(const PadQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+ // Runs the configured CLPadLayer.
+ void Execute() const override;
+
+private:
+ // mutable because Execute() is const while CLPadLayer::run() is not.
+ mutable arm_compute::CLPadLayer m_Layer;
+};
+
+// Returns whether CLPadLayer supports this pad configuration; on failure the
+// optional reasonIfUnsupported receives ACL's error description.
+bool ClPadValidate(const TensorInfo& input,
+ const TensorInfo& output,
+ const PadDescriptor& descriptor,
+ std::string* reasonIfUnsupported);
+
+} //namespace armnn
+
ClMergerUint8Workload.cpp \
ClMultiplicationFloatWorkload.cpp \
ClNormalizationFloatWorkload.cpp \
+ ClPadWorkload.cpp \
ClPermuteWorkload.cpp \
ClPooling2dBaseWorkload.cpp \
ClPooling2dFloatWorkload.cpp \