std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateReshape(const ReshapeQueueDescriptor& descriptor,
const WorkloadInfo& info) const
{
- return MakeWorkload<NeonReshapeFloatWorkload, NeonReshapeUint8Workload>(descriptor, info);
+ return std::make_unique<NeonReshapeWorkload>(descriptor, info);
}
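The removed `MakeWorkload<Float, Uint8>` helper picks a workload class from the data type of the first input tensor; reshape only moves bytes, so a single data-type-agnostic class can be constructed directly. A minimal sketch of the dispatch idiom being dropped (simplified; ArmNN's real helper also covers Float16 and handles unsupported types, so the internals here are assumptions):

```cpp
// Sketch of the two-class dispatch the factory no longer needs.
template <typename FloatWorkloadT, typename Uint8WorkloadT, typename QueueDescriptorT>
std::unique_ptr<IWorkload> MakeWorkload(const QueueDescriptorT& descriptor,
                                        const WorkloadInfo& info)
{
    switch (info.m_InputTensorInfos[0].GetDataType())
    {
        case DataType::Float32:
            return std::make_unique<FloatWorkloadT>(descriptor, info);
        case DataType::QuantisedAsymm8:
            return std::make_unique<Uint8WorkloadT>(descriptor, info);
        default:
            return nullptr; // the real helper treats this case differently
    }
}
```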
std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateFloor(const FloorQueueDescriptor& descriptor,
workloads/NeonPooling2dBaseWorkload.cpp \
workloads/NeonPooling2dFloatWorkload.cpp \
workloads/NeonPooling2dUint8Workload.cpp \
- workloads/NeonReshapeFloatWorkload.cpp \
- workloads/NeonReshapeUint8Workload.cpp \
+ workloads/NeonReshapeWorkload.cpp \
workloads/NeonSoftmaxBaseWorkload.cpp \
workloads/NeonSoftmaxFloatWorkload.cpp \
workloads/NeonSoftmaxUint8Workload.cpp \
NeonCreatePooling2dWorkloadTest<NeonPooling2dUint8Workload, DataType::QuantisedAsymm8>(DataLayout::NHWC);
}
-template <typename ReshapeWorkloadType, typename armnn::DataType DataType>
+template <typename armnn::DataType DataType>
static void NeonCreateReshapeWorkloadTest()
{
Graph graph;
NeonWorkloadFactory factory;
- auto workload = CreateReshapeWorkloadTest<ReshapeWorkloadType, DataType>(factory, graph);
+ auto workload = CreateReshapeWorkloadTest<NeonReshapeWorkload, DataType>(factory, graph);
// Checks that outputs and inputs are as we expect them (see definition of CreateReshapeWorkloadTest).
ReshapeQueueDescriptor queueDescriptor = workload->GetData();
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
{
- NeonCreateReshapeWorkloadTest<NeonReshapeFloatWorkload, DataType::Float16>();
+ NeonCreateReshapeWorkloadTest<DataType::Float16>();
}
#endif
BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
{
- NeonCreateReshapeWorkloadTest<NeonReshapeFloatWorkload, DataType::Float32>();
+ NeonCreateReshapeWorkloadTest<DataType::Float32>();
}
BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
{
- NeonCreateReshapeWorkloadTest<NeonReshapeUint8Workload, DataType::QuantisedAsymm8>();
+ NeonCreateReshapeWorkloadTest<DataType::QuantisedAsymm8>();
}
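With the workload class fixed, the test helper is parameterised on the data type alone and each per-type test case shrinks to a single call. Covering a further data type would be one more one-liner, e.g. (hypothetical, not part of this change):

```cpp
BOOST_AUTO_TEST_CASE(CreateReshapeSigned32Workload) // hypothetical extra case
{
    NeonCreateReshapeWorkloadTest<DataType::Signed32>();
}
```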
template <typename SoftmaxWorkloadType, typename armnn::DataType DataType>
NeonPooling2dFloatWorkload.hpp
NeonPooling2dUint8Workload.cpp
NeonPooling2dUint8Workload.hpp
- NeonReshapeFloatWorkload.cpp
- NeonReshapeFloatWorkload.hpp
- NeonReshapeUint8Workload.cpp
- NeonReshapeUint8Workload.hpp
+ NeonReshapeWorkload.cpp
+ NeonReshapeWorkload.hpp
NeonSoftmaxBaseWorkload.cpp
NeonSoftmaxBaseWorkload.hpp
NeonSoftmaxFloatWorkload.cpp
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonReshapeFloatWorkload.hpp"
-
-
-
-namespace armnn
-{
-
-NeonReshapeFloatWorkload::NeonReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : FloatWorkload<ReshapeQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonReshapeFloatWorkload", 1, 1);
-
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output);
-}
-
-void NeonReshapeFloatWorkload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeFloatWorkload_Execute");
- m_Layer.run();
-}
-
-} //namespace armnn
-
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <backends/neon/workloads/NeonWorkloadUtils.hpp>
-
-namespace armnn
-{
-
-class NeonReshapeFloatWorkload : public FloatWorkload<ReshapeQueueDescriptor>
-{
-public:
- NeonReshapeFloatWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
-
- virtual void Execute() const override;
-
-private:
- mutable arm_compute::NEReshapeLayer m_Layer;
-};
-
-} //namespace armnn
-
-
-
-
-
+++ /dev/null
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "NeonReshapeUint8Workload.hpp"
-
-
-
-
-namespace armnn
-{
-NeonReshapeUint8Workload::NeonReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor,
- const WorkloadInfo& info)
- : Uint8Workload<ReshapeQueueDescriptor>(descriptor, info)
-{
- m_Data.ValidateInputsOutputs("NeonReshapeUint8Workload", 1, 1);
-
- arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
- arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
-
- m_Layer.configure(&input, &output);
-}
-
-void NeonReshapeUint8Workload::Execute() const
-{
- ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeUint8Workload_Execute");
- m_Layer.run();
-}
-} //namespace armnn
--- /dev/null
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonReshapeWorkload.hpp"
+
+namespace armnn
+{
+
+NeonReshapeWorkload::NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor,
+ const WorkloadInfo& info)
+ : BaseWorkload<ReshapeQueueDescriptor>(descriptor, info)
+{
+ m_Data.ValidateInputsOutputs("NeonReshapeWorkload", 1, 1);
+
+ arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+ arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+ m_Layer.configure(&input, &output);
+}
+
+void NeonReshapeWorkload::Execute() const
+{
+ ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonReshapeWorkload_Execute");
+ m_Layer.run();
+}
+
+} //namespace armnn
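In the merged implementation, `ValidateInputsOutputs("NeonReshapeWorkload", 1, 1)` checks that the descriptor carries exactly one input and one output handle, `NEReshapeLayer::configure` wires the Arm Compute kernel once in the constructor, and each `Execute()` call just runs it. A rough usage sketch, assuming already-allocated Neon tensor handles (`inputHandle`, `outputHandle`, and the matching `TensorInfo`s are illustrative names, not part of this change):

```cpp
// Hypothetical driver: configure once in the constructor, run many times.
ReshapeQueueDescriptor descriptor;
descriptor.m_Inputs.push_back(inputHandle);   // INeonTensorHandle*, already allocated
descriptor.m_Outputs.push_back(outputHandle); // same element count, target shape

WorkloadInfo info;
info.m_InputTensorInfos  = { inputInfo };
info.m_OutputTensorInfos = { outputInfo };

NeonReshapeWorkload workload(descriptor, info); // configures NEReshapeLayer
workload.Execute();                             // runs the kernel
```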
namespace armnn
{
-class NeonReshapeUint8Workload : public Uint8Workload<ReshapeQueueDescriptor>
+class NeonReshapeWorkload : public BaseWorkload<ReshapeQueueDescriptor>
{
public:
- NeonReshapeUint8Workload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
+ NeonReshapeWorkload(const ReshapeQueueDescriptor& descriptor, const WorkloadInfo& info);
+
virtual void Execute() const override;
private:
    mutable arm_compute::NEReshapeLayer m_Layer;
};
} //namespace armnn
-
-
-
-
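The base-class change is what makes the merge possible: `FloatWorkload` and `Uint8Workload` are thin aliases of a typed workload that asserts the tensors' data type at construction, while `BaseWorkload` imposes no such constraint. Roughly (simplified from ArmNN's Workload.hpp; the real definitions differ in detail):

```cpp
// Simplified sketch of the type-checking layer the reshape workload drops.
template <typename QueueDescriptor, DataType... DataTypes>
class TypedWorkload : public BaseWorkload<QueueDescriptor>
{
public:
    TypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
        : BaseWorkload<QueueDescriptor>(descriptor, info)
    {
        // The real class asserts here that every input/output TensorInfo
        // reports one of DataTypes...; BaseWorkload performs no such check.
    }
};

template <typename QueueDescriptor>
using FloatWorkload = TypedWorkload<QueueDescriptor,
                                    DataType::Float16, DataType::Float32>;

template <typename QueueDescriptor>
using Uint8Workload = TypedWorkload<QueueDescriptor,
                                    DataType::QuantisedAsymm8>;
```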
#include "NeonPooling2dBaseWorkload.hpp"
#include "NeonPooling2dFloatWorkload.hpp"
#include "NeonPooling2dUint8Workload.hpp"
-#include "NeonReshapeFloatWorkload.hpp"
-#include "NeonReshapeUint8Workload.hpp"
+#include "NeonReshapeWorkload.hpp"
#include "NeonSoftmaxFloatWorkload.hpp"
#include "NeonSoftmaxUint8Workload.hpp"
#include "NeonSplitterFloatWorkload.hpp"