From 6930a7262abba9fb1c93391a9804d87e386644d8 Mon Sep 17 00:00:00 2001
From: Jonghyun Park/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics
Date: Thu, 14 Jun 2018 14:17:35 +0900
Subject: [PATCH] Introduce 'nnfw::make_unique' (#1677)

This commit deprecates the 'make_layer' function in the pure CL runtime,
and introduces a general 'make_unique' function as a helper.

Signed-off-by: Jonghyun Park
---
 include/nnfw/std/memory.h                    | 17 ++++++++++++
 runtimes/pure_arm_compute/src/compilation.cc | 40 ++++++++++++++--------------
 2 files changed, 37 insertions(+), 20 deletions(-)
 create mode 100644 include/nnfw/std/memory.h

diff --git a/include/nnfw/std/memory.h b/include/nnfw/std/memory.h
new file mode 100644
index 0000000..dd02369
--- /dev/null
+++ b/include/nnfw/std/memory.h
@@ -0,0 +1,17 @@
+#ifndef __NNFW_STD_MEMORY_H__
+#define __NNFW_STD_MEMORY_H__
+
+#include <memory>
+
+namespace nnfw
+{
+
+template <typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&... args)
+{
+  // NOTE std::make_unique is missing in C++11 standard
+  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
+}
+
+} // namespace nnfw
+
+#endif // __NNFW_STD_MEMORY_H__
diff --git a/runtimes/pure_arm_compute/src/compilation.cc b/runtimes/pure_arm_compute/src/compilation.cc
index 0608c23..36191c7 100644
--- a/runtimes/pure_arm_compute/src/compilation.cc
+++ b/runtimes/pure_arm_compute/src/compilation.cc
@@ -35,6 +35,8 @@
 #include "util/kernel/IndexIterator.h"
 #include "util/feature/IndexIterator.h"
 
+#include <nnfw/std/memory.h>
+
 #include "compilation.h"
 #include "model.h"
 #include "logging.h"
@@ -128,8 +130,6 @@ Padding same_padding(const nnfw::util::feature::Shape &ifm_shape,
   return padding;
 }
 
-template <typename T> std::unique_ptr<T> make_layer(void) { return std::unique_ptr<T>{new T}; }
-
 ::arm_compute::TensorShape asTensorShape(int32_t h, int32_t w)
 {
   return ::arm_compute::TensorShape(w, h);
@@ -258,7 +258,7 @@ void ActivationBuilder::appendReLU(::arm_compute::ICLTensor *ifm_alloc)
   const ::arm_compute::ActivationLayerInfo act_info{
       ::arm_compute::ActivationLayerInfo::ActivationFunction::RELU};
 
-  auto fn = make_layer<::arm_compute::CLActivationLayer>();
+  auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
 
   fn->configure(ifm_alloc, nullptr, act_info);
 
@@ -270,7 +270,7 @@ void ActivationBuilder::appendReLU6(::arm_compute::ICLTensor *ifm_alloc)
   const ::arm_compute::ActivationLayerInfo act_info{
       ::arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 6.0f, 0.0f};
 
-  auto fn = make_layer<::arm_compute::CLActivationLayer>();
+  auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
 
   fn->configure(ifm_alloc, nullptr, act_info);
 
@@ -389,7 +389,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
   // TODO Figure out why this happens, and fix it
   if (from_env(std::getenv("USE_CL_ARITHMETIC_ADDITION")))
   {
-    auto l = make_layer<::arm_compute::CLArithmeticAddition>();
+    auto l = nnfw::make_unique<::arm_compute::CLArithmeticAddition>();
 
     // TODO Decide ConvertPolicy (WARP? SATURATE?) according to NN API specification
     l->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
@@ -403,7 +403,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
     assert(lhs_shape.H == rhs_shape.H);
     assert(lhs_shape.W == rhs_shape.W);
 
-    auto l = make_layer<SimpleArithmeticAddition>();
+    auto l = nnfw::make_unique<SimpleArithmeticAddition>();
 
     l->configure(lhs_alloc, rhs_alloc, ofm_alloc);
 
@@ -513,7 +513,7 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
   auto lhs_alloc = ctx.at(::internal::tflite::operand::Index{param.lhs_index});
   auto rhs_alloc = ctx.at(::internal::tflite::operand::Index{param.rhs_index});
 
-  auto fn = make_layer<::arm_compute::CLArithmeticSubtraction>();
+  auto fn = nnfw::make_unique<::arm_compute::CLArithmeticSubtraction>();
 
   // TODO Decide ConvertPolicy (WARP? SATURATE?) according to NN API specification
   fn->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
@@ -569,7 +569,7 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
   auto lhs_input_alloc = ctx.at(::internal::tflite::operand::Index{param.lhs_index});
   auto rhs_input_alloc = ctx.at(::internal::tflite::operand::Index{param.rhs_index});
 
-  auto fn = make_layer<::arm_compute::CLPixelWiseMultiplication>();
+  auto fn = nnfw::make_unique<::arm_compute::CLPixelWiseMultiplication>();
 
   fn->configure(lhs_input_alloc, rhs_input_alloc, output_alloc,
                 1.0, // scale
@@ -679,7 +679,7 @@ void Planner::visit(const ::internal::tflite::op::Div::Node &node)
   auto lhs_alloc = ctx.at(::internal::tflite::operand::Index{param.lhs_index});
   auto rhs_alloc = ctx.at(::internal::tflite::operand::Index{param.rhs_index});
 
-  auto fn = make_layer<::arm_compute::CLPixelWiseDivision>();
+  auto fn = nnfw::make_unique<::arm_compute::CLPixelWiseDivision>();
 
   // TODO Decide scale, overflow_policy, and rounding_policy.
   // Currently, the default values are used.
@@ -925,7 +925,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::implicit::Node &node)
 
   const auto conv_info = asPadStrideInfo(param.padding, param.stride);
 
-  auto fn = make_layer<::arm_compute::CLDepthwiseConvolutionLayer>();
+  auto fn = nnfw::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
 
   fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info);
 
@@ -1321,7 +1321,7 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
   auto weight_alloc = ctx.at(::internal::tflite::operand::Index{param.weight_index});
   auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
 
-  auto fn = make_layer<::arm_compute::CLFullyConnectedLayer>();
+  auto fn = nnfw::make_unique<::arm_compute::CLFullyConnectedLayer>();
 
   fn->configure(input_alloc, weight_alloc, bias_alloc, output_alloc);
 
@@ -1369,7 +1369,7 @@ void Planner::visit(const ::internal::tflite::op::ResizeBilinear::Node &node)
   auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
   auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
 
-  auto fn = make_layer<::arm_compute::CLScale>();
+  auto fn = nnfw::make_unique<::arm_compute::CLScale>();
 
   fn->configure(ifm_alloc, ofm_alloc, ::arm_compute::InterpolationPolicy::BILINEAR,
                 ::arm_compute::BorderMode::REPLICATE, ::arm_compute::PixelValue(0.f),
@@ -1415,7 +1415,7 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
   auto input_alloc = ctx.at(::internal::tflite::operand::Index{param.input_index});
 
   // GenericReshape first apply NCHW->NHWC permutation, and apply reshape
-  auto fn = make_layer<GenericReshapeLayer>();
+  auto fn = nnfw::make_unique<GenericReshapeLayer>();
 
   fn->configure(input_alloc, output_alloc);
 
@@ -1458,7 +1458,7 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
   auto output_alloc = ctx.at(::internal::tflite::operand::Index{param.output_index});
   auto input_alloc = ctx.at(::internal::tflite::operand::Index{param.input_index});
 
-  auto fn = make_layer<::arm_compute::CLSoftmaxLayer>();
+  auto fn = nnfw::make_unique<::arm_compute::CLSoftmaxLayer>();
 
   fn->configure(input_alloc, output_alloc, param.scale);
 
@@ -1532,7 +1532,7 @@ void Planner::visit(const ::internal::tflite::op::StridedSlice::Node &node)
   auto endData_alloc = ctx.at(::internal::tflite::operand::Index{param.endData_index});
   auto stridesData_alloc = ctx.at(::internal::tflite::operand::Index{param.stridesData_index});
 
-  auto fn = make_layer<::arm_compute::CLStridedSlice>();
+  auto fn = nnfw::make_unique<::arm_compute::CLStridedSlice>();
 
   fn->configure(inputData_alloc, outputData_alloc, startData_alloc, endData_alloc,
                 stridesData_alloc, param.beginMask, param.endMask, param.shrinkAxisMask);
@@ -1592,7 +1592,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceMax::Node &node)
   auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
   auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
 
-  auto fn = make_layer<::arm_compute::CLReduceMax>();
+  auto fn = nnfw::make_unique<::arm_compute::CLReduceMax>();
 
   fn->configure(ifm_alloc, param.axis, ofm_alloc);
 
@@ -1679,7 +1679,7 @@ void Planner::visit(const ::internal::tflite::op::Cast::Node &node)
   if (from_env(std::getenv("USE_SIMPLE_CAST")))
   {
     // Use the CPU version of CAST operation
-    auto l = make_layer<SimpleCastLayer>();
+    auto l = nnfw::make_unique<SimpleCastLayer>();
 
     l->configure(input_alloc, output_alloc);
     fn = std::move(l);
@@ -1687,7 +1687,7 @@ void Planner::visit(const ::internal::tflite::op::Cast::Node &node)
   else
   {
     // Use the OpenCL version of CAST operation
-    auto l = make_layer<::arm_compute::CLCast>();
+    auto l = nnfw::make_unique<::arm_compute::CLCast>();
 
     l->configure(input_alloc, output_alloc);
     fn = std::move(l);
@@ -1745,7 +1745,7 @@ void Planner::visit(const ::internal::tflite::op::TopKV2::Node &node)
   auto indices_alloc = ctx.at(::internal::tflite::operand::Index{param.outputIndices_index});
   auto input_alloc = ctx.at(::internal::tflite::operand::Index{param.inputData_index});
 
-  auto fn = make_layer<::arm_compute::CLTopKV2>();
+  auto fn = nnfw::make_unique<::arm_compute::CLTopKV2>();
 
   fn->configure(input_alloc, param.k, values_alloc, indices_alloc);
 
@@ -1799,7 +1799,7 @@ void Planner::visit(const ::internal::tflite::op::Gather::Node &node)
 
   std::unique_ptr<::arm_compute::IFunction> fn;
 
-  auto l = make_layer<::arm_compute::CLGather>();
+  auto l = nnfw::make_unique<::arm_compute::CLGather>();
   l->configure(lhs_alloc, rhs_alloc, ofm_alloc);
   fn = std::move(l);
   builder.append(std::move(fn));
-- 
2.7.4
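
Note (not part of the patch): a minimal, self-contained sketch of how the new
helper behaves. The 'Layer' struct below is a hypothetical type introduced
purely for illustration; only the 'nnfw::make_unique' signature from the
header added above is assumed, along with the header being on the include
path.

  #include <nnfw/std/memory.h>

  #include <iostream>

  // Hypothetical payload type, used only to show argument forwarding.
  struct Layer
  {
    explicit Layer(int id) : id(id) {}
    int id;
  };

  int main()
  {
    // The argument is forwarded to Layer's constructor and a
    // std::unique_ptr<Layer> is returned, matching the C++14
    // std::make_unique behavior while building as C++11.
    auto l = nnfw::make_unique<Layer>(42);
    std::cout << l->id << std::endl; // prints 42
    return 0;
  }

Unlike the old 'make_layer', which only supported default construction,
'nnfw::make_unique' accepts constructor arguments, so call sites such as the
CLActivationLayer ones above need no change beyond the rename.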