Update namespace and header define for cpp14 library (#3896)
author Hyeongseok Oh/Motion Control Lab(SR)/Staff Engineer/Samsung Electronics <hseok82.oh@samsung.com>
Fri, 7 Dec 2018 02:13:20 +0000 (11:13 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Fri, 7 Dec 2018 02:13:20 +0000 (11:13 +0900)
Update the namespace to nnfw::cpp14
Update the header guard define to __NNFW_CPP14_xxx__
Update usages of the cpp14 library to the new namespace

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
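
In short, every call site moves from nnfw::make_unique to nnfw::cpp14::make_unique. A minimal sketch of the rename at a typical call site (Foo and example are hypothetical names for illustration, assuming libs/cpp14/include is on the include path):

  #include <cpp14/memory.h>

  struct Foo
  {
    explicit Foo(int v) : value(v) {}
    int value;
  };

  void example()
  {
    // Before this patch: auto p = nnfw::make_unique<Foo>(42);
    auto p = nnfw::cpp14::make_unique<Foo>(42);
  }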
13 files changed:
libs/cpp14/include/cpp14/memory.h
runtimes/neurun/src/backend/cpu/StageGenerator.cc
runtimes/neurun/src/compiler/SubTensorAnalyzer.cc
runtimes/neurun/src/frontend/memory.cc
runtimes/neurun/src/frontend/model.cc
runtimes/neurun/src/graph/Graph.cc
runtimes/neurun/src/graph/operand/Set.cc
runtimes/neurun/src/graph/pass/PermutationInsertionPass.cc
runtimes/neurun/test/graph/operand/UseDef.cc
runtimes/neurun/test/graph/operation/SetIO.cc
runtimes/neurun/test/graph/verifier/Verifier.cc
runtimes/pure_arm_compute/src/compilation.cc
runtimes/pure_arm_compute/src/memory.cc

index 782907b..b3e678b 100644 (file)
@@ -3,13 +3,15 @@
  * @ingroup COM_AI_RUNTIME
  * @brief This file contains @c make_unique which is not supported by C++11
  */
-#ifndef __NNFW_STD_MEMORY_H__
-#define __NNFW_STD_MEMORY_H__
+#ifndef __NNFW_CPP14_MEMORY_H__
+#define __NNFW_CPP14_MEMORY_H__
 
 #include <memory>
 
 namespace nnfw
 {
+namespace cpp14
+{
 /**
  * @brief Provide @c make_unique function supported from C++14
  * @param[in] args    List of arguments with which an instance of T will be constructed.
@@ -21,6 +23,7 @@ template <typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&..
   return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
 }
 
+} // namespace cpp14
 } // namespace nnfw
 
-#endif // __NNFW_STD_MEMORY_H__
+#endif // __NNFW_CPP14_MEMORY_H__
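
For reference, the header after this patch reads as follows (reconstructed from the hunk above; Doxygen comments omitted). It simply provides C++14's std::make_unique semantics for C++11 builds: forward the arguments to T's constructor and wrap the result in a std::unique_ptr.

  #ifndef __NNFW_CPP14_MEMORY_H__
  #define __NNFW_CPP14_MEMORY_H__

  #include <memory>

  namespace nnfw
  {
  namespace cpp14
  {

  template <typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&... args)
  {
    // Forward the argument pack to T's constructor and take ownership of the result.
    return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
  }

  } // namespace cpp14
  } // namespace nnfw

  #endif // __NNFW_CPP14_MEMORY_H__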
index a0c3a9c..4207027 100644 (file)
@@ -585,7 +585,7 @@ void StageGenerator::visit(const graph::operation::PermuteNode &node)
     auto output_alloc = output_tensors->tensorAt(param.output_index).get();
     auto input_alloc = input_tensors->tensorAt(param.input_index).get();
 
-    auto fn = nnfw::make_unique<::neurun::kernel::cpu::PermuteLayer>();
+    auto fn = nnfw::cpp14::make_unique<::neurun::kernel::cpu::PermuteLayer>();
 
     fn->configure(input_alloc, output_alloc, param.shape, param.type);
 
index 8d75e78..44b56dc 100644 (file)
@@ -63,7 +63,7 @@ void SubTensorAnalyzer::visit(const graph::operation::ConcatNode &node)
     offset[axis] = axis_point;
     neurun::util::feature::Coordinate4D coordinate_info(offset[0], offset[1], offset[2], offset[3]);
     std::unique_ptr<graph::operand::ParentInfo> parentInfo =
-        nnfw::make_unique<graph::operand::ParentInfo>(output_index, coordinate_info);
+        nnfw::cpp14::make_unique<graph::operand::ParentInfo>(output_index, coordinate_info);
 
     // NOTE Not support multiple parent tensor yet
     assert(_ctx.at(input_index).parent_info() == nullptr);
index eadeb54..b2f6ab2 100644 (file)
@@ -32,7 +32,7 @@ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t
 
   // Use unique pointer to avoid memory leak
   std::unique_ptr<ANeuralNetworksMemory> memory_ptr =
-      nnfw::make_unique<ANeuralNetworksMemory>(size, protect, fd, offset);
+      nnfw::cpp14::make_unique<ANeuralNetworksMemory>(size, protect, fd, offset);
   if (memory_ptr == nullptr)
   {
     return ANEURALNETWORKS_OUT_OF_MEMORY;
index 85c3a60..9e98066 100644 (file)
@@ -159,13 +159,13 @@ int ANeuralNetworksModel_setOperandValue(ANeuralNetworksModel *model, int32_t in
   // until all executions using this model have completed
   if (length <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES)
   {
-    model->deref().setOperandValue(
-        ind, nnfw::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(buffer), length));
+    model->deref().setOperandValue(ind, nnfw::cpp14::make_unique<CachedData>(
+                                            reinterpret_cast<const uint8_t *>(buffer), length));
   }
   else
   {
-    model->deref().setOperandValue(
-        ind, nnfw::make_unique<ExternalData>(reinterpret_cast<const uint8_t *>(buffer), length));
+    model->deref().setOperandValue(ind, nnfw::cpp14::make_unique<ExternalData>(
+                                            reinterpret_cast<const uint8_t *>(buffer), length));
   }
 
   return ANEURALNETWORKS_NO_ERROR;
@@ -210,7 +210,7 @@ int ANeuralNetworksModel_setOperandValueFromMemory(ANeuralNetworksModel *model,
   using ::neurun::graph::operand::ExternalData;
 
   model->deref().setOperandValue(
-      ind, nnfw::make_unique<ExternalData>(
+      ind, nnfw::cpp14::make_unique<ExternalData>(
                reinterpret_cast<const uint8_t *>(memory->base() + offset), length));
 
   return ANEURALNETWORKS_NO_ERROR;
@@ -261,7 +261,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
       {
         using GraphNode = neurun::graph::operation::Conv2DNode;
 
-        graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+        graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
       }
       else
       {
@@ -282,7 +282,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
       {
         using GraphNode = neurun::graph::operation::MaxPool2DNode;
 
-        graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+        graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
       }
       else
       {
@@ -303,7 +303,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
       {
         using GraphNode = neurun::graph::operation::AvgPool2DNode;
 
-        graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+        graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
       }
       else
       {
@@ -316,7 +316,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
     {
       using GraphNode = neurun::graph::operation::ConcatNode;
 
-      graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+      graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
 
       break;
     }
@@ -324,7 +324,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
     {
       using GraphNode = neurun::graph::operation::ReshapeNode;
 
-      graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+      graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
 
       break;
     }
@@ -332,7 +332,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
     {
       using GraphNode = neurun::graph::operation::FullyConnectedNode;
 
-      graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+      graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
 
       break;
     }
@@ -340,7 +340,7 @@ int ANeuralNetworksModel_addOperation(ANeuralNetworksModel *model,
     {
       using GraphNode = neurun::graph::operation::SoftmaxNode;
 
-      graph.addOperation(nnfw::make_unique<GraphNode>(node_param));
+      graph.addOperation(nnfw::cpp14::make_unique<GraphNode>(node_param));
 
       break;
     }
index a523437..73ffd0c 100644 (file)
@@ -97,16 +97,16 @@ void Graph::lower(void)
 
     _model->operands.iterate([&](const operand::Index &index, const operand::Object &object) {
       operands_lower_info[index] =
-          nnfw::make_unique<operand::LowerInfo>(operand::asShape4D(object.shape()));
+          nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(object.shape()));
     });
 
-    _backend_resolver = nnfw::make_unique<compiler::BackendResolver>(_model->operands);
+    _backend_resolver = nnfw::cpp14::make_unique<compiler::BackendResolver>(_model->operands);
 
     _model->operations.iterate([&](const operation::Index &, operation::Node &node) {
       auto backend = _backend_resolver->getBackend(typeid(node));
 
       // Operation LowerInfo
-      node.lower_info(nnfw::make_unique<operation::LowerInfo>(backend));
+      node.lower_info(nnfw::cpp14::make_unique<operation::LowerInfo>(backend));
 
       // LowerInfo for in/output operands
       for (auto operand : node.getInputs())
@@ -223,7 +223,7 @@ std::unique_ptr<linear::Linear> Graph::linearize(void)
 {
   assert(isLowered());
 
-  auto linear = nnfw::make_unique<linear::Linear>(*this);
+  auto linear = nnfw::cpp14::make_unique<linear::Linear>(*this);
 
   // TODO Move the operations and operands to linear object
 
index 92f9b10..5895d98 100644 (file)
@@ -36,7 +36,7 @@ Index Set::append(const Shape &shape, const TypeInfo &type)
 {
   auto index = generateIndex();
 
-  _objects[index] = nnfw::make_unique<Object>(shape, type);
+  _objects[index] = nnfw::cpp14::make_unique<Object>(shape, type);
 
   return index;
 }
index 7549be0..ead8738 100644 (file)
@@ -130,7 +130,8 @@ operation::Index PermutationInsertionPass::insertPermute(const operand::Index &o
     model_outputs.replace(operand_index, out_operand_index);
   }
   out_operand.setAsOperationOutput();
-  auto out_operand_li = nnfw::make_unique<operand::LowerInfo>(operand::asShape4D(operand.shape()));
+  auto out_operand_li =
+      nnfw::cpp14::make_unique<operand::LowerInfo>(operand::asShape4D(operand.shape()));
   out_operand_li->addDefBackend(backend);
   out_operand_li->addUseBackend(backend);
   out_operand.lower_info(std::move(out_operand_li));
@@ -140,9 +141,10 @@ operation::Index PermutationInsertionPass::insertPermute(const operand::Index &o
   operand.lower_info()->addUseBackend(operand.lower_info()->def_backends().getOnlyElement());
 
   // Insert permute operation to the graph
-  auto insert_node = nnfw::make_unique<operation::PermuteNode>(operand_index, out_operand_index);
-  insert_node->lower_info(
-      nnfw::make_unique<operation::LowerInfo>(_graph.backend_resolver()->getDefaultBackend()));
+  auto insert_node =
+      nnfw::cpp14::make_unique<operation::PermuteNode>(operand_index, out_operand_index);
+  insert_node->lower_info(nnfw::cpp14::make_unique<operation::LowerInfo>(
+      _graph.backend_resolver()->getDefaultBackend()));
 
   auto node_index = _graph.operations().append(std::move(insert_node));
   const auto &node = _graph.operations().at(node_index);
index b001278..4930e4a 100644 (file)
@@ -53,16 +53,16 @@ TEST(graph_operand_usedef, usedef_test)
   auto operand_index1 = graph.addOperand(shape, type);
   graph.operands().at(operand_index1).setAsOperationOutput();
   auto mocknode_index1 = graph.addOperation(
-      nnfw::make_unique<MockNode>(IndexSet{input_operand}, IndexSet{operand_index1}));
+      nnfw::cpp14::make_unique<MockNode>(IndexSet{input_operand}, IndexSet{operand_index1}));
 
   // MockNode2
   auto operand_index2 = graph.addOperand(shape, type);
   graph.operands().at(operand_index2).setAsOperationOutput();
   auto mocknode_index2 = graph.addOperation(
-      nnfw::make_unique<MockNode>(IndexSet{input_operand}, IndexSet{operand_index2}));
+      nnfw::cpp14::make_unique<MockNode>(IndexSet{input_operand}, IndexSet{operand_index2}));
 
   // MockNode3(two input)
-  auto multiinput_index = graph.addOperation(nnfw::make_unique<MockNode>(
+  auto multiinput_index = graph.addOperation(nnfw::cpp14::make_unique<MockNode>(
       IndexSet{operand_index1, operand_index2}, IndexSet{output_operand}));
 
   graph.finishBuilding();
index 5868a42..7e12c94 100644 (file)
@@ -47,7 +47,8 @@ TEST(graph_operation_setIO, operation_setIO_conv)
 
   using GraphNode = neurun::graph::operation::Conv2DNode;
 
-  auto conv = nnfw::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
+  auto conv =
+      nnfw::cpp14::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
   ASSERT_EQ(conv->getInputs().at(Index{0}).asInt(), params[0]);
   conv->setInputs({8, 9, 10});
   ASSERT_NE(conv->getInputs().at(Index{0}).asInt(), params[0]);
@@ -72,7 +73,8 @@ TEST(graph_operation_setIO, operation_setIO_concat)
 
   using GraphNode = neurun::graph::operation::ConcatNode;
 
-  auto concat = nnfw::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
+  auto concat =
+      nnfw::cpp14::make_unique<GraphNode>(GraphNodeInitParam{7, params.data(), 1, &outoperand});
 
   ASSERT_EQ(concat->getInputs().size(), 6);
   ASSERT_EQ(concat->getInputs().at(Index{0}).asInt(), params[0]);
index df34c12..46e8b8f 100644 (file)
@@ -43,7 +43,7 @@ TEST(Verifier, dag_checker)
   graph.addOutput(operand2);
   graph.operands().at(operand2).setAsOperationOutput();
 
-  graph.addOperation(nnfw::make_unique<MockNode>(IndexSet{operand1}, IndexSet{operand2}));
+  graph.addOperation(nnfw::cpp14::make_unique<MockNode>(IndexSet{operand1}, IndexSet{operand2}));
 
   graph.finishBuilding();
 
index f46ea12..34cb1f8 100644 (file)
@@ -409,7 +409,7 @@ void ActivationBuilder::appendReLU(::arm_compute::ITensor *ifm_alloc)
 
   if (::internal::arm_compute::isGpuMode())
   {
-    auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
     fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
 
@@ -417,7 +417,7 @@ void ActivationBuilder::appendReLU(::arm_compute::ITensor *ifm_alloc)
   }
   else
   {
-    auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
     fn->configure(ifm_alloc, nullptr, act_info);
 
@@ -432,7 +432,7 @@ void ActivationBuilder::appendReLU1(::arm_compute::ITensor *ifm_alloc)
 
   if (::internal::arm_compute::isGpuMode())
   {
-    auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
     fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
 
@@ -440,7 +440,7 @@ void ActivationBuilder::appendReLU1(::arm_compute::ITensor *ifm_alloc)
   }
   else
   {
-    auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
     fn->configure(ifm_alloc, nullptr, act_info);
 
@@ -455,7 +455,7 @@ void ActivationBuilder::appendReLU6(::arm_compute::ITensor *ifm_alloc)
 
   if (::internal::arm_compute::isGpuMode())
   {
-    auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
     fn->configure(CAST_CL(ifm_alloc), nullptr, act_info);
 
@@ -463,7 +463,7 @@ void ActivationBuilder::appendReLU6(::arm_compute::ITensor *ifm_alloc)
   }
   else
   {
-    auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+    auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
     fn->configure(ifm_alloc, nullptr, act_info);
 
@@ -645,7 +645,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
       // NOTE SimpleArithmeticAddition does not support broadcasting
       assert(lhs_shape == rhs_shape);
 
-      auto l = nnfw::make_unique<SimpleArithmeticAddition>();
+      auto l = nnfw::cpp14::make_unique<SimpleArithmeticAddition>();
 
       l->configure(lhs_alloc, rhs_alloc, ofm_alloc);
 
@@ -655,7 +655,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
     {
       if (::internal::arm_compute::isGpuMode())
       {
-        auto l = nnfw::make_unique<::arm_compute::CLArithmeticAddition>();
+        auto l = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticAddition>();
 
         // TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
         l->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
@@ -665,7 +665,7 @@ void Planner::visit(const ::internal::tflite::op::Add::Node &node)
       }
       else // NEON
       {
-        auto l = nnfw::make_unique<::arm_compute::NEArithmeticAddition>();
+        auto l = nnfw::cpp14::make_unique<::arm_compute::NEArithmeticAddition>();
 
         // TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
         l->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
@@ -735,7 +735,7 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLArithmeticSubtractionEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLArithmeticSubtractionEx>();
 
       // TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
       fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
@@ -745,7 +745,7 @@ void Planner::visit(const ::internal::tflite::op::Sub::Node &node)
     }
     else // NEON
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEArithmeticSubtraction>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEArithmeticSubtraction>();
 
       // TODO Decide ConvertPolicy (WRAP? SATURATE?) according to NN API specification
       fn->configure(lhs_alloc, rhs_alloc, ofm_alloc, ::arm_compute::ConvertPolicy::SATURATE);
@@ -816,7 +816,7 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLPixelWiseMultiplication>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPixelWiseMultiplication>();
 
       fn->configure(CAST_CL(lhs_input_alloc), CAST_CL(rhs_input_alloc), CAST_CL(output_alloc),
                     1.0, // scale
@@ -827,7 +827,7 @@ void Planner::visit(const ::internal::tflite::op::Mul::Node &node)
     }
     else // NEON
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEPixelWiseMultiplication>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEPixelWiseMultiplication>();
 
       fn->configure(lhs_input_alloc, rhs_input_alloc, output_alloc,
                     1.0, // scale
@@ -898,7 +898,7 @@ void Planner::visit(const ::internal::tflite::op::Div::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLPixelWiseDivision>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPixelWiseDivision>();
 
       fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc),
                     1.0, // scale
@@ -1355,7 +1355,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
                     conv_info, param.multipler);
@@ -1364,7 +1364,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Implicit::Nod
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
 
       fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
 
@@ -1498,7 +1498,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthwiseConvolutionLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ker_alloc), CAST_CL(bias_alloc), CAST_CL(ofm_alloc),
                     conv_info, param.multipler);
@@ -1507,7 +1507,7 @@ void Planner::visit(const ::internal::tflite::op::DepthwiseConv2D::Explicit::Nod
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEDepthwiseConvolutionLayer>();
 
       fn->configure(ifm_alloc, ker_alloc, bias_alloc, ofm_alloc, conv_info, param.multipler);
 
@@ -1561,7 +1561,7 @@ void Planner::visit(const ::internal::tflite::op::Dequantize::Node &node)
     if (from_env<bool>(std::getenv("USE_SIMPLE_CAST")))
     {
       // Use the CPU version of CAST operation
-      auto l = nnfw::make_unique<SimpleCastLayer>();
+      auto l = nnfw::cpp14::make_unique<SimpleCastLayer>();
 
       l->configure(input_alloc, output_alloc);
       fn = std::move(l);
@@ -1570,7 +1570,7 @@ void Planner::visit(const ::internal::tflite::op::Dequantize::Node &node)
     {
       if (::internal::arm_compute::isGpuMode())
       {
-        auto l = nnfw::make_unique<::arm_compute::CLCast>();
+        auto l = nnfw::cpp14::make_unique<::arm_compute::CLCast>();
 
         l->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
         fn = std::move(l);
@@ -2200,7 +2200,7 @@ void Planner::visit(const ::internal::tflite::op::FullyConnected::Node &node)
     auto weight_alloc = ctx.at(::internal::tflite::operand::Index{param.weight_index});
     auto bias_alloc = ctx.at(::internal::tflite::operand::Index{param.bias_index});
 
-    auto fn = nnfw::make_unique<GenericFullyConnectedLayer>();
+    auto fn = nnfw::cpp14::make_unique<GenericFullyConnectedLayer>();
 
     fn->configure(input_alloc, weight_alloc, bias_alloc, output_alloc, needs_reshape,
                   asTensorShape(reshape));
@@ -2252,7 +2252,7 @@ void Planner::visit(const ::internal::tflite::op::ResizeBilinear::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLScale>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLScale>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc),
                     ::arm_compute::InterpolationPolicy::BILINEAR,
@@ -2305,7 +2305,7 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
     if (::internal::arm_compute::isGpuMode())
     {
       // GenericReshape first apply NCHW->NHWC permutation, and apply reshape
-      auto fn = nnfw::make_unique<GenericReshapeLayer>();
+      auto fn = nnfw::cpp14::make_unique<GenericReshapeLayer>();
 
       fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
 
@@ -2313,7 +2313,7 @@ void Planner::visit(const ::internal::tflite::op::Reshape::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<GenericReshapeLayer>();
+      auto fn = nnfw::cpp14::make_unique<GenericReshapeLayer>();
 
       fn->configure(input_alloc, output_alloc);
 
@@ -2359,7 +2359,7 @@ void Planner::visit(const ::internal::tflite::op::Squeeze::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLReshapeLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReshapeLayer>();
 
       fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
 
@@ -2367,7 +2367,7 @@ void Planner::visit(const ::internal::tflite::op::Squeeze::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEReshapeLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEReshapeLayer>();
 
       fn->configure(input_alloc, output_alloc);
 
@@ -2418,7 +2418,7 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLSoftmaxLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>();
 
       fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.scale);
 
@@ -2426,7 +2426,7 @@ void Planner::visit(const ::internal::tflite::op::Softmax::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NESoftmaxLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>();
 
       fn->configure(input_alloc, output_alloc, param.scale);
 
@@ -2541,7 +2541,7 @@ void Planner::visit(const ::internal::tflite::op::StridedSlice::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLStridedSlice>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLStridedSlice>();
 
       fn->configure(CAST_CL(inputData_alloc), CAST_CL(outputData_alloc), CAST_CL(startData_alloc),
                     CAST_CL(endData_alloc), CAST_CL(stridesData_alloc), param.beginMask,
@@ -2669,7 +2669,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceMin::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
                     ::arm_compute::ReduceOperation::MIN);
@@ -2796,7 +2796,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceMax::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
                     ::arm_compute::ReduceOperation::MAX);
@@ -2847,7 +2847,7 @@ void Planner::visit(const ::internal::tflite::op::Cast::Node &node)
     if (from_env<bool>(std::getenv("USE_SIMPLE_CAST")))
     {
       // Use the CPU version of CAST operation
-      auto l = nnfw::make_unique<SimpleCastLayer>();
+      auto l = nnfw::cpp14::make_unique<SimpleCastLayer>();
 
       l->configure(input_alloc, output_alloc);
       fn = std::move(l);
@@ -2856,7 +2856,7 @@ void Planner::visit(const ::internal::tflite::op::Cast::Node &node)
     {
       if (::internal::arm_compute::isGpuMode())
       {
-        auto l = nnfw::make_unique<::arm_compute::CLCast>();
+        auto l = nnfw::cpp14::make_unique<::arm_compute::CLCast>();
 
         l->configure(CAST_CL(input_alloc), CAST_CL(output_alloc));
         fn = std::move(l);
@@ -2925,7 +2925,7 @@ void Planner::visit(const ::internal::tflite::op::TopKV2::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLTopKV2>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLTopKV2>();
 
       fn->configure(CAST_CL(input_alloc), param.k, CAST_CL(values_alloc), CAST_CL(indices_alloc));
 
@@ -2989,7 +2989,7 @@ void Planner::visit(const ::internal::tflite::op::Gather::Node &node)
     {
       std::unique_ptr<::arm_compute::IFunction> fn;
 
-      auto l = nnfw::make_unique<::arm_compute::CLGather>();
+      auto l = nnfw::cpp14::make_unique<::arm_compute::CLGather>();
       l->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc));
       fn = std::move(l);
       builder.append("Gather", std::move(fn));
@@ -3043,7 +3043,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
 
@@ -3051,7 +3051,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
       fn->configure(ifm_alloc, ofm_alloc, act_info);
 
@@ -3097,7 +3097,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU1::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
 
@@ -3105,7 +3105,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU1::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
       fn->configure(ifm_alloc, ofm_alloc, act_info);
 
@@ -3151,7 +3151,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU6::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
 
@@ -3159,7 +3159,7 @@ void Planner::visit(const ::internal::tflite::op::ReLU6::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
       fn->configure(ifm_alloc, ofm_alloc, act_info);
 
@@ -3205,7 +3205,7 @@ void Planner::visit(const ::internal::tflite::op::Tanh::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
 
@@ -3213,7 +3213,7 @@ void Planner::visit(const ::internal::tflite::op::Tanh::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
       fn->configure(ifm_alloc, ofm_alloc, act_info);
 
@@ -3259,7 +3259,7 @@ void Planner::visit(const ::internal::tflite::op::Logistic::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), act_info);
 
@@ -3267,7 +3267,7 @@ void Planner::visit(const ::internal::tflite::op::Logistic::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
       fn->configure(ifm_alloc, ofm_alloc, act_info);
 
@@ -3394,7 +3394,7 @@ void Planner::visit(const ::internal::tflite::op::Mean::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
                     ::arm_compute::ReduceOperation::MEAN);
@@ -3589,7 +3589,7 @@ void Planner::visit(const ::internal::tflite::op::Transpose::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLPermuteEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPermuteEx>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc),
                     getARMComputePermutationVector(param.rank, param.pv));
@@ -3638,7 +3638,7 @@ void Planner::visit(const ::internal::tflite::op::Floor::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLFloor>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLFloor>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
 
@@ -3646,7 +3646,7 @@ void Planner::visit(const ::internal::tflite::op::Floor::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NEFloor>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NEFloor>();
 
       fn->configure(ifm_alloc, ofm_alloc);
 
@@ -3717,7 +3717,7 @@ void Planner::visit(const ::internal::tflite::op::ArgMax::Node &node)
     auto ofm_alloc = ctx.at(::internal::tflite::operand::Index{param.ofm_index});
     auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
 
-    auto fn = nnfw::make_unique<SimpleArgMinMax>();
+    auto fn = nnfw::cpp14::make_unique<SimpleArgMinMax>();
     bool is_min = false, is_max = true;
 
     fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis, param.rank, is_min, is_max);
@@ -3766,7 +3766,7 @@ void Planner::visit(const ::internal::tflite::op::SQRT::Node &node)
     if (from_env<bool>(std::getenv("USE_SIMPLE_SQRT")))
     {
       // USE CPU VERSION OF SQRT
-      auto fn = nnfw::make_unique<SimpleSQRT>();
+      auto fn = nnfw::cpp14::make_unique<SimpleSQRT>();
 
       fn->configure(input_alloc, output_alloc);
 
@@ -3776,7 +3776,7 @@ void Planner::visit(const ::internal::tflite::op::SQRT::Node &node)
     {
       if (::internal::arm_compute::isGpuMode())
       {
-        auto fn = nnfw::make_unique<::arm_compute::CLActivationLayer>();
+        auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayer>();
 
         fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), act_info);
 
@@ -3784,7 +3784,7 @@ void Planner::visit(const ::internal::tflite::op::SQRT::Node &node)
       }
       else
       {
-        auto fn = nnfw::make_unique<::arm_compute::NEActivationLayer>();
+        auto fn = nnfw::cpp14::make_unique<::arm_compute::NEActivationLayer>();
 
         fn->configure(input_alloc, output_alloc, act_info);
 
@@ -3833,7 +3833,7 @@ void Planner::visit(const ::internal::tflite::op::RSQRT::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLActivationLayerEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLActivationLayerEx>();
 
       fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), act_info);
 
@@ -3896,7 +3896,7 @@ void Planner::visit(const ::internal::tflite::op::Equal::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLEqual>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLEqual>();
 
       fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc));
 
@@ -3984,7 +3984,7 @@ void Planner::visit(const ::internal::tflite::op::TransposeConv::Node &node)
     auto ifm_alloc = ctx.at(::internal::tflite::operand::Index{param.ifm_index});
     auto ker_alloc = ctx.at(::internal::tflite::operand::Index{param.ker_index});
 
-    auto fn = nnfw::make_unique<SimpleTransposeConv>();
+    auto fn = nnfw::cpp14::make_unique<SimpleTransposeConv>();
 
     // Only rank 4 is supported
     const int rank = 4;
@@ -4046,7 +4046,7 @@ void Planner::visit(const ::internal::tflite::op::SquaredDifference::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLSquaredDifference>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSquaredDifference>();
 
       fn->configure(CAST_CL(lhs_alloc), CAST_CL(rhs_alloc), CAST_CL(ofm_alloc));
       builder.append("SquaredDifference", std::move(fn));
@@ -4176,7 +4176,7 @@ void Planner::visit(const ::internal::tflite::op::Pad::Node &node)
     {
       // USE CPU VERSION OF PADLAYER
       auto rank = 4;
-      auto fn = nnfw::make_unique<SimplePadLayer>();
+      auto fn = nnfw::cpp14::make_unique<SimplePadLayer>();
 
       fn->configure(ifm_alloc, ofm_alloc, pad_alloc, getARMComputeAxises(rank));
 
@@ -4186,7 +4186,7 @@ void Planner::visit(const ::internal::tflite::op::Pad::Node &node)
     {
       if (::internal::arm_compute::isGpuMode()) // GPU
       {
-        auto fn = nnfw::make_unique<::arm_compute::CLPadLayer>();
+        auto fn = nnfw::cpp14::make_unique<::arm_compute::CLPadLayer>();
 
         fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), CAST_CL(pad_alloc));
 
@@ -4256,7 +4256,7 @@ void Planner::visit(const ::internal::tflite::op::SpaceToDepth::Node &node)
     {
       // USE CPU VERSION OF SPACETODEPTH
       auto rank = 4;
-      auto fn = nnfw::make_unique<SimpleSpaceToDepth>();
+      auto fn = nnfw::cpp14::make_unique<SimpleSpaceToDepth>();
 
       fn->configure(input_alloc, output_alloc, param.block_size, getARMComputeAxises(rank));
 
@@ -4266,7 +4266,7 @@ void Planner::visit(const ::internal::tflite::op::SpaceToDepth::Node &node)
     {
       if (::internal::arm_compute::isGpuMode()) // GPU
       {
-        auto fn = nnfw::make_unique<::arm_compute::CLSpaceToDepth>();
+        auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSpaceToDepth>();
 
         fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
 
@@ -4405,7 +4405,7 @@ void Planner::visit(const ::internal::tflite::op::SpaceToBatchND::Node &node)
     // NOTE SimpleSpaceToBatchND is quite slow
     if (from_env<bool>(std::getenv("USE_SIMPLE_SPACE_TO_BATCH_ND")))
     {
-      auto fn = nnfw::make_unique<SimpleSpaceToBatchND>();
+      auto fn = nnfw::cpp14::make_unique<SimpleSpaceToBatchND>();
 
       fn->configure(input_alloc, block_size_alloc, padding_size_alloc, output_alloc,
                     getARMComputeAxises(param.rank));
@@ -4413,7 +4413,7 @@ void Planner::visit(const ::internal::tflite::op::SpaceToBatchND::Node &node)
     }
     else if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLSpaceToBatchND>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSpaceToBatchND>();
 
       fn->configure(CAST_CL(input_alloc), CAST_CL(block_size_alloc), CAST_CL(padding_size_alloc),
                     CAST_CL(output_alloc));
@@ -4485,14 +4485,14 @@ void Planner::visit(const ::internal::tflite::op::BatchToSpaceNd::Node &node)
     // NOTE SimpleBatchToSpaceND is quite slow, but may be useful for debugging
     if (from_env<bool>(std::getenv("USE_SIMPLE_BATCH_TO_SPACE_ND")))
     {
-      auto fn = nnfw::make_unique<SimpleBatchToSpaceND>();
+      auto fn = nnfw::cpp14::make_unique<SimpleBatchToSpaceND>();
 
       fn->configure(input_alloc, output_alloc, param.block_size, getARMComputeAxises(param.rank));
       builder.append("BatchToSpaceND", std::move(fn));
     }
     else if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLBatchToSpaceND>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLBatchToSpaceND>();
 
       fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
       builder.append("BatchToSpaceND", std::move(fn));
@@ -4558,7 +4558,7 @@ void Planner::visit(const ::internal::tflite::op::L2Normalization::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLNormalizationLayerEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNormalizationLayerEx>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), norm_info);
 
@@ -4566,7 +4566,7 @@ void Planner::visit(const ::internal::tflite::op::L2Normalization::Node &node)
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NENormalizationLayerEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NENormalizationLayerEx>();
 
       fn->configure(ifm_alloc, ofm_alloc, norm_info);
 
@@ -4839,7 +4839,7 @@ void Planner::visit(const ::internal::tflite::op::EmbeddingLookup::Node &node)
 
     if (from_env<bool>(std::getenv("USE_SIMPLE_EMBEDDINGLOOKUP")))
     {
-      auto fn = nnfw::make_unique<SimpleEmbeddingLookup>();
+      auto fn = nnfw::cpp14::make_unique<SimpleEmbeddingLookup>();
 
       fn->configure(lookups_alloc, values_alloc, output_alloc);
 
@@ -4847,7 +4847,7 @@ void Planner::visit(const ::internal::tflite::op::EmbeddingLookup::Node &node)
     }
     else if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLEmbeddingLookup>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLEmbeddingLookup>();
 
       fn->configure(CAST_CL(values_alloc), CAST_CL(output_alloc), CAST_CL(lookups_alloc));
 
@@ -4943,7 +4943,7 @@ void Planner::visit(const ::internal::tflite::op::HashtableLookup::Node &node)
 
     if (from_env<bool>(std::getenv("USE_SIMPLE_HASHTABLELOOKUP")))
     {
-      auto fn = nnfw::make_unique<SimpleHashtableLookupLayer>();
+      auto fn = nnfw::cpp14::make_unique<SimpleHashtableLookupLayer>();
 
       fn->configure(lookups_alloc, keys_alloc, values_alloc, output_alloc, hits_alloc);
 
@@ -4951,7 +4951,7 @@ void Planner::visit(const ::internal::tflite::op::HashtableLookup::Node &node)
     }
     else if (::internal::arm_compute::isGpuMode()) // GPU
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLHashtableLookup>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLHashtableLookup>();
 
       fn->configure(CAST_CL(lookups_alloc), CAST_CL(keys_alloc), CAST_CL(values_alloc),
                     CAST_CL(output_alloc), CAST_CL(hits_alloc));
@@ -5015,7 +5015,7 @@ void Planner::visit(const ::internal::tflite::op::LocalResponseNormalization::No
                                               param.alpha, param.beta, param.bias, false);
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLNormalizationLayerEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNormalizationLayerEx>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), norm_info);
 
@@ -5023,7 +5023,7 @@ void Planner::visit(const ::internal::tflite::op::LocalResponseNormalization::No
     }
     else
     {
-      auto fn = nnfw::make_unique<::arm_compute::NENormalizationLayerEx>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::NENormalizationLayerEx>();
 
       fn->configure(ifm_alloc, ofm_alloc, norm_info);
 
@@ -5088,7 +5088,7 @@ void Planner::visit(const ::internal::tflite::op::DepthToSpace::Node &node)
     {
       // USE CPU VERSION OF DEPTHTOSPACE
       auto rank = 4;
-      auto fn = nnfw::make_unique<SimpleDepthToSpace>();
+      auto fn = nnfw::cpp14::make_unique<SimpleDepthToSpace>();
 
       fn->configure(input_alloc, output_alloc, param.block_size, getARMComputeAxises(rank));
 
@@ -5098,7 +5098,7 @@ void Planner::visit(const ::internal::tflite::op::DepthToSpace::Node &node)
     {
       if (::internal::arm_compute::isGpuMode()) // GPU
       {
-        auto fn = nnfw::make_unique<::arm_compute::CLDepthToSpace>();
+        auto fn = nnfw::cpp14::make_unique<::arm_compute::CLDepthToSpace>();
 
         fn->configure(CAST_CL(input_alloc), CAST_CL(output_alloc), param.block_size);
 
@@ -5159,7 +5159,7 @@ void Planner::visit(const ::internal::tflite::op::Unpack::Node &node)
 
       if (::internal::arm_compute::isGpuMode())
       {
-        auto fn = nnfw::make_unique<SimpleUnpackLayer>();
+        auto fn = nnfw::cpp14::make_unique<SimpleUnpackLayer>();
         std::vector<::arm_compute::ICLTensor *> outputs;
         for (const auto &index : param.ofm_indexes)
         {
@@ -5244,7 +5244,7 @@ void Planner::visit(const ::internal::tflite::op::Pack::Node &node)
 
       if (::internal::arm_compute::isGpuMode())
       {
-        auto fn = nnfw::make_unique<SimplePackLayer>();
+        auto fn = nnfw::cpp14::make_unique<SimplePackLayer>();
         std::vector<::arm_compute::ICLTensor *> inputs;
         for (const auto &index : param.ifm_indexes)
         {
@@ -5310,14 +5310,14 @@ void Planner::visit(const ::internal::tflite::op::Neg::Node &node)
     // NOTE SimpleNeg is quite slow, but may be useful for debugging
     if (from_env<bool>(std::getenv("USE_SIMPLE_NEG")))
     {
-      auto fn = nnfw::make_unique<SimpleNeg>();
+      auto fn = nnfw::cpp14::make_unique<SimpleNeg>();
 
       fn->configure(ifm_alloc, ofm_alloc);
       builder.append("Neg", std::move(fn));
     }
     else if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLNeg>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNeg>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
       builder.append("Neg", std::move(fn));
@@ -5364,7 +5364,7 @@ void Planner::visit(const ::internal::tflite::op::Exp::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLExp>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLExp>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc));
 
@@ -5489,7 +5489,7 @@ void Planner::visit(const ::internal::tflite::op::ReduceSum::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLReduceOperation>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLReduceOperation>();
 
       fn->configure(CAST_CL(ifm_alloc), CAST_CL(ofm_alloc), param.axis,
                     ::arm_compute::ReduceOperation::SUM);
@@ -5559,7 +5559,7 @@ void Planner::visit(const ::internal::tflite::op::NotEqual::Node &node)
 
     if (::internal::arm_compute::isGpuMode())
     {
-      auto fn = nnfw::make_unique<::arm_compute::CLNotEqual>();
+      auto fn = nnfw::cpp14::make_unique<::arm_compute::CLNotEqual>();
 
       fn->configure(CAST_CL(input1_alloc), CAST_CL(input2_alloc), CAST_CL(output_alloc));
 
index 283ff55..9e99966 100644 (file)
@@ -31,7 +31,7 @@ int ANeuralNetworksMemory_createFromFd(size_t size, int protect, int fd, size_t
 
   // Use unique pointer to avoid memory leak
   std::unique_ptr<ANeuralNetworksMemory> memory_ptr =
-      nnfw::make_unique<ANeuralNetworksMemory>(size, protect, fd, offset);
+      nnfw::cpp14::make_unique<ANeuralNetworksMemory>(size, protect, fd, offset);
   if (memory_ptr == nullptr)
   {
     return ANEURALNETWORKS_OUT_OF_MEMORY;