[neurun] Apply acl internal buffer manager into KernelGenerator (#5908)
author김용섭/On-Device Lab(SR)/Engineer/삼성전자 <yons.kim@samsung.com>
Fri, 26 Jul 2019 02:45:07 +0000 (11:45 +0900)
committer오형석/On-Device Lab(SR)/Staff Engineer/삼성전자 <hseok82.oh@samsung.com>
Fri, 26 Jul 2019 02:45:07 +0000 (11:45 +0900)
Apply the ACL internal buffer manager to KernelGenerator for the cl and neon backends

Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
runtimes/neurun/backend/acl_cl/KernelGenerator.cc
runtimes/neurun/backend/acl_neon/KernelGenerator.cc

index cef1f01..cc1201d 100644 (file)
@@ -212,7 +212,8 @@ void KernelGenerator::visit(const model::operation::Conv2DNode &node)
   const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
   const auto act_info = acl_common::asActivationLayerInfo(activation);
 
-  auto fn = nnfw::cpp14::make_unique<::arm_compute::CLConvolutionLayer>();
+  auto fn = nnfw::cpp14::make_unique<::arm_compute::CLConvolutionLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), ofm_alloc->handle(),
                 conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);
@@ -448,7 +449,8 @@ void KernelGenerator::visit(const model::operation::FullyConnectedNode &node)
   auto bias_alloc = _tensor_builder->at(bias_index).get();
   auto acl_layout = output_alloc->handle()->info()->data_layout();
 
-  auto fn = nnfw::cpp14::make_unique<arm_compute::CLFullyConnectedReshapingLayer>();
+  auto fn = nnfw::cpp14::make_unique<arm_compute::CLFullyConnectedReshapingLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
                 output_alloc->handle(), needs_reshape,
@@ -612,7 +614,8 @@ void KernelGenerator::visit(const model::operation::SoftmaxNode &node)
   auto output_alloc = _tensor_builder->at(output_index).get();
   auto input_alloc = _tensor_builder->at(input_index).get();
 
-  auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>();
+  auto fn = nnfw::cpp14::make_unique<::arm_compute::CLSoftmaxLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   fn->configure(input_alloc->handle(), output_alloc->handle(), beta);
 
@@ -1571,7 +1574,8 @@ void KernelGenerator::visit(const model::operation::TransposeConvNode &node)
 
   std::unique_ptr<::arm_compute::IFunction> fn;
 
-  auto l = nnfw::cpp14::make_unique<::arm_compute::CLTransposeConvLayer>();
+  auto l = nnfw::cpp14::make_unique<::arm_compute::CLTransposeConvLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   l->configure(ifm_alloc->handle(), ker_alloc->handle(), nullptr, ofm_alloc->handle(), tconv_info,
                invalid_vertical, invalid_horizontal);
index 2d1a477..338bd32 100644 (file)
@@ -200,7 +200,8 @@ void KernelGenerator::visit(const model::operation::Conv2DNode &node)
   const auto conv_info = acl_common::asPadStrideInfo(padding, stride);
   const auto act_info = acl_common::asActivationLayerInfo(activation);
 
-  auto fn = nnfw::cpp14::make_unique<::arm_compute::NEConvolutionLayer>();
+  auto fn = nnfw::cpp14::make_unique<::arm_compute::NEConvolutionLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   fn->configure(ifm_alloc->handle(), ker_alloc->handle(), bias_alloc->handle(), ofm_alloc->handle(),
                 conv_info, ::arm_compute::WeightsInfo(), ::arm_compute::Size2D(1U, 1U), act_info);
@@ -516,7 +517,8 @@ void KernelGenerator::visit(const model::operation::FullyConnectedNode &node)
   auto bias_alloc = _tensor_builder->at(bias_index).get();
   auto acl_layout = output_alloc->handle()->info()->data_layout();
 
-  auto fn = nnfw::cpp14::make_unique<arm_compute::NEFullyConnectedReshapingLayer>();
+  auto fn = nnfw::cpp14::make_unique<arm_compute::NEFullyConnectedReshapingLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   fn->configure(input_alloc->handle(), weight_alloc->handle(), bias_alloc->handle(),
                 output_alloc->handle(), needs_reshape,
@@ -622,7 +624,8 @@ void KernelGenerator::visit(const model::operation::SoftmaxNode &node)
   auto output_alloc = _tensor_builder->at(output_index).get();
   auto input_alloc = _tensor_builder->at(input_index).get();
 
-  auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>();
+  auto fn = nnfw::cpp14::make_unique<::arm_compute::NESoftmaxLayer>(
+      _tensor_builder->acl_memory_manager()->internal_buffer_manager());
 
   fn->configure(input_alloc->handle(), output_alloc->handle(), beta);