Introduce CLUniqueTensor (#551)
author Jonghyun Park / Motion Control Lab (SR) / Senior Engineer / Samsung Electronics <jh1302.park@samsung.com>
Wed, 11 Apr 2018 01:47:21 +0000 (10:47 +0900)
committer Junghyun Kim / Motion Control Lab (SR) / Senior Engineer / Samsung Electronics <jh0822.kim@samsung.com>
Wed, 11 Apr 2018 01:47:21 +0000 (10:47 +0900)
This commit introduces CLUniqueTensor, which initializes and allocates its
buffer in its constructor and frees that buffer in its destructor.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
src/kernel/acl/src/Conv2D_acl.cpp

index 704925d..a639e9e 100644
@@ -50,6 +50,34 @@ arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
   return arm_compute::TensorShape(w, h, c, n);
 }
 
+class CLUniqueTensor
+{
+public:
+  CLUniqueTensor(const ::arm_compute::TensorInfo &info)
+  {
+    _tensor.allocator()->init(info);
+    _tensor.allocator()->allocate();
+  }
+
+public:
+  // Both copy and move are not allowed
+  CLUniqueTensor(const CLUniqueTensor &) = delete;
+  CLUniqueTensor(CLUniqueTensor &&) = delete;
+
+public:
+  ~CLUniqueTensor()
+  {
+    _tensor.allocator()->free();
+  }
+
+public:
+  ::arm_compute::CLTensor &ref(void) { return _tensor; }
+  ::arm_compute::CLTensor *ptr(void) { return &_tensor; }
+
+private:
+  ::arm_compute::CLTensor _tensor;
+};
+
 bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
                  const float* filterData, const android::nn::Shape& filterShape,
                  const float* biasData, const android::nn::Shape& biasShape,
@@ -68,30 +97,23 @@ bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
                                               padding_top, padding_bottom,
                                               arm_compute::DimensionRoundingType::FLOOR);
 
-  arm_compute::CLTensor input, output, bias, filter;
-
-  input.allocator()->init(arm_compute::TensorInfo(input_shape, arm_compute::Format::F32));
-  output.allocator()->init(arm_compute::TensorInfo(output_shape, arm_compute::Format::F32));
-  bias.allocator()->init(arm_compute::TensorInfo(bias_shape, arm_compute::Format::F32));
-  filter.allocator()->init(arm_compute::TensorInfo(filter_shape, arm_compute::Format::F32));
+  CLUniqueTensor input(arm_compute::TensorInfo(input_shape, arm_compute::Format::F32));
+  CLUniqueTensor output(arm_compute::TensorInfo(output_shape, arm_compute::Format::F32));
+  CLUniqueTensor bias(arm_compute::TensorInfo(bias_shape, arm_compute::Format::F32));
+  CLUniqueTensor filter(arm_compute::TensorInfo(filter_shape, arm_compute::Format::F32));
 
   arm_compute::CLConvolutionLayer conv_f;
-  conv_f.configure(&input, &filter, &bias, &output, conv_info);
-
-  input.allocator()->allocate();
-  output.allocator()->allocate();
-  bias.allocator()->allocate();
-  filter.allocator()->allocate();
+  conv_f.configure(input.ptr(), filter.ptr(), bias.ptr(), output.ptr(), conv_info);
 
-  TensorAccess<InputAccessor>(input, inputData, inputShape);
-  TensorAccess<BiasAccessor>(bias, biasData, biasShape);
-  TensorAccess<WeightAccessor>(filter, filterData, filterShape);
+  TensorAccess<InputAccessor>(input.ref(), inputData, inputShape);
+  TensorAccess<BiasAccessor>(bias.ref(), biasData, biasShape);
+  TensorAccess<WeightAccessor>(filter.ref(), filterData, filterShape);
 
   conv_f.run();
 
   arm_compute::CLScheduler::get().sync();
 
-  TensorAccess<OutputAccessor>(output, outputData, outputShape);
+  TensorAccess<OutputAccessor>(output.ref(), outputData, outputShape);
 
   return true;
 }