Set arm_compute::TensorShape for bias correctly (#549)
author Jonghyun Park (박종현)/Motion Control Lab (SR)/Senior Engineer/Samsung Electronics <jh1302.park@samsung.com>
Wed, 11 Apr 2018 00:56:28 +0000 (09:56 +0900)
committer 김정현/Motion Control Lab (SR)/Senior Engineer/Samsung Electronics <jh0822.kim@samsung.com>
Wed, 11 Apr 2018 00:56:28 +0000 (09:56 +0900)
This commit revises the ARM Compute-based convFloat32 implementation to
set the bias shape correctly. This change fixes a clBuffer exception
raised during bias memory allocation.
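
For context, the fix converts the 1-D bias shape by its element count
rather than through fromNNShape(), which convFloat32 uses for the input,
filter, and output tensors. A minimal standalone sketch of that conversion
is below; NNShape is a hypothetical stand-in for android::nn::Shape, used
only to keep the sketch self-contained:

    #include <arm_compute/core/TensorShape.h>

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for android::nn::Shape (sketch only).
    struct NNShape
    {
      std::vector<uint32_t> dimensions;
    };

    // A bias is a rank-1 tensor, so its ACL shape is simply its element
    // count; it must not go through the multi-dimensional conversion used
    // for the other conv tensors.
    arm_compute::TensorShape fromVectorNNShape(const NNShape& shape)
    {
      assert(shape.dimensions.size() == 1);
      return arm_compute::TensorShape(shape.dimensions[0]);
    }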

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
src/kernel/acl/src/Conv2D_acl.cpp

index 9c9ca01..704925d 100644 (file)
@@ -3,6 +3,8 @@
 #include <arm_compute/core/TensorInfo.h>
 #include "IO_accessor.h"
 
+#include <cassert>
+
 namespace android {
 namespace nn {
 
@@ -22,6 +24,15 @@ namespace nnfw {
 namespace kernel {
 namespace acl {
 
+arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape)
+{
+  assert(shape.dimensions.size() == 1);
+
+  const uint32_t len = android::nn::getSizeOfDimension(shape, 0);
+
+  return arm_compute::TensorShape(len);
+}
+
 arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
 {
   // NNAPI assumes the following ordering:
@@ -50,7 +61,7 @@ bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
 {
   arm_compute::TensorShape input_shape = fromNNShape(inputShape);
   arm_compute::TensorShape filter_shape = fromNNShape(filterShape);
-  arm_compute::TensorShape bias_shape = fromNNShape(biasShape);
+  arm_compute::TensorShape bias_shape = fromVectorNNShape(biasShape);
   arm_compute::TensorShape output_shape = fromNNShape(outputShape);
   arm_compute::PadStrideInfo conv_info = arm_compute::PadStrideInfo(stride_width, stride_height,
                                               padding_left, padding_right,