From: Jonghyun Park / Motion Control Lab (SR) / Senior Engineer / Samsung Electronics
Date: Wed, 11 Apr 2018 00:56:28 +0000 (+0900)
Subject: Set arm_compute::TensorShape for bias correctly (#549)
X-Git-Tag: 0.1~345
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=aadb745d12fe96636654ce2c0f873d051028dd01;p=platform%2Fcore%2Fml%2Fnnfw.git

Set arm_compute::TensorShape for bias correctly (#549)

This commit revises the convFloat32 implementation based on ARM Compute to set
the bias shape correctly. This change fixes a clBuffer exception during bias
memory allocation.

Signed-off-by: Jonghyun Park
---

diff --git a/src/kernel/acl/src/Conv2D_acl.cpp b/src/kernel/acl/src/Conv2D_acl.cpp
index 9c9ca01..704925d 100644
--- a/src/kernel/acl/src/Conv2D_acl.cpp
+++ b/src/kernel/acl/src/Conv2D_acl.cpp
@@ -3,6 +3,8 @@
 #include
 
 #include "IO_accessor.h"
 
+#include
+
 namespace android {
 namespace nn {
@@ -22,6 +24,15 @@ namespace nnfw {
 namespace kernel {
 namespace acl {
 
+arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape)
+{
+  assert(shape.dimensions.size() == 1);
+
+  const uint32_t len = android::nn::getSizeOfDimension(shape, 0);
+
+  return arm_compute::TensorShape(len);
+}
+
 arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
 {
   // NNAPI assumes the following ordering:
@@ -50,7 +61,7 @@ bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
 {
   arm_compute::TensorShape input_shape = fromNNShape(inputShape);
   arm_compute::TensorShape filter_shape = fromNNShape(filterShape);
-  arm_compute::TensorShape bias_shape = fromNNShape(biasShape);
+  arm_compute::TensorShape bias_shape = fromVectorNNShape(biasShape);
   arm_compute::TensorShape output_shape = fromNNShape(outputShape);
   arm_compute::PadStrideInfo conv_info = arm_compute::PadStrideInfo(stride_width, stride_height,
                                                                     padding_left, padding_right,
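
For illustration (not part of the commit), the sketch below shows the core idea of the fix: a convolution bias is a 1-D tensor with one element per output channel, so its arm_compute::TensorShape should be built with a single dimension rather than the rank-4 mapping that fromNNShape applies to feature maps and filters; a rank-4 bias shape is presumably what triggered the clBuffer exception during allocation. The example assumes the ARM Compute Library headers are available on the include path; the biasShapeFromDims helper and the 64-channel figure are illustrative stand-ins that only mirror fromVectorNNShape, not code from this patch.

// Minimal sketch (illustrative only): map a rank-1 NNAPI-style bias shape onto a
// rank-1 arm_compute::TensorShape. biasShapeFromDims is a hypothetical helper
// that mirrors the idea of fromVectorNNShape above; it takes a plain vector
// instead of android::nn::Shape so the example stays self-contained.
#include <arm_compute/core/TensorShape.h>

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

arm_compute::TensorShape biasShapeFromDims(const std::vector<uint32_t>& dims)
{
  // A convolution bias is a vector with one element per output channel.
  assert(dims.size() == 1);
  return arm_compute::TensorShape(dims[0]);
}

int main()
{
  // Example: a convolution with 64 output channels has a bias of length 64.
  const std::vector<uint32_t> bias_dims{64};

  const arm_compute::TensorShape bias_shape = biasShapeFromDims(bias_dims);

  // The resulting shape is rank 1 with 64 elements, which is what the bias
  // tensor allocation expects.
  std::cout << "rank: " << bias_shape.num_dimensions()
            << ", elements: " << bias_shape.total_size() << std::endl;

  return 0;
}

Running it should print "rank: 1, elements: 64" (linking against the ARM Compute Library may be required, depending on the build setup).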