From: 김정현/동작제어Lab(SR)/Senior Engineer/삼성전자
Date: Thu, 12 Apr 2018 00:07:50 +0000 (+0900)
Subject: FIX: build fail due to fromNNShape() (#599)
X-Git-Tag: 0.1~312
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=f2c92386174693a998f99475d70621c92ff79fcc;p=platform%2Fcore%2Fml%2Fnnfw.git

FIX: build fail due to fromNNShape() (#599)

The build fails because fromNNShape() was moved into the nnfw::kernel::acl::util namespace.
This commit fixes the failure by changing `fromNNShape()` to `util::fromNNShape()`.

Signed-off-by: Junghyun Kim
---

diff --git a/src/kernel/acl/src/cl/Pooling.cpp b/src/kernel/acl/src/cl/Pooling.cpp
index 0664a62..97377bb 100644
--- a/src/kernel/acl/src/cl/Pooling.cpp
+++ b/src/kernel/acl/src/cl/Pooling.cpp
@@ -2,23 +2,14 @@
 #include
 #include
 #include "../IO_accessor.h"
+#include "../shape.h"
 #include
 
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
-
-} // namespace nn
-} // namespace android
-
 namespace nnfw {
 namespace kernel {
 namespace acl {
 
-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
 
 bool maxPoolFloat32(const float* inputData, const android::nn::Shape& inputShape,
                     int32_t padding_left, int32_t padding_right,
@@ -28,8 +19,8 @@ bool maxPoolFloat32(const float* inputData, const android::nn::Shape& inputShape
                     int32_t activation,
                     float* outputData, const android::nn::Shape& outputShape) {
-  arm_compute::TensorShape input_shape = fromNNShape(inputShape);
-  arm_compute::TensorShape output_shape = fromNNShape(outputShape);
+  arm_compute::TensorShape input_shape = util::fromNNShape(inputShape);
+  arm_compute::TensorShape output_shape = util::fromNNShape(outputShape);
 
   assert(activation == ANEURALNETWORKS_FUSED_NONE ||
          activation == ANEURALNETWORKS_FUSED_RELU);
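
For reference, a minimal standalone sketch of the name-lookup issue this commit resolves: once fromNNShape() lives in the nested util namespace, callers inside nnfw::kernel::acl must qualify the call as util::fromNNShape(). Every name below other than fromNNShape() (the placeholder types and the poolOutputDim() helper) is a simplified stand-in, not the real android::nn or arm_compute declaration.

// Minimal standalone sketch of the namespace change described above.
// All names except fromNNShape() are simplified placeholders; they are
// not the actual android::nn or arm_compute declarations.
#include <cstdint>

namespace nnfw {
namespace kernel {
namespace acl {

namespace util {
// Stand-in for the helper that was moved into nnfw::kernel::acl::util.
inline uint32_t fromNNShape(uint32_t dim) { return dim; }
} // namespace util

// Code in nnfw::kernel::acl can no longer call fromNNShape() unqualified;
// it has to be written as util::fromNNShape(), which is what the diff does.
inline uint32_t poolOutputDim(uint32_t dim) {
  return util::fromNNShape(dim); // was: fromNNShape(dim) -> build error
}

} // namespace acl
} // namespace kernel
} // namespace nnfw

int main() { return nnfw::kernel::acl::poolOutputDim(3) == 3 ? 0 : 1; }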