FIX: build fail due to fromNNShape() (#599)
author김정현/동작제어Lab(SR)/Senior Engineer/삼성전자 <jh0822.kim@samsung.com>
Thu, 12 Apr 2018 00:07:50 +0000 (09:07 +0900)
committerGitHub Enterprise <noreply-CODE@samsung.com>
Thu, 12 Apr 2018 00:07:50 +0000 (09:07 +0900)
The build fails because fromNNShape() was moved into the namespace nnfw::kernel::acl::util.
This commit fixes the failure by changing `fromNNShape()` to
`util::fromNNShape()`.

Signed-off-by: Junghyun Kim <jh0822.kim@samsung.com>
src/kernel/acl/src/cl/Pooling.cpp

index 0664a62..97377bb 100644 (file)
@@ -2,23 +2,14 @@
 #include <arm_compute/core/TensorShape.h>
 #include <arm_compute/core/TensorInfo.h>
 #include "../IO_accessor.h"
+#include "../shape.h"
 
 #include <cassert>
 
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
-
-} // namespace nn
-} // namespace android
-
 namespace nnfw {
 namespace kernel {
 namespace acl {
 
-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
 
 bool maxPoolFloat32(const float* inputData, const android::nn::Shape& inputShape,
                  int32_t padding_left, int32_t padding_right,
@@ -28,8 +19,8 @@ bool maxPoolFloat32(const float* inputData, const android::nn::Shape& inputShape
                  int32_t activation,
                  float* outputData, const android::nn::Shape& outputShape)
 {
-  arm_compute::TensorShape input_shape = fromNNShape(inputShape);
-  arm_compute::TensorShape output_shape = fromNNShape(outputShape);
+  arm_compute::TensorShape input_shape = util::fromNNShape(inputShape);
+  arm_compute::TensorShape output_shape = util::fromNNShape(outputShape);
 
   assert(activation == ANEURALNETWORKS_FUSED_NONE || activation == ANEURALNETWORKS_FUSED_RELU);