From: 김정현/동작제어Lab(SR)/Senior Engineer/삼성전자
Date: Wed, 11 Apr 2018 07:18:10 +0000 (+0900)
Subject: [NNOP] Introduce module shape (#570)
X-Git-Tag: 0.1~329
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=105faca9370cdf6e835d81716edbd3eac33fb2b3;p=platform%2Fcore%2Fml%2Fnnfw.git

[NNOP] Introduce module shape (#570)

This commit introduces a shape module. The module provides getSizeOfDimension(),
fromVectorNNShape(), and fromNNShape(), which are commonly used across the
layer implementations.

Signed-off-by: Junghyun Kim
---

diff --git a/src/kernel/acl/CMakeLists.txt b/src/kernel/acl/CMakeLists.txt
index cd6f3f4..845a5a9 100644
--- a/src/kernel/acl/CMakeLists.txt
+++ b/src/kernel/acl/CMakeLists.txt
@@ -26,6 +26,7 @@ link_directories(${CMAKE_INSTALL_PREFIX}/lib)
 # kernel library
 set(KERNELACL_SRCS "src/Init_acl.cpp"
                    "src/IO_accessor.cpp"
+                   "src/shape.cpp"
                    "src/cl/Conv2D_acl.cpp"
                    "src/cl/FullyConnected.cpp"
                    "src/cl/Reshape.cpp"
@@ -48,6 +49,7 @@ install(TARGETS ${LIB_KERNELACL} DESTINATION lib)
 # kernel test executable
 set(KERNELACL_TEST_SRCS "src/cl/Conv2D_acl.test.cpp"
                         "src/util.cpp"
+                        "src/shape.cpp"
                         "src/gtest_env.cpp"
                         "src/cl/FullyConnected.test.cpp"
                         "src/cl/Reshape.test.cpp"
diff --git a/src/kernel/acl/src/cl/Conv2D_acl.cpp b/src/kernel/acl/src/cl/Conv2D_acl.cpp
index ebfaffa..849aba0 100644
--- a/src/kernel/acl/src/cl/Conv2D_acl.cpp
+++ b/src/kernel/acl/src/cl/Conv2D_acl.cpp
@@ -4,54 +4,15 @@
 #include
 #include
 #include "../IO_accessor.h"
+#include "../util.h"
+#include "../shape.h"
 #include

-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
-  if (dimensionIdx >= shape.dimensions.size()) {
-    // TODO, log the error
-    return 0;
-  }
-  return shape.dimensions[dimensionIdx];
-}
-
-} // namespace nn
-} // namespace android
-
 namespace nnfw {
 namespace kernel {
 namespace acl {

-arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape)
-{
-  assert(shape.dimensions.size() == 1);
-
-  const uint32_t len = android::nn::getSizeOfDimension(shape, 0);
-
-  return arm_compute::TensorShape(len);
-}
-
-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
-{
-  // NNAPI assumes the following ordering:
-  //
-  // dim(0) -> N
-  // dim(1) -> H
-  // dim(2) -> W
-  // dim(3) -> C
-  //
-  uint32_t c = android::nn::getSizeOfDimension(shape, 3);
-  uint32_t h = android::nn::getSizeOfDimension(shape, 1);
-  uint32_t w = android::nn::getSizeOfDimension(shape, 2);
-  uint32_t n = android::nn::getSizeOfDimension(shape, 0);
-
-  return arm_compute::TensorShape(w, h, c, n);
-}
-
 class CLUniqueTensor
 {
 public:
@@ -125,10 +86,10 @@ bool convFloat32(const float* inputData, const android::nn::Shape& inputShape,
                  int32_t activation,
                  float* outputData, const android::nn::Shape& outputShape) {
-  arm_compute::TensorShape input_shape = fromNNShape(inputShape);
-  arm_compute::TensorShape filter_shape = fromNNShape(filterShape);
-  arm_compute::TensorShape bias_shape = fromVectorNNShape(biasShape);
-  arm_compute::TensorShape output_shape = fromNNShape(outputShape);
+  arm_compute::TensorShape input_shape = util::fromNNShape(inputShape);
+  arm_compute::TensorShape filter_shape = util::fromNNShape(filterShape);
+  arm_compute::TensorShape bias_shape = util::fromVectorNNShape(biasShape);
+  arm_compute::TensorShape output_shape = util::fromNNShape(outputShape);

   arm_compute::PadStrideInfo conv_info = arm_compute::PadStrideInfo(stride_width, stride_height, padding_left,
                                                                     padding_right, padding_top, padding_bottom,
diff --git a/src/kernel/acl/src/cl/FullyConnected.cpp b/src/kernel/acl/src/cl/FullyConnected.cpp
index ec745be..e35d8e5 100644
--- a/src/kernel/acl/src/cl/FullyConnected.cpp
+++ b/src/kernel/acl/src/cl/FullyConnected.cpp
@@ -4,32 +4,22 @@
 // TODO: fix include path in CMakeFiles
 #include "../IO_accessor.h"
-
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
-
-} // namespace nn
-} // namespace android
+#include "../shape.h"

 namespace nnfw {
 namespace kernel {
 namespace acl {

-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
-
 bool fullyConnectedFloat32(const float* inputData, const android::nn::Shape& inputShape,
                            const float* weightsData, const android::nn::Shape& weightsShape,
                            const float* biasData, const android::nn::Shape& biasShape,
                            int32_t activation,
                            float* outputData, const android::nn::Shape& outputShape) {
-  auto input_shape = fromNNShape(inputShape);
-  auto filter_shape = fromNNShape(weightsShape);
-  auto bias_shape = fromNNShape(biasShape);
-  auto output_shape = fromNNShape(outputShape);
+  auto input_shape = util::fromNNShape(inputShape);
+  auto filter_shape = util::fromNNShape(weightsShape);
+  auto bias_shape = util::fromNNShape(biasShape);
+  auto output_shape = util::fromNNShape(outputShape);

   arm_compute::CLTensor input, output, bias, filter;
diff --git a/src/kernel/acl/src/cl/Reshape.cpp b/src/kernel/acl/src/cl/Reshape.cpp
index f32b8f4..d6a3b87 100644
--- a/src/kernel/acl/src/cl/Reshape.cpp
+++ b/src/kernel/acl/src/cl/Reshape.cpp
@@ -4,27 +4,17 @@
 // TODO: fix include path in CMakeFiles
 #include "../IO_accessor.h"
-
-namespace android {
-namespace nn {
-
-// TODO remove from this source and use it from runtime
-uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
-
-} // namespace nn
-} // namespace android
+#include "../shape.h"

 namespace nnfw {
 namespace kernel {
 namespace acl {

-arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
-
 bool reshapeGeneric(const void* inputData, const android::nn::Shape& inputShape,
                     void* outputData, const android::nn::Shape& outputShape) {
-  auto input_shape = fromNNShape(inputShape);
-  auto output_shape = fromNNShape(outputShape);
+  auto input_shape = util::fromNNShape(inputShape);
+  auto output_shape = util::fromNNShape(outputShape);

   arm_compute::CLTensor input, output;
diff --git a/src/kernel/acl/src/shape.cpp b/src/kernel/acl/src/shape.cpp
new file mode 100644
index 0000000..c2442a6
--- /dev/null
+++ b/src/kernel/acl/src/shape.cpp
@@ -0,0 +1,54 @@
+#include
+
+#include "shape.h"
+
+namespace android {
+namespace nn {
+
+// TODO remove from this source and use it from runtime
+uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx) {
+  if (dimensionIdx >= shape.dimensions.size()) {
+    // TODO, log the error
+    return 0;
+  }
+  return shape.dimensions[dimensionIdx];
+}
+
+} // namespace nn
+} // namespace android
+
+namespace nnfw {
+namespace kernel {
+namespace acl {
+namespace util {
+
+arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape)
+{
+  assert(shape.dimensions.size() == 1);
+
+  const uint32_t len = android::nn::getSizeOfDimension(shape, 0);
+
+  return arm_compute::TensorShape(len);
+}
+
+arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape)
+{
+  // NNAPI assumes the following ordering:
+  //
+  // dim(0) -> N
+  // dim(1) -> H
+  // dim(2) -> W
+  // dim(3) -> C
+  //
+  uint32_t c = android::nn::getSizeOfDimension(shape, 3);
+  uint32_t h = android::nn::getSizeOfDimension(shape, 1);
+  uint32_t w = android::nn::getSizeOfDimension(shape, 2);
+  uint32_t n = android::nn::getSizeOfDimension(shape, 0);
+
+  return arm_compute::TensorShape(w, h, c, n);
+}
+
+} // namespace util
+} // namespace acl
+} // namespace kernel
+} // namespace nnfw
diff --git a/src/kernel/acl/src/shape.h b/src/kernel/acl/src/shape.h
new file mode 100644
index 0000000..891a9d5
--- /dev/null
+++ b/src/kernel/acl/src/shape.h
@@ -0,0 +1,25 @@
+#include
+#include
+#include
+
+namespace android {
+namespace nn {
+
+// TODO remove from this source and use it from runtime
+uint32_t getSizeOfDimension(const Shape& shape, uint32_t dimensionIdx);
+
+} // namespace nn
+} // namespace android
+
+namespace nnfw {
+namespace kernel {
+namespace acl {
+namespace util {
+
+arm_compute::TensorShape fromVectorNNShape(const android::nn::Shape& shape);
+arm_compute::TensorShape fromNNShape(const android::nn::Shape& shape);
+
+} // namespace util
+} // namespace acl
+} // namespace kernel
+} // namespace nnfw
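
Usage note: a kernel built on this module converts the NNAPI shapes once at its entry
point and feeds the resulting arm_compute::TensorShape values to its CL tensors, as
convFloat32 and fullyConnectedFloat32 do above. The sketch below is illustrative only;
the reluFloat32 entry point and its signature are hypothetical and not part of this
commit, and it assumes the ACL headers and the android::nn::Shape definition pulled in
by shape.h are reachable from the include path of a source file placed next to the
existing CL kernels.

// Hypothetical kernel sketch: how a layer adopts the new shape module.
// Not part of this commit; the real kernels (convFloat32, fullyConnectedFloat32,
// reshapeGeneric) follow the same pattern.
#include <arm_compute/core/TensorInfo.h>
#include <arm_compute/runtime/CL/CLTensor.h>

#include "../shape.h"  // util::fromNNShape / util::fromVectorNNShape

namespace nnfw {
namespace kernel {
namespace acl {

bool reluFloat32(const float* inputData, const android::nn::Shape& inputShape,
                 float* outputData, const android::nn::Shape& outputShape) {
  // NNAPI shapes are NHWC; util::fromNNShape maps them to ACL's TensorShape(W, H, C, N).
  arm_compute::TensorShape input_shape  = util::fromNNShape(inputShape);
  arm_compute::TensorShape output_shape = util::fromNNShape(outputShape);

  arm_compute::CLTensor input, output;
  input.allocator()->init(arm_compute::TensorInfo(input_shape, arm_compute::Format::F32));
  output.allocator()->init(arm_compute::TensorInfo(output_shape, arm_compute::Format::F32));

  // ... configure an ACL function on {input, output}, allocate the tensors,
  // copy inputData in, run, and copy the result back to outputData ...

  return true;
}

} // namespace acl
} // namespace kernel
} // namespace nnfw

Centralizing the conversion this way keeps the NHWC-to-ACL axis mapping in one place
(src/shape.cpp) instead of being re-declared and re-implemented in every kernel.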