From fdf8a841bdcfb167e53943e91ab758a8c8f437e3 Mon Sep 17 00:00:00 2001 From: =?utf8?q?=EC=9E=A5=EC=A7=80=EC=84=AD/On-Device=20Lab=28SR=29/Enginee?= =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?= Date: Wed, 2 Oct 2019 14:35:42 +0900 Subject: [PATCH] Apply optimized cpu kernel of MaxPoolFloat32 (#7886) This commit applies optimized cpu kernel of MaxPoolFloat32. - Introduce optimized cpu kernel of MaxPool for float - Apply that kernel for cpu backend of neurun Signed-off-by: jiseob.jang --- runtimes/libs/cker/include/cker/Types.h | 2 +- .../libs/cker/include/cker/operation/AveragePool.h | 6 +- .../libs/cker/include/cker/operation/MaxPool.h | 74 +++-------------- .../include/cker/operation/optimized/AveragePool.h | 4 +- .../include/cker/operation/optimized/MaxPool.h | 97 ++++++++++++++++++++++ .../include/cker/operation/reference/AveragePool.h | 4 +- .../include/cker/operation/reference/MaxPool.h | 84 +++++++++++++++++++ runtimes/neurun/backend/cpu/kernel/AvgPoolLayer.cc | 2 +- runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc | 2 +- .../core/src/exec/interp/operations/AvgPool2D.cc | 2 +- .../core/src/exec/interp/operations/MaxPool2D.cc | 2 +- 11 files changed, 204 insertions(+), 75 deletions(-) create mode 100644 runtimes/libs/cker/include/cker/operation/optimized/MaxPool.h create mode 100644 runtimes/libs/cker/include/cker/operation/reference/MaxPool.h diff --git a/runtimes/libs/cker/include/cker/Types.h b/runtimes/libs/cker/include/cker/Types.h index 4c2569f..a3dd0f3 100644 --- a/runtimes/libs/cker/include/cker/Types.h +++ b/runtimes/libs/cker/include/cker/Types.h @@ -45,7 +45,7 @@ struct PaddingValues int16_t height; }; -struct AveragePoolParams +struct PoolParams { FusedActivationFunctionType activation; PaddingType padding_type; diff --git a/runtimes/libs/cker/include/cker/operation/AveragePool.h b/runtimes/libs/cker/include/cker/operation/AveragePool.h index f790982..b209194 100644 --- a/runtimes/libs/cker/include/cker/operation/AveragePool.h +++ 
b/runtimes/libs/cker/include/cker/operation/AveragePool.h @@ -29,8 +29,8 @@ namespace nnfw namespace cker { -inline void AveragePool(const AveragePoolParams &params, const Shape &input_shape, - const float *input_data, const Shape &output_shape, float *output_data) +inline void AveragePool(const PoolParams &params, const Shape &input_shape, const float *input_data, + const Shape &output_shape, float *output_data) { #if defined(CKER_OPTIMIZED_EIGEN) optimized::AveragePool(params, input_shape, input_data, output_shape, output_data); @@ -39,7 +39,7 @@ inline void AveragePool(const AveragePoolParams &params, const Shape &input_shap #endif // defined(CKER_OPTIMIZED_EIGEN) } -inline void AveragePool(const AveragePoolParams &params, const Shape &input_shape, +inline void AveragePool(const PoolParams &params, const Shape &input_shape, const uint8_t *input_data, const Shape &output_shape, uint8_t *output_data) { assert(params.quantized_activation_min <= params.quantized_activation_max); diff --git a/runtimes/libs/cker/include/cker/operation/MaxPool.h b/runtimes/libs/cker/include/cker/operation/MaxPool.h index 9619e26..326168b 100644 --- a/runtimes/libs/cker/include/cker/operation/MaxPool.h +++ b/runtimes/libs/cker/include/cker/operation/MaxPool.h @@ -22,78 +22,26 @@ #include "cker/Types.h" #include "cker/Utils.h" +#include "cker/operation/optimized/MaxPool.h" +#include "cker/operation/reference/MaxPool.h" + namespace nnfw { namespace cker { -struct MaxPoolParams -{ - FusedActivationFunctionType activation; - PaddingType padding_type; - PaddingValues padding_values; - int stride_height; - int stride_width; - int filter_height; - int filter_width; - // uint8, etc, activation params. - int32_t quantized_activation_min; - int32_t quantized_activation_max; - // float activation params. 
- float float_activation_min; - float float_activation_max; -}; - -inline void MaxPool(const MaxPoolParams &params, const Shape &input_shape, const float *input_data, +inline void MaxPool(const PoolParams &params, const Shape &input_shape, const float *input_data, const Shape &output_shape, float *output_data) { - assert(input_shape.DimensionsCount() == 4); - assert(output_shape.DimensionsCount() == 4); - const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int input_height = input_shape.Dims(1); - const int input_width = input_shape.Dims(2); - const int output_height = output_shape.Dims(1); - const int output_width = output_shape.Dims(2); - const int stride_height = params.stride_height; - const int stride_width = params.stride_width; - for (int batch = 0; batch < batches; ++batch) - { - for (int out_y = 0; out_y < output_height; ++out_y) - { - for (int out_x = 0; out_x < output_width; ++out_x) - { - for (int channel = 0; channel < depth; ++channel) - { - const int in_x_origin = (out_x * stride_width) - params.padding_values.width; - const int in_y_origin = (out_y * stride_height) - params.padding_values.height; - // Compute the boundaries of the filter region clamped so as to - // ensure that the filter window fits in the input array. 
- const int filter_x_start = std::max(0, -in_x_origin); - const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin); - const int filter_y_start = std::max(0, -in_y_origin); - const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin); - float max = std::numeric_limits<float>::lowest(); - for (int filter_y = filter_y_start; filter_y < filter_y_end; ++filter_y) - { - for (int filter_x = filter_x_start; filter_x < filter_x_end; ++filter_x) - { - const int in_x = in_x_origin + filter_x; - const int in_y = in_y_origin + filter_y; - max = std::max(max, input_data[Offset(input_shape, batch, in_y, in_x, channel)]); - } - } - output_data[Offset(output_shape, batch, out_y, out_x, channel)] = - ActivationFunctionWithMinMax(max, params.float_activation_min, - params.float_activation_max); - } - } - } - } +#if defined(CKER_OPTIMIZED_EIGEN) + optimized::MaxPool(params, input_shape, input_data, output_shape, output_data); +#else // defined(CKER_OPTIMIZED_EIGEN) + reference::MaxPool(params, input_shape, input_data, output_shape, output_data); +#endif // defined(CKER_OPTIMIZED_EIGEN) } -inline void MaxPool(const MaxPoolParams &params, const Shape &input_shape, - const uint8_t *input_data, const Shape &output_shape, uint8_t *output_data) +inline void MaxPool(const PoolParams &params, const Shape &input_shape, const uint8_t *input_data, + const Shape &output_shape, uint8_t *output_data) { assert(params.quantized_activation_min <= params.quantized_activation_max); assert(params.quantized_activation_min >= 0); diff --git a/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h b/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h index f2aa70e..d94a581 100644 --- a/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h +++ b/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h @@ -34,8 +34,8 @@ namespace optimized { // TODO Change to apply neon for this function if it is faster -inline void 
AveragePool(const AveragePoolParams &params, const Shape &input_shape, - const float *input_data, const Shape &output_shape, float *output_data) +inline void AveragePool(const PoolParams &params, const Shape &input_shape, const float *input_data, + const Shape &output_shape, float *output_data) { assert(input_shape.DimensionsCount() == 4); assert(output_shape.DimensionsCount() == 4); diff --git a/runtimes/libs/cker/include/cker/operation/optimized/MaxPool.h b/runtimes/libs/cker/include/cker/operation/optimized/MaxPool.h new file mode 100644 index 0000000..07a14ae --- /dev/null +++ b/runtimes/libs/cker/include/cker/operation/optimized/MaxPool.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2018 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_CKER_OPTIMIZED_MAX_POOL_H__ +#define __NNFW_CKER_OPTIMIZED_MAX_POOL_H__ + +#if defined(CKER_OPTIMIZED_EIGEN) +#include "cker/eigen/Utils.h" +#include "cker/Shape.h" +#include "cker/Types.h" +#include "cker/Utils.h" +#include <Eigen/Core> + +namespace nnfw +{ +namespace cker +{ +namespace optimized +{ + +// TODO Change to apply neon for this function if it is faster +inline void MaxPool(const PoolParams &params, const Shape &input_shape, const float *input_data, + const Shape &output_shape, float *output_data) +{ + assert(input_shape.DimensionsCount() == 4); + assert(output_shape.DimensionsCount() == 4); + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int stride_height = params.stride_height; + const int stride_width = params.stride_width; + + const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape); + auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape); + // Prefill the output to minimum representable float value + out_mat.setConstant(std::numeric_limits<float>::lowest()); + for (int b = 0; b < batches; ++b) + { + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) + { + // (h_start, h_end) * (w_start, w_end) is the range that the input + // vector projects to. + int hpad = h + params.padding_values.height; + int wpad = w + params.padding_values.width; + int h_start = + (hpad < params.filter_height) ? 0 : (hpad - params.filter_height) / stride_height + 1; + int h_end = std::min(hpad / stride_height + 1, output_height); + int w_start = + (wpad < params.filter_width) ? 
0 : (wpad - params.filter_width) / stride_width + 1; + int w_end = std::min(wpad / stride_width + 1, output_width); + // compute elementwise max + for (int ph = h_start; ph < h_end; ++ph) + { + for (int pw = w_start; pw < w_end; ++pw) + { + int out_offset = NodeOffset(b, ph, pw, output_height, output_width); + out_mat.col(out_offset) = + out_mat.col(out_offset) + .cwiseMax(in_mat.col(NodeOffset(b, h, w, input_height, input_width))); + } + } + } + } + } + const int flat_size = output_shape.FlatSize(); + for (int i = 0; i < flat_size; ++i) + { + output_data[i] = ActivationFunctionWithMinMax(output_data[i], params.float_activation_min, + params.float_activation_max); + } +} + +} // namespace optimized +} // namespace cker +} // namespace nnfw + +#endif // defined(CKER_OPTIMIZED_EIGEN) + +#endif // __NNFW_CKER_OPTIMIZED_MAX_POOL_H__ diff --git a/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h b/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h index 729ab3d..3ddab4b 100644 --- a/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h +++ b/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h @@ -29,8 +29,8 @@ namespace cker namespace reference { -inline void AveragePool(const AveragePoolParams &params, const Shape &input_shape, - const float *input_data, const Shape &output_shape, float *output_data) +inline void AveragePool(const PoolParams &params, const Shape &input_shape, const float *input_data, + const Shape &output_shape, float *output_data) { assert(input_shape.DimensionsCount() == 4); assert(output_shape.DimensionsCount() == 4); diff --git a/runtimes/libs/cker/include/cker/operation/reference/MaxPool.h b/runtimes/libs/cker/include/cker/operation/reference/MaxPool.h new file mode 100644 index 0000000..a0f0263 --- /dev/null +++ b/runtimes/libs/cker/include/cker/operation/reference/MaxPool.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. 
All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __NNFW_CKER_REFERENCE_MAX_POOL_H__ +#define __NNFW_CKER_REFERENCE_MAX_POOL_H__ + +#include "cker/Shape.h" +#include "cker/Types.h" +#include "cker/Utils.h" + +namespace nnfw +{ +namespace cker +{ +namespace reference +{ + +inline void MaxPool(const PoolParams &params, const Shape &input_shape, const float *input_data, + const Shape &output_shape, float *output_data) +{ + assert(input_shape.DimensionsCount() == 4); + assert(output_shape.DimensionsCount() == 4); + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int stride_height = params.stride_height; + const int stride_width = params.stride_width; + for (int batch = 0; batch < batches; ++batch) + { + for (int out_y = 0; out_y < output_height; ++out_y) + { + for (int out_x = 0; out_x < output_width; ++out_x) + { + for (int channel = 0; channel < depth; ++channel) + { + const int in_x_origin = (out_x * stride_width) - params.padding_values.width; + const int in_y_origin = (out_y * stride_height) - params.padding_values.height; + // Compute the boundaries of the filter region clamped so 
as to + // ensure that the filter window fits in the input array. + const int filter_x_start = std::max(0, -in_x_origin); + const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin); + const int filter_y_start = std::max(0, -in_y_origin); + const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin); + float max = std::numeric_limits<float>::lowest(); + for (int filter_y = filter_y_start; filter_y < filter_y_end; ++filter_y) + { + for (int filter_x = filter_x_start; filter_x < filter_x_end; ++filter_x) + { + const int in_x = in_x_origin + filter_x; + const int in_y = in_y_origin + filter_y; + max = std::max(max, input_data[Offset(input_shape, batch, in_y, in_x, channel)]); + } + } + output_data[Offset(output_shape, batch, out_y, out_x, channel)] = + ActivationFunctionWithMinMax(max, params.float_activation_min, + params.float_activation_max); + } + } + } + } +} + +} // namespace reference +} // namespace cker +} // namespace nnfw + +#endif // __NNFW_CKER_REFERENCE_MAX_POOL_H__ diff --git a/runtimes/neurun/backend/cpu/kernel/AvgPoolLayer.cc b/runtimes/neurun/backend/cpu/kernel/AvgPoolLayer.cc index 2020f69..15e015b 100644 --- a/runtimes/neurun/backend/cpu/kernel/AvgPoolLayer.cc +++ b/runtimes/neurun/backend/cpu/kernel/AvgPoolLayer.cc @@ -31,7 +31,7 @@ namespace kernel { #define AVGPOOLING_PARAMETERS \ - nnfw::cker::AveragePoolParams op_params; \ + nnfw::cker::PoolParams op_params; \ op_params.stride_height = _strideHeight; \ op_params.stride_width = _strideWidth; \ op_params.filter_height = _kernelHeight; \ diff --git a/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc b/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc index a7814e6..0bce4c3 100644 --- a/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc +++ b/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc @@ -30,7 +30,7 @@ namespace kernel { #define MAXPOOLING_PARAMETERS \ - nnfw::cker::MaxPoolParams op_params; \ + nnfw::cker::PoolParams op_params; \ 
op_params.stride_height = _strideHeight; \ op_params.stride_width = _strideWidth; \ op_params.filter_height = _kernelHeight; \ diff --git a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc index b6dfba8..1d99533 100644 --- a/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc +++ b/runtimes/neurun/core/src/exec/interp/operations/AvgPool2D.cc @@ -76,7 +76,7 @@ void invoke(const ITensor *in_tensor, const ITensor *out_tensor, const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh); // Calculate - nnfw::cker::AveragePoolParams cker_param; + nnfw::cker::PoolParams cker_param; calculateActivationRange(param.activation, &cker_param.float_activation_min, &cker_param.float_activation_max); cker_param.filter_width = param.kw; diff --git a/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc b/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc index e53fa14..680b084 100644 --- a/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc +++ b/runtimes/neurun/core/src/exec/interp/operations/MaxPool2D.cc @@ -76,7 +76,7 @@ void invoke(const ITensor *in_tensor, const ITensor *out_tensor, const auto padding = neurun::util::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh); // Calculate - nnfw::cker::MaxPoolParams cker_param; + nnfw::cker::PoolParams cker_param; calculateActivationRange(param.activation, &cker_param.float_activation_min, &cker_param.float_activation_max); cker_param.filter_width = param.kw; -- 2.7.4