From 8497b42d2a65f4e5f2634c14e355cb80dbc69058 Mon Sep 17 00:00:00 2001
From: =?utf8?q?=EC=9E=A5=EC=A7=80=EC=84=AD/On-Device=20Lab=28SR=29/Enginee?=
 =?utf8?q?r/=EC=82=BC=EC=84=B1=EC=A0=84=EC=9E=90?=
Date: Tue, 1 Oct 2019 16:35:56 +0900
Subject: [PATCH] Apply optimized cpu kernel for AvgPoolFloat32 (#7834)

This commit applies an optimized CPU kernel for AvgPoolFloat32.
  - Introduce an optimized CPU kernel for the AvgPool2D op
  - Apply that kernel to AvgPoolFloat32

Signed-off-by: jiseob.jang
---
 .../nnfw/cmake/options/options_aarch64-tizen.cmake  |   1 +
 .../nnfw/cmake/options/options_armv7l-tizen.cmake   |   1 +
 runtimes/libs/cker/CMakeLists.txt                   |   7 ++
 runtimes/libs/cker/include/cker/Types.h             |  17 ++++
 runtimes/libs/cker/include/cker/Utils.h             |   5 +
 runtimes/libs/cker/include/cker/eigen/Utils.h       |  56 +++++++++++
 .../libs/cker/include/cker/operation/AveragePool.h  |  79 ++--------------
 .../include/cker/operation/optimized/AveragePool.h  | 105 +++++++++++++++++++++
 .../include/cker/operation/reference/AveragePool.h  |  90 ++++++++++++++++++
 9 files changed, 292 insertions(+), 69 deletions(-)
 create mode 100644 runtimes/libs/cker/include/cker/eigen/Utils.h
 create mode 100644 runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h
 create mode 100644 runtimes/libs/cker/include/cker/operation/reference/AveragePool.h

diff --git a/infra/nnfw/cmake/options/options_aarch64-tizen.cmake b/infra/nnfw/cmake/options/options_aarch64-tizen.cmake
index 530659d..eff8100 100644
--- a/infra/nnfw/cmake/options/options_aarch64-tizen.cmake
+++ b/infra/nnfw/cmake/options/options_aarch64-tizen.cmake
@@ -4,6 +4,7 @@ option(BUILD_GTEST "Download and build Google Test" OFF)
 option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
 option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_EIGEN "Download Eigen source" OFF)
 option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
 option(DOWNLOAD_NNPACK "Download NNPACK source" OFF)
diff --git a/infra/nnfw/cmake/options/options_armv7l-tizen.cmake b/infra/nnfw/cmake/options/options_armv7l-tizen.cmake
index e2b8081..9fe8f1f 100644
--- a/infra/nnfw/cmake/options/options_armv7l-tizen.cmake
+++ b/infra/nnfw/cmake/options/options_armv7l-tizen.cmake
@@ -4,6 +4,7 @@ option(BUILD_GTEST "Download and build Google Test" OFF)
 option(BUILD_ARMCOMPUTE "Build ARM Compute from the downloaded source" OFF)
 option(BUILD_TENSORFLOW_LITE "Build TensorFlow Lite from the downloaded source" OFF)
+option(DOWNLOAD_EIGEN "Download Eigen source" OFF)
 option(DOWNLOAD_NEON2SSE "Download NEON2SSE library source" OFF)
 option(DOWNLOAD_NNPACK "Download NNPACK source" OFF)
diff --git a/runtimes/libs/cker/CMakeLists.txt b/runtimes/libs/cker/CMakeLists.txt
index 16a13f5..f81ee2a 100644
--- a/runtimes/libs/cker/CMakeLists.txt
+++ b/runtimes/libs/cker/CMakeLists.txt
@@ -1,2 +1,9 @@
 add_library(nnfw_lib_cker INTERFACE)
+
+nnfw_find_package(Eigen QUIET)
+if(Eigen_FOUND)
+  target_link_libraries(nnfw_lib_cker INTERFACE eigen)
+  target_compile_definitions(nnfw_lib_cker INTERFACE CKER_OPTIMIZED_EIGEN)
+endif(Eigen_FOUND)
+
 target_include_directories(nnfw_lib_cker INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/include)
diff --git a/runtimes/libs/cker/include/cker/Types.h b/runtimes/libs/cker/include/cker/Types.h
index d8dedbd..4c2569f 100644
--- a/runtimes/libs/cker/include/cker/Types.h
+++ b/runtimes/libs/cker/include/cker/Types.h
@@ -45,6 +45,23 @@ struct PaddingValues
   int16_t height;
 };
 
+struct AveragePoolParams
+{
+  FusedActivationFunctionType activation;
+  PaddingType padding_type;
+  PaddingValues padding_values;
+  int stride_height;
+  int stride_width;
+  int filter_height;
+  int filter_width;
+  // uint8, etc, activation params.
+  int32_t quantized_activation_min;
+  int32_t quantized_activation_max;
+  // float activation params.
+  float float_activation_min;
+  float float_activation_max;
+};
+
 } // namespace cker
 } // namespace nnfw
 
diff --git a/runtimes/libs/cker/include/cker/Utils.h b/runtimes/libs/cker/include/cker/Utils.h
index 84bbbc3..4c5a525 100644
--- a/runtimes/libs/cker/include/cker/Utils.h
+++ b/runtimes/libs/cker/include/cker/Utils.h
@@ -49,6 +49,11 @@ inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(int32_t x, int32_t qu
   return gemmlowp::SaturatingRoundingDoublingHighMul(x * (1 << left_shift), quantized_multiplier);
 }
 
+inline int NodeOffset(int b, int h, int w, int height, int width)
+{
+  return (b * height + h) * width + w;
+}
+
 inline int CountLeadingZeros(uint32_t integer_input)
 {
   const uint32_t one_in_leading_positive = 1U << 31;
diff --git a/runtimes/libs/cker/include/cker/eigen/Utils.h b/runtimes/libs/cker/include/cker/eigen/Utils.h
new file mode 100644
index 0000000..645a614
--- /dev/null
+++ b/runtimes/libs/cker/include/cker/eigen/Utils.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_EIGEN_UTILS_H__
+#define __NNFW_CKER_EIGEN_UTILS_H__
+
+#if defined(CKER_OPTIMIZED_EIGEN)
+
+#include <Eigen/Core>
+#include <type_traits>
+#include "cker/Shape.h"
+
+namespace nnfw
+{
+namespace cker
+{
+
+// Make a local MatrixMap typedef allowing to map a float array
+// as an Eigen matrix expression, in the same way as the VectorMap
+// typedef in the original TensorFlow kernels.
+template <typename Scalar>
+using MatrixMap = typename std::conditional<
+    std::is_const<Scalar>::value,
+    Eigen::Map<const Eigen::Matrix<typename std::remove_const<Scalar>::type, Eigen::Dynamic,
+                                   Eigen::Dynamic>>,
+    Eigen::Map<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>>::type;
+
+template <typename Scalar>
+MatrixMap<Scalar> MapAsMatrixWithLastDimAsRows(Scalar *data, const Shape &shape)
+{
+  const int dims_count = shape.DimensionsCount();
+  const int rows = shape.Dims(dims_count - 1);
+  const int cols = FlatSizeSkipDim(shape, dims_count - 1);
+  return MatrixMap<Scalar>(data, rows, cols);
+}
+
+} // namespace cker
+} // namespace nnfw
+
+#endif // defined(CKER_OPTIMIZED_EIGEN)
+
+#endif // __NNFW_CKER_EIGEN_UTILS_H__
diff --git a/runtimes/libs/cker/include/cker/operation/AveragePool.h b/runtimes/libs/cker/include/cker/operation/AveragePool.h
index 81e9933..f790982 100644
--- a/runtimes/libs/cker/include/cker/operation/AveragePool.h
+++ b/runtimes/libs/cker/include/cker/operation/AveragePool.h
@@ -18,84 +18,25 @@
 #ifndef __NNFW_CKER_AVERAGE_POOL_H__
 #define __NNFW_CKER_AVERAGE_POOL_H__
 
-#include "cker/Shape.h"
-#include "cker/Types.h"
-#include "cker/Utils.h"
+#if defined(CKER_OPTIMIZED_EIGEN)
+#include "cker/operation/optimized/AveragePool.h"
+#endif // defined(CKER_OPTIMIZED_EIGEN)
+
+#include "cker/operation/reference/AveragePool.h"
 
 namespace nnfw
 {
 namespace cker
 {
 
-struct AveragePoolParams
-{
-  FusedActivationFunctionType activation;
-  PaddingType padding_type;
-  PaddingValues padding_values;
-  int stride_height;
-  int stride_width;
-  int filter_height;
-  int filter_width;
-  // uint8, etc, activation params.
-  int32_t quantized_activation_min;
-  int32_t quantized_activation_max;
-  // float activation params.
-  float float_activation_min;
-  float float_activation_max;
-};
-
 inline void AveragePool(const AveragePoolParams &params, const Shape &input_shape,
                         const float *input_data, const Shape &output_shape, float *output_data)
 {
-  assert(input_shape.DimensionsCount() == 4);
-  assert(output_shape.DimensionsCount() == 4);
-  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
-  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
-  const int input_height = input_shape.Dims(1);
-  const int input_width = input_shape.Dims(2);
-  const int output_height = output_shape.Dims(1);
-  const int output_width = output_shape.Dims(2);
-  const int stride_height = params.stride_height;
-  const int stride_width = params.stride_width;
-  for (int batch = 0; batch < batches; ++batch)
-  {
-    for (int out_y = 0; out_y < output_height; ++out_y)
-    {
-      for (int out_x = 0; out_x < output_width; ++out_x)
-      {
-        const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
-        const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
-        // Compute the boundaries of the filter region clamped so as to
-        // ensure that the filter window fits in the input array.
-        const int filter_x_start = std::max(0, -in_x_origin);
-        const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
-        const int filter_y_start = std::max(0, -in_y_origin);
-        const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
-        int filter_count = (filter_y_end - filter_y_start) * (filter_x_end - filter_x_start);
-        if (filter_count <= 0)
-        {
-          continue;
-        }
-        for (int channel = 0; channel < depth; ++channel)
-        {
-          float total = 0.f;
-          for (int filter_y = filter_y_start; filter_y < filter_y_end; ++filter_y)
-          {
-            for (int filter_x = filter_x_start; filter_x < filter_x_end; ++filter_x)
-            {
-              const int in_x = in_x_origin + filter_x;
-              const int in_y = in_y_origin + filter_y;
-              total += input_data[Offset(input_shape, batch, in_y, in_x, channel)];
-            }
-          }
-          const float average = total / (float)filter_count;
-          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
-              ActivationFunctionWithMinMax(average, params.float_activation_min,
-                                           params.float_activation_max);
-        }
-      }
-    }
-  }
+#if defined(CKER_OPTIMIZED_EIGEN)
+  optimized::AveragePool(params, input_shape, input_data, output_shape, output_data);
+#else // defined(CKER_OPTIMIZED_EIGEN)
+  reference::AveragePool(params, input_shape, input_data, output_shape, output_data);
+#endif // defined(CKER_OPTIMIZED_EIGEN)
 }
 
 inline void AveragePool(const AveragePoolParams &params, const Shape &input_shape,
diff --git a/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h b/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h
new file mode 100644
index 0000000..f2aa70e
--- /dev/null
+++ b/runtimes/libs/cker/include/cker/operation/optimized/AveragePool.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_OPTIMIZED_AVERAGE_POOL_H__
+#define __NNFW_CKER_OPTIMIZED_AVERAGE_POOL_H__
+
+#if defined(CKER_OPTIMIZED_EIGEN)
+
+#include "cker/eigen/Utils.h"
+#include "cker/Shape.h"
+#include "cker/Types.h"
+#include "cker/Utils.h"
+#include <Eigen/Core>
+
+namespace nnfw
+{
+namespace cker
+{
+namespace optimized
+{
+
+// TODO Change to apply neon for this function if it is faster
+inline void AveragePool(const AveragePoolParams &params, const Shape &input_shape,
+                        const float *input_data, const Shape &output_shape, float *output_data)
+{
+  assert(input_shape.DimensionsCount() == 4);
+  assert(output_shape.DimensionsCount() == 4);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  const int stride_height = params.stride_height;
+  const int stride_width = params.stride_width;
+
+  // TODO(benoitjacob) make this a proper reference impl without Eigen!
+ const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape); + auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape); + // TODO(benoitjacob) get rid of the dynamic memory allocation here! + Eigen::VectorXf out_count(out_mat.cols()); + out_count.setZero(); + // Prefill the output to 0. + out_mat.setZero(); + for (int b = 0; b < batches; ++b) + { + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) + { + // (h_start, h_end) * (w_start, w_end) is the range that the input + // vector projects to. + int hpad = h + params.padding_values.height; + int wpad = w + params.padding_values.width; + int h_start = + (hpad < params.filter_height) ? 0 : (hpad - params.filter_height) / stride_height + 1; + int h_end = std::min(hpad / stride_height + 1, output_height); + int w_start = + (wpad < params.filter_width) ? 0 : (wpad - params.filter_width) / stride_width + 1; + int w_end = std::min(wpad / stride_width + 1, output_width); + // compute elementwise sum + for (int ph = h_start; ph < h_end; ++ph) + { + for (int pw = w_start; pw < w_end; ++pw) + { + int out_offset = NodeOffset(b, ph, pw, output_height, output_width); + out_mat.col(out_offset) += in_mat.col(NodeOffset(b, h, w, input_height, input_width)); + out_count(out_offset)++; + } + } + } + } + } + // Divide the output by the actual number of elements being averaged over + assert(out_count.minCoeff() > 0); + out_mat.array().rowwise() /= out_count.transpose().array(); + + const int flat_size = output_shape.FlatSize(); + for (int i = 0; i < flat_size; ++i) + { + output_data[i] = ActivationFunctionWithMinMax(output_data[i], params.float_activation_min, + params.float_activation_max); + } +} + +} // namespace optimized +} // namespace cker +} // namespace nnfw + +#endif // defined(CKER_OPTIMIZED_EIGEN) + +#endif // __NNFW_CKER_OPTIMIZED_AVERAGE_POOL_H__ diff --git a/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h b/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h new file mode 100644 index 0000000..729ab3d --- /dev/null +++ b/runtimes/libs/cker/include/cker/operation/reference/AveragePool.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved + * Copyright 2017 The TensorFlow Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __NNFW_CKER_REFERENCE_AVERAGE_POOL_H__ +#define __NNFW_CKER_REFERENCE_AVERAGE_POOL_H__ + +#include "cker/Shape.h" +#include "cker/Types.h" +#include "cker/Utils.h" + +namespace nnfw +{ +namespace cker +{ +namespace reference +{ + +inline void AveragePool(const AveragePoolParams ¶ms, const Shape &input_shape, + const float *input_data, const Shape &output_shape, float *output_data) +{ + assert(input_shape.DimensionsCount() == 4); + assert(output_shape.DimensionsCount() == 4); + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + const int stride_height = params.stride_height; + const int stride_width = params.stride_width; + for (int batch = 0; batch < batches; ++batch) + { + for (int out_y = 0; out_y < output_height; ++out_y) + { + for (int out_x = 0; out_x < output_width; ++out_x) + { + const int in_x_origin = (out_x * stride_width) - params.padding_values.width; + const int in_y_origin = (out_y * stride_height) - params.padding_values.height; + // Compute the boundaries of the filter region clamped so as to + // ensure that the filter window fits in the input array. + const int filter_x_start = std::max(0, -in_x_origin); + const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin); + const int filter_y_start = std::max(0, -in_y_origin); + const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin); + int filter_count = (filter_y_end - filter_y_start) * (filter_x_end - filter_x_start); + if (filter_count <= 0) + { + continue; + } + for (int channel = 0; channel < depth; ++channel) + { + float total = 0.f; + for (int filter_y = filter_y_start; filter_y < filter_y_end; ++filter_y) + { + for (int filter_x = filter_x_start; filter_x < filter_x_end; ++filter_x) + { + const int in_x = in_x_origin + filter_x; + const int in_y = in_y_origin + filter_y; + total += input_data[Offset(input_shape, batch, in_y, in_x, channel)]; + } + } + const float average = total / (float)filter_count; + output_data[Offset(output_shape, batch, out_y, out_x, channel)] = + ActivationFunctionWithMinMax(average, params.float_activation_min, + params.float_activation_max); + } + } + } + } +} + +} // namespace reference +} // namespace cker +} // namespace nnfw + +#endif // __NNFW_CKER_REFERENCE_AVERAGE_POOL_H__ -- 2.7.4
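For readers who want to see the Eigen data-layout trick from optimized::AveragePool in isolation, the following is a minimal, self-contained sketch of the same idea: the NHWC buffer is viewed as a (depth x N*H*W) matrix so that every spatial position becomes one column, each input column is accumulated into every output column whose pooling window covers it, and the accumulated sums are divided by the per-window element counts. The tensor sizes, variable names, and main() harness are invented for this illustration and are not part of cker; only Eigen and the C++ standard library are assumed.

// Illustrative sketch only (not part of this patch or of cker): a condensed,
// self-contained version of the Eigen trick used by optimized::AveragePool.
#include <Eigen/Core>
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>

int main()
{
  // 1x3x3x2 NHWC input, 2x2 filter, stride 1, no padding -> 1x2x2x2 output.
  const int in_h = 3, in_w = 3, depth = 2;
  const int filter_h = 2, filter_w = 2, stride = 1;
  const int out_h = 2, out_w = 2;

  std::vector<float> input(in_h * in_w * depth);
  for (int i = 0; i < in_h * in_w * depth; ++i)
    input[i] = static_cast<float>(i);
  std::vector<float> output(out_h * out_w * depth, 0.0f); // prefilled with zeros

  // Map the NHWC buffers with the last dimension (depth) as rows, so one
  // column holds the whole channel vector of one spatial position.
  Eigen::Map<const Eigen::MatrixXf> in_mat(input.data(), depth, in_h * in_w);
  Eigen::Map<Eigen::MatrixXf> out_mat(output.data(), depth, out_h * out_w);
  Eigen::VectorXf out_count = Eigen::VectorXf::Zero(out_mat.cols());

  // Scatter every input column into each output window that covers it
  // (same loop structure as the optimized kernel, with zero padding).
  for (int h = 0; h < in_h; ++h)
  {
    for (int w = 0; w < in_w; ++w)
    {
      const int h_start = (h < filter_h) ? 0 : (h - filter_h) / stride + 1;
      const int h_end = std::min(h / stride + 1, out_h);
      const int w_start = (w < filter_w) ? 0 : (w - filter_w) / stride + 1;
      const int w_end = std::min(w / stride + 1, out_w);
      for (int ph = h_start; ph < h_end; ++ph)
      {
        for (int pw = w_start; pw < w_end; ++pw)
        {
          const int out_col = ph * out_w + pw; // NodeOffset() with batch 0
          out_mat.col(out_col) += in_mat.col(h * in_w + w);
          out_count(out_col) += 1.0f;
        }
      }
    }
  }

  // Divide each column sum by the number of elements actually averaged over.
  assert(out_count.minCoeff() > 0.0f);
  out_mat.array().rowwise() /= out_count.transpose().array();

  std::cout << out_mat << std::endl; // one column per output pixel
  return 0;
}

The real kernel additionally offsets the column index by batch via NodeOffset(b, h, w, height, width) and clamps each result to [float_activation_min, float_activation_max]; working on whole columns is what lets Eigen vectorize across the channel dimension instead of accumulating one scalar at a time as the reference kernel does.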