From: Hyeongseok Oh/On-Device Lab(SR)/Staff Engineer/Samsung Electronics
Date: Mon, 1 Apr 2019 00:37:29 +0000 (+0900)
Subject: Introduce cpu maxpool kernel (#4897)
X-Git-Tag: accepted/tizen/unified/20190430.113441~82
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e1dd2c7930ea6ad527e6af6fa28c16465e037d8c;p=platform%2Fcore%2Fml%2Fnnfw.git

Introduce cpu maxpool kernel (#4897)

Introduce a CPU maxpool kernel ported from tflite.
Use the kernel in the neurun cpu backend.

Signed-off-by: Hyeongseok Oh
---

diff --git a/libs/cker/include/cker/operation/MaxPool.h b/libs/cker/include/cker/operation/MaxPool.h
new file mode 100644
index 0000000..9619e26
--- /dev/null
+++ b/libs/cker/include/cker/operation/MaxPool.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NNFW_CKER_MAX_POOL_H__
+#define __NNFW_CKER_MAX_POOL_H__
+
+#include "cker/Shape.h"
+#include "cker/Types.h"
+#include "cker/Utils.h"
+
+// Standard headers for the std:: facilities used below.
+#include <algorithm> // std::max, std::min
+#include <cassert>   // assert
+#include <cstdint>   // int32_t, uint8_t
+#include <limits>    // std::numeric_limits
+
+namespace nnfw
+{
+namespace cker
+{
+
+struct MaxPoolParams
+{
+  FusedActivationFunctionType activation;
+  PaddingType padding_type;
+  PaddingValues padding_values;
+  int stride_height;
+  int stride_width;
+  int filter_height;
+  int filter_width;
+  // uint8, etc., activation params.
+  int32_t quantized_activation_min;
+  int32_t quantized_activation_max;
+  // float activation params.
+  float float_activation_min;
+  float float_activation_max;
+};
+
+inline void MaxPool(const MaxPoolParams &params, const Shape &input_shape, const float *input_data,
+                    const Shape &output_shape, float *output_data)
+{
+  assert(input_shape.DimensionsCount() == 4);
+  assert(output_shape.DimensionsCount() == 4);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  const int stride_height = params.stride_height;
+  const int stride_width = params.stride_width;
+  for (int batch = 0; batch < batches; ++batch)
+  {
+    for (int out_y = 0; out_y < output_height; ++out_y)
+    {
+      for (int out_x = 0; out_x < output_width; ++out_x)
+      {
+        for (int channel = 0; channel < depth; ++channel)
+        {
+          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
+          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
+          // Compute the boundaries of the filter region clamped so as to
+          // ensure that the filter window fits in the input array.
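+          // e.g. with left "SAME" padding, filter_width == 3, input_width == 4
+          // and in_x_origin == -1: filter_x_start == 1 and filter_x_end == 3,
+          // so the out-of-bounds tap at in_x == -1 is skipped.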
+          const int filter_x_start = std::max(0, -in_x_origin);
+          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
+          const int filter_y_start = std::max(0, -in_y_origin);
+          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
+          float max = std::numeric_limits<float>::lowest();
+          for (int filter_y = filter_y_start; filter_y < filter_y_end; ++filter_y)
+          {
+            for (int filter_x = filter_x_start; filter_x < filter_x_end; ++filter_x)
+            {
+              const int in_x = in_x_origin + filter_x;
+              const int in_y = in_y_origin + filter_y;
+              max = std::max(max, input_data[Offset(input_shape, batch, in_y, in_x, channel)]);
+            }
+          }
+          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
+              ActivationFunctionWithMinMax(max, params.float_activation_min,
+                                           params.float_activation_max);
+        }
+      }
+    }
+  }
+}
+
+inline void MaxPool(const MaxPoolParams &params, const Shape &input_shape,
+                    const uint8_t *input_data, const Shape &output_shape, uint8_t *output_data)
+{
+  assert(params.quantized_activation_min <= params.quantized_activation_max);
+  assert(params.quantized_activation_min >= 0);
+  assert(params.quantized_activation_max <= 255);
+  assert(input_shape.DimensionsCount() == 4);
+  assert(output_shape.DimensionsCount() == 4);
+  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
+  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
+  const int input_height = input_shape.Dims(1);
+  const int input_width = input_shape.Dims(2);
+  const int output_height = output_shape.Dims(1);
+  const int output_width = output_shape.Dims(2);
+  const int stride_height = params.stride_height;
+  const int stride_width = params.stride_width;
+  for (int batch = 0; batch < batches; ++batch)
+  {
+    for (int out_y = 0; out_y < output_height; ++out_y)
+    {
+      for (int out_x = 0; out_x < output_width; ++out_x)
+      {
+        for (int channel = 0; channel < depth; ++channel)
+        {
+          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
+          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
+          // Compute the boundaries of the filter region clamped so as to
+          // ensure that the filter window fits in the input array.
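+          // Same window clamping as the float kernel above; the running max
+          // can start at 0 because uint8 data is never negative.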
+          const int filter_x_start = std::max(0, -in_x_origin);
+          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
+          const int filter_y_start = std::max(0, -in_y_origin);
+          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
+          uint8_t max = 0;
+          for (int filter_y = filter_y_start; filter_y < filter_y_end; ++filter_y)
+          {
+            for (int filter_x = filter_x_start; filter_x < filter_x_end; ++filter_x)
+            {
+              const int in_x = in_x_origin + filter_x;
+              const int in_y = in_y_origin + filter_y;
+              max = std::max(max, input_data[Offset(input_shape, batch, in_y, in_x, channel)]);
+            }
+          }
+          max = std::max<uint8_t>(max, params.quantized_activation_min);
+          max = std::min<uint8_t>(max, params.quantized_activation_max);
+          output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
+              static_cast<uint8_t>(max);
+        }
+      }
+    }
+  }
+}
+
+} // namespace cker
+} // namespace nnfw
+
+#endif // __NNFW_CKER_MAX_POOL_H__
diff --git a/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc b/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc
index e62ee9d..84071bf 100644
--- a/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc
+++ b/runtimes/neurun/backend/cpu/kernel/MaxPoolLayer.cc
@@ -16,7 +16,8 @@
 
 #include "MaxPoolLayer.h"
 
-#include "tensorflow/contrib/lite/kernels/internal/optimized/optimized_ops.h"
+#include <cker/operation/MaxPool.h>
+
 #include "OperationUtils.h"
 
 namespace neurun
@@ -29,7 +30,7 @@ namespace kernel
 {
 
 #define MAXPOOLING_PARAMETERS              \
-  tflite::PoolParams op_params;            \
+  nnfw::cker::MaxPoolParams op_params;     \
   op_params.stride_height = _strideHeight; \
   op_params.stride_width = _strideWidth;   \
   op_params.filter_height = _kernelHeight; \
@@ -54,8 +55,8 @@ bool MaxPoolLayer::maxPoolFloat32()
   op_params.float_activation_min = output_activation_min;
   op_params.float_activation_max = output_activation_max;
 
-  ::tflite::optimized_ops::MaxPool(op_params, convertShapeToTFLiteShape(_inputShape), _inputData.f,
-                                   convertShapeToTFLiteShape(_outputShape), _outputData.f);
+  nnfw::cker::MaxPool(op_params, convertShapeToCkerShape(_inputShape), _inputData.f,
+                      convertShapeToCkerShape(_outputShape), _outputData.f);
   return true;
 }
 bool MaxPoolLayer::maxPoolQuant8()
@@ -68,8 +69,8 @@
   op_params.quantized_activation_min = output_activation_min;
   op_params.quantized_activation_max = output_activation_max;
 
-  ::tflite::optimized_ops::MaxPool(op_params, convertShapeToTFLiteShape(_inputShape), _inputData.u8,
-                                   convertShapeToTFLiteShape(_outputShape), _outputData.u8);
+  nnfw::cker::MaxPool(op_params, convertShapeToCkerShape(_inputShape), _inputData.u8,
+                      convertShapeToCkerShape(_outputShape), _outputData.u8);
   return true;
 }
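
Below is a minimal usage sketch of the new kernel, for illustration only (not part of the patch). It uses only what MaxPool.h itself declares, and assumes cker's Shape keeps the initializer-list constructor it inherits from TFLite's RuntimeShape, from which it was ported. It pools a 1x2x2x1 NHWC float tensor with a 2x2 window at stride 1, leaving the activation range wide open so the raw maximum passes through.

  #include <cker/operation/MaxPool.h>

  #include <limits>

  int main()
  {
    nnfw::cker::MaxPoolParams params{}; // zero-initialize, then fill what MaxPool reads
    params.stride_height = 1;
    params.stride_width = 1;
    params.filter_height = 2;
    params.filter_width = 2;
    params.padding_values.height = 0; // the window already fits: no padding required
    params.padding_values.width = 0;
    // Wide-open activation range, so the pooled value is passed through unchanged.
    params.float_activation_min = std::numeric_limits<float>::lowest();
    params.float_activation_max = std::numeric_limits<float>::max();

    const nnfw::cker::Shape input_shape{1, 2, 2, 1}; // NHWC
    const nnfw::cker::Shape output_shape{1, 1, 1, 1};
    const float input[4] = {1.f, 4.f, 3.f, 2.f};
    float output[1] = {0.f};

    nnfw::cker::MaxPool(params, input_shape, input, output_shape, output);
    // output[0] == 4.f, the largest value under the single 2x2 window.
    return 0;
  }

The neurun backend reaches the same call through MAXPOOLING_PARAMETERS and convertShapeToCkerShape above, which translate the layer's stride/kernel/padding fields and its internal shape type into this parameter struct and cker shapes.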