2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "MaxPoolLayer.h"
19 #include <cker/operation/MaxPool.h>
// Declares and fills a local nnfw::cker::PoolParams named `op_params` from this
// layer's pooling configuration. Kept as a macro so the float and quantized
// paths share one definition; each caller then fills in its own activation
// clamp bounds (float_* or quantized_*) before invoking the cker kernel.
// NOTE: cker's PoolParams padding fields are int16_t, so cast the uint32_t
// padding members to int16_t — the previous (int8_t) cast silently truncated
// any padding larger than 127.
#define MAXPOOLING_PARAMETERS                              \
  nnfw::cker::PoolParams op_params;                        \
  op_params.stride_height = _strideHeight;                 \
  op_params.stride_width = _strideWidth;                   \
  op_params.filter_height = _kernelHeight;                 \
  op_params.filter_width = _kernelWidth;                   \
  op_params.padding_values.height = (int16_t)_paddingTop;  \
  op_params.padding_values.width = (int16_t)_paddingLeft;
// Default-constructs the layer with null tensor pointers, zero padding/stride/
// kernel geometry and no fused activation; the real values are supplied later
// through configure().
MaxPoolLayer::MaxPoolLayer()
    : _input(nullptr), _output(nullptr), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
      _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0), _kernelHeight(0),
      _activation(ir::Activation::NONE)
// Runs 2-D max pooling on a FLOAT32 input tensor via the cker reference kernel.
// The pooling geometry (stride/kernel/padding) is carried by `op_params`, set up
// by the MAXPOOLING_PARAMETERS macro; only the activation clamp is filled here.
void MaxPoolLayer::maxPoolFloat32()
  float output_activation_min, output_activation_max;
  // Fold the fused activation (NONE/RELU/RELU6/...) into a [min, max] clamp
  // that the kernel applies to every output element.
  CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  // Shapes are converted to cker's layout descriptor; buffers are reinterpreted
  // as raw float arrays (the tensor type was checked by run()).
  nnfw::cker::MaxPool(op_params, convertTensorToCkerShape(_input),
                      reinterpret_cast<const float *>(_input->buffer()),
                      convertTensorToCkerShape(_output),
                      reinterpret_cast<float *>(_output->buffer()));
// Runs 2-D max pooling on a QUANT8_ASYMM (uint8) input tensor via cker.
// Max pooling needs no requantization (the max of quantized values maps to the
// max of real values), so only a quantized clamp range is computed here.
void MaxPoolLayer::maxPoolQuant8()
  int32_t output_activation_min = 0;
  int32_t output_activation_max = 0;
  // Clamp bounds are derived in the quantized domain from the output tensor's
  // scale/zero-point combined with the fused activation.
  CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
                                &output_activation_max);
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  nnfw::cker::MaxPool(op_params, convertTensorToCkerShape(_input),
                      reinterpret_cast<const uint8_t *>(_input->buffer()),
                      convertTensorToCkerShape(_output),
                      reinterpret_cast<uint8_t *>(_output->buffer()));
// Stores the tensors and pooling parameters for later use by run().
// @param input         source tensor (FLOAT32 or QUANT8_ASYMM)
// @param paddingLeft/paddingRight/paddingTop/paddingBottom
//                      explicit spatial padding, in elements
// @param strideWidth/strideHeight
//                      pooling window step in each spatial dimension
// @param kernelWidth/kernelHeight
//                      pooling window size in each spatial dimension
// @param activation    fused activation applied to the pooled output
// @param output        destination tensor
// NOTE(review): _paddingRight/_paddingBottom are stored but the cker kernel is
// only handed top/left padding — presumably right/bottom are implied by the
// output shape; confirm against the shape-inference code.
void MaxPoolLayer::configure(const operand::Tensor *input, const uint32_t paddingLeft,
                             const uint32_t paddingRight, const uint32_t paddingTop,
                             const uint32_t paddingBottom, const uint32_t strideWidth,
                             const uint32_t strideHeight, const uint32_t kernelWidth,
                             const uint32_t kernelHeight, const ir::Activation activation,
                             operand::Tensor *output)
  _paddingLeft = paddingLeft;
  _paddingRight = paddingRight;
  _paddingTop = paddingTop;
  _paddingBottom = paddingBottom;
  _strideWidth = strideWidth;
  _strideHeight = strideHeight;
  _kernelWidth = kernelWidth;
  _kernelHeight = kernelHeight;
  _activation = activation;
// Dispatches to the float or quantized implementation based on the input
// tensor's element type (the two types accepted by configure()).
void MaxPoolLayer::run()
  // FLOAT32 path -> maxPoolFloat32()
  if (_input->data_type() == OperandType::FLOAT32)
  // QUANT8_ASYMM path -> maxPoolQuant8()
  else if (_input->data_type() == OperandType::QUANT8_ASYMM)
108 #undef MAXPOOLING_PARAMETERS
110 } // namespace kernel
112 } // namespace backend