/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "MaxPoolLayer.h"
19 #include <cker/operation/MaxPool.h>
// Declares and fills the common cker pooling parameters (`op_params`) from
// this layer's configured members. Expanded at the top of each maxPool*()
// body; #undef'd at the end of this file.
//
// FIX: cker's PaddingValues stores its fields as int16_t (mirroring TFLite's
// PoolParams), so casting the uint32_t paddings through int8_t silently
// truncated any padding >= 128. Cast to int16_t instead.
#define MAXPOOLING_PARAMETERS                                       \
  nnfw::cker::PoolParams op_params;                                 \
  op_params.stride_height = _strideHeight;                          \
  op_params.stride_width = _strideWidth;                            \
  op_params.filter_height = _kernelHeight;                          \
  op_params.filter_width = _kernelWidth;                            \
  op_params.padding_values.height = static_cast<int16_t>(_paddingTop); \
  op_params.padding_values.width = static_cast<int16_t>(_paddingLeft);
39 MaxPoolLayer::MaxPoolLayer()
40 : _input(nullptr), _output(nullptr), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
41 _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0), _kernelHeight(0),
42 _activation(ir::Activation::NONE)
47 void MaxPoolLayer::maxPoolFloat32()
50 float output_activation_min = 0, output_activation_max = 0;
51 CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
52 op_params.float_activation_min = output_activation_min;
53 op_params.float_activation_max = output_activation_max;
55 nnfw::cker::MaxPool(op_params, getTensorShape(_input),
56 reinterpret_cast<const float *>(_input->buffer()), getTensorShape(_output),
57 reinterpret_cast<float *>(_output->buffer()));
59 void MaxPoolLayer::maxPoolQuant8()
62 int32_t output_activation_min = 0;
63 int32_t output_activation_max = 0;
64 CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
65 &output_activation_max);
66 op_params.quantized_activation_min = output_activation_min;
67 op_params.quantized_activation_max = output_activation_max;
69 nnfw::cker::MaxPool(op_params, getTensorShape(_input),
70 reinterpret_cast<const uint8_t *>(_input->buffer()), getTensorShape(_output),
71 reinterpret_cast<uint8_t *>(_output->buffer()));
74 void MaxPoolLayer::configure(const IPortableTensor *input, const uint32_t paddingLeft,
75 const uint32_t paddingRight, const uint32_t paddingTop,
76 const uint32_t paddingBottom, const uint32_t strideWidth,
77 const uint32_t strideHeight, const uint32_t kernelWidth,
78 const uint32_t kernelHeight, const ir::Activation activation,
79 IPortableTensor *output)
82 _paddingLeft = paddingLeft;
83 _paddingRight = paddingRight;
84 _paddingTop = paddingTop;
85 _paddingBottom = paddingBottom;
86 _strideWidth = strideWidth;
87 _strideHeight = strideHeight;
88 _kernelWidth = kernelWidth;
89 _kernelHeight = kernelHeight;
90 _activation = activation;
94 void MaxPoolLayer::run()
96 if (_input->data_type() == OperandType::FLOAT32)
100 else if (_input->data_type() == OperandType::QUANT_UINT8_ASYMM)
106 throw std::runtime_error{"MaxPool: unsupported data type"};
110 #undef MAXPOOLING_PARAMETERS
114 } // namespace backend