Imported Upstream version 1.4.0
[platform/core/ml/nnfw.git] / runtime / onert / backend / cpu / kernel / MaxPoolLayer.cc
1 /*
2  * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
#include "MaxPoolLayer.h"

#include <stdexcept>

#include <cker/operation/MaxPool.h>
20
21 namespace onert
22 {
23 namespace backend
24 {
25 namespace cpu
26 {
27 namespace kernel
28 {
29
// Populates a local nnfw::cker::PoolParams named `op_params` from this
// layer's stride/kernel/padding members. Expanded at the top of each
// maxPool* method; the activation clamp fields are filled in afterwards.
// NOTE(review): the casts were previously (int8_t), which would truncate
// any padding value above 127. cker's PoolParams::padding_values mirrors
// TFLite's PaddingValues, whose fields are wider than 8 bits, so cast to
// int16_t instead — identical results for all realistic paddings, correct
// for large ones.
#define MAXPOOLING_PARAMETERS                             \
  nnfw::cker::PoolParams op_params;                       \
  op_params.stride_height = _strideHeight;                \
  op_params.stride_width = _strideWidth;                  \
  op_params.filter_height = _kernelHeight;                \
  op_params.filter_width = _kernelWidth;                  \
  op_params.padding_values.height = (int16_t)_paddingTop; \
  op_params.padding_values.width = (int16_t)_paddingLeft;
38
39 MaxPoolLayer::MaxPoolLayer()
40     : _input(nullptr), _output(nullptr), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
41       _paddingBottom(0), _strideWidth(0), _strideHeight(0), _kernelWidth(0), _kernelHeight(0),
42       _activation(ir::Activation::NONE)
43 {
44   // DO NOTHING
45 }
46
47 void MaxPoolLayer::maxPoolFloat32()
48 {
49   MAXPOOLING_PARAMETERS
50   float output_activation_min, output_activation_max;
51   CalculateActivationRangeFloat(_activation, &output_activation_min, &output_activation_max);
52   op_params.float_activation_min = output_activation_min;
53   op_params.float_activation_max = output_activation_max;
54
55   nnfw::cker::MaxPool(op_params, convertTensorToCkerShape(_input),
56                       reinterpret_cast<const float *>(_input->buffer()),
57                       convertTensorToCkerShape(_output),
58                       reinterpret_cast<float *>(_output->buffer()));
59 }
60 void MaxPoolLayer::maxPoolQuant8()
61 {
62   MAXPOOLING_PARAMETERS
63   int32_t output_activation_min = 0;
64   int32_t output_activation_max = 0;
65   CalculateActivationRangeUint8(_activation, _output, &output_activation_min,
66                                 &output_activation_max);
67   op_params.quantized_activation_min = output_activation_min;
68   op_params.quantized_activation_max = output_activation_max;
69
70   nnfw::cker::MaxPool(op_params, convertTensorToCkerShape(_input),
71                       reinterpret_cast<const uint8_t *>(_input->buffer()),
72                       convertTensorToCkerShape(_output),
73                       reinterpret_cast<uint8_t *>(_output->buffer()));
74 }
75
76 void MaxPoolLayer::configure(const operand::Tensor *input, const uint32_t paddingLeft,
77                              const uint32_t paddingRight, const uint32_t paddingTop,
78                              const uint32_t paddingBottom, const uint32_t strideWidth,
79                              const uint32_t strideHeight, const uint32_t kernelWidth,
80                              const uint32_t kernelHeight, const ir::Activation activation,
81                              operand::Tensor *output)
82 {
83   _input = input;
84   _paddingLeft = paddingLeft;
85   _paddingRight = paddingRight;
86   _paddingTop = paddingTop;
87   _paddingBottom = paddingBottom;
88   _strideWidth = strideWidth;
89   _strideHeight = strideHeight;
90   _kernelWidth = kernelWidth;
91   _kernelHeight = kernelHeight;
92   _activation = activation;
93   _output = output;
94 }
95
96 void MaxPoolLayer::run()
97 {
98   if (_input->data_type() == OperandType::FLOAT32)
99   {
100     maxPoolFloat32();
101   }
102   else if (_input->data_type() == OperandType::QUANT8_ASYMM)
103   {
104     maxPoolQuant8();
105   }
106 }
107
108 #undef MAXPOOLING_PARAMETERS
109
110 } // namespace kernel
111 } // namespace cpu
112 } // namespace backend
113 } // namespace onert