Fix coverity 1229862: uninit variable in PoolParams
[platform/core/ml/nnfw.git] / runtime / onert / backend / cpu / ops / PoolLayer.cc
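The defect class behind this change, sketched below with hypothetical names (PoolParamsSketch and clampOutput are illustrative, not the nnfw code): a params struct whose activation-bound members are read by the pooling kernel but never written on some configuration path. Assuming that is what Coverity 1229862 flags, zero-initializing every member in the POOLING_PARAMETERS macro below removes the uninitialized read.

// Minimal, self-contained sketch of the suspected defect class; not onert code.
#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for nnfw::cker::PoolParams (the real struct lives in compute/cker).
struct PoolParamsSketch
{
  int stride_height, stride_width;
  int filter_height, filter_width;
  float float_activation_min; // read by the kernel below
  float float_activation_max;
};

float clampOutput(const PoolParamsSketch &p, float pooled)
{
  // The cker kernels clamp every output against the activation bounds.
  return std::min(std::max(pooled, p.float_activation_min), p.float_activation_max);
}

int main()
{
  PoolParamsSketch params; // no default member initializers
  params.stride_height = 1;
  params.stride_width = 1;
  params.filter_height = 2;
  params.filter_width = 2;
  // float_activation_min/max were never assigned: this read is the kind of
  // uninitialized-variable use a static analyzer reports.
  std::printf("%f\n", clampOutput(params, 3.0f));
  return 0;
}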
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "PoolLayer.h"

#include <cker/operation/AveragePool.h>
#include <cker/operation/MaxPool.h>

#include <unordered_map>

namespace onert
{
namespace backend
{
namespace cpu
{
namespace ops
{

namespace
{
template <typename T>
void avgPool2D(const nnfw::cker::PoolParams &params, const IPortableTensor *input,
               IPortableTensor *output)
{
  nnfw::cker::AveragePool<T>(params, getShape(input), getBuffer<T>(input), getShape(output),
                             getBuffer<T>(output));
}

template <typename T>
void maxPool2D(const nnfw::cker::PoolParams &params, const IPortableTensor *input,
               IPortableTensor *output)
{
  nnfw::cker::MaxPool<T>(params, getShape(input), getBuffer<T>(input), getShape(output),
                         getBuffer<T>(output));
}

template <typename T>
std::function<void(const IPortableTensor *, IPortableTensor *)>
generateKernelGeneric(const nnfw::cker::PoolParams &params, PoolType op_type)
{
  if (op_type == PoolType::kAvg)
  {
    return std::bind(&avgPool2D<T>, params, std::placeholders::_1, std::placeholders::_2);
  }
  else if (op_type == PoolType::kMax)
  {
    return std::bind(&maxPool2D<T>, params, std::placeholders::_1, std::placeholders::_2);
  }
  else
  {
    throw std::runtime_error{"Pool: unsupported pool type"};
  }
}
} // namespace

PoolLayer::PoolLayer() : _input(nullptr), _output(nullptr), _kernel()
{
  // DO NOTHING
}

// Initialize all PoolParams members (including the activation bounds) up front
// so none is read uninitialized by the pooling kernels (Coverity 1229862).
#define POOLING_PARAMETERS                              \
  nnfw::cker::PoolParams op_params;                     \
  op_params.stride_height = strideHeight;               \
  op_params.stride_width = strideWidth;                 \
  op_params.filter_height = kernelHeight;               \
  op_params.filter_width = kernelWidth;                 \
  op_params.padding_values.height = (int8_t)paddingTop; \
  op_params.padding_values.width = (int8_t)paddingLeft; \
  op_params.float_activation_min = 0;                   \
  op_params.float_activation_max = 0;                   \
  op_params.quantized_activation_min = 0;               \
  op_params.quantized_activation_max = 0;

void PoolLayer::configure(const IPortableTensor *input, const uint32_t paddingLeft, const uint32_t,
                          const uint32_t paddingTop, const uint32_t, const uint32_t strideWidth,
                          const uint32_t strideHeight, const uint32_t kernelWidth,
                          const uint32_t kernelHeight, const ir::Activation activation,
                          IPortableTensor *output, const PoolType op_type)
{
  assert(input != nullptr);
  assert(output != nullptr);

  _input = input;
  _output = output;

  POOLING_PARAMETERS

  switch (_input->data_type())
  {
    case OperandType::FLOAT32:
    {
      float output_activation_min = 0;
      float output_activation_max = 0;
      CalculateActivationRange<float>(activation, &output_activation_min, &output_activation_max);
      op_params.float_activation_min = output_activation_min;
      op_params.float_activation_max = output_activation_max;

      _kernel = generateKernelGeneric<float>(op_params, op_type);
      break;
    }
    case OperandType::QUANT_UINT8_ASYMM:
    {
      int32_t output_activation_min = 0;
      int32_t output_activation_max = 0;
      CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
                                        &output_activation_max);
      op_params.quantized_activation_min = output_activation_min;
      op_params.quantized_activation_max = output_activation_max;
      _kernel = generateKernelGeneric<uint8_t>(op_params, op_type);
      break;
    }
    case OperandType::QUANT_INT8_ASYMM:
    {
      int32_t output_activation_min = 0;
      int32_t output_activation_max = 0;
      CalculateActivationRangeQuantized(activation, _output, &output_activation_min,
                                        &output_activation_max);
      op_params.quantized_activation_min = output_activation_min;
      op_params.quantized_activation_max = output_activation_max;
      _kernel = generateKernelGeneric<int8_t>(op_params, op_type);
      break;
    }
    default:
      throw std::runtime_error{"Pool: unsupported data type"};
  }
}

void PoolLayer::run() { _kernel(_input, _output); }

#undef POOLING_PARAMETERS

} // namespace ops
} // namespace cpu
} // namespace backend
} // namespace onert
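A standalone sketch of the kernel-dispatch pattern used above: configure() bakes the pool parameters into a std::function via std::bind, and run() only forwards the tensors. Params, Tensor and fakeAvgPool here are hypothetical stand-ins, not part of the onert API.

// Minimal sketch of the std::bind dispatch used by PoolLayer; not onert code.
#include <functional>
#include <iostream>

struct Params { int filter_w; int filter_h; }; // stand-in for PoolParams
struct Tensor { float value; };                // stand-in for IPortableTensor

void fakeAvgPool(const Params &p, const Tensor *in, Tensor *out)
{
  // Stand-in for nnfw::cker::AveragePool: divide by the window size.
  out->value = in->value / static_cast<float>(p.filter_w * p.filter_h);
}

int main()
{
  Params params{2, 2};
  // Same shape as PoolLayer::_kernel: parameters captured now, tensors supplied later.
  std::function<void(const Tensor *, Tensor *)> kernel =
    std::bind(&fakeAvgPool, params, std::placeholders::_1, std::placeholders::_2);

  Tensor in{8.0f};
  Tensor out{0.0f};
  kernel(&in, &out);              // analogous to PoolLayer::run()
  std::cout << out.value << '\n'; // prints 2
  return 0;
}

Binding by value copies the params into the callable, which is why configure() can let op_params go out of scope while run() keeps using the captured copy.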