/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NNFW_CKER_AVERAGE_POOL_H__
#define __NNFW_CKER_AVERAGE_POOL_H__

#include "cker/neon/neon_check.h"
#include "cker/eigen/Utils.h"
#include "cker/Shape.h"
#include "cker/Types.h"
#include "cker/Utils.h"

#include <Eigen/Core>

#include <algorithm>
#include <cassert>
#include <cstring>

namespace nnfw
{
namespace cker
{

// TODO Change to apply neon for this function if it is faster
inline void AveragePool(const PoolParams &params, const Shape &input_shape, const float *input_data,
                        const Shape &output_shape, float *output_data)
{
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;

  // TODO(benoitjacob) make this a proper reference impl without Eigen!
  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // TODO(benoitjacob) get rid of the dynamic memory allocation here!
  Eigen::VectorXf out_count(out_mat.cols());
  out_count.setZero();
  // Prefill the output to 0.
  out_mat.setZero();
  for (int b = 0; b < batches; ++b)
  {
    for (int h = 0; h < input_height; ++h)
    {
      for (int w = 0; w < input_width; ++w)
      {
        // (h_start, h_end) * (w_start, w_end) is the range that the input
        // vector projects to.
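        // Illustration (values chosen for this example): with filter_height = 3
        // and stride_height = 1, the input row at hpad = 5 lands inside the
        // windows of output rows 3, 4 and 5:
        //   h_start = (5 - 3) / 1 + 1 = 3, h_end = min(5 / 1 + 1, output_height).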
        int hpad = h + params.padding_values.height;
        int wpad = w + params.padding_values.width;
        int h_start =
          (hpad < params.filter_height) ? 0 : (hpad - params.filter_height) / stride_height + 1;
        int h_end = std::min(hpad / stride_height + 1, output_height);
        int w_start =
          (wpad < params.filter_width) ? 0 : (wpad - params.filter_width) / stride_width + 1;
        int w_end = std::min(wpad / stride_width + 1, output_width);
        // compute elementwise sum
        for (int ph = h_start; ph < h_end; ++ph)
        {
          for (int pw = w_start; pw < w_end; ++pw)
          {
            int out_offset = NodeOffset(b, ph, pw, output_height, output_width);
            out_mat.col(out_offset) += in_mat.col(NodeOffset(b, h, w, input_height, input_width));
            out_count(out_offset)++;
          }
        }
      }
    }
  }
  // Divide the output by the actual number of elements being averaged over
  assert(out_count.minCoeff() > 0);
  out_mat.array().rowwise() /= out_count.transpose().array();

  const int flat_size = output_shape.FlatSize();
  for (int i = 0; i < flat_size; ++i)
  {
    output_data[i] = ActivationFunctionWithMinMax(output_data[i], params.float_activation_min,
                                                  params.float_activation_max);
  }
}
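
// Usage sketch for the float path (illustrative only; the Shape initializer-list
// constructor and the PoolParams field names are assumed here from cker/Shape.h
// and cker/Types.h, and <limits> would be needed for the activation bounds):
//
//   PoolParams params{};
//   params.filter_height = params.filter_width = 2;
//   params.stride_height = params.stride_width = 2;
//   params.padding_values.height = params.padding_values.width = 0;
//   params.float_activation_min = std::numeric_limits<float>::lowest();
//   params.float_activation_max = std::numeric_limits<float>::max();
//   // 1x4x4x1 NHWC input pooled to 1x2x2x1: each output is the mean of one
//   // 2x2 input window.
//   AveragePool(params, Shape{1, 4, 4, 1}, input, Shape{1, 2, 2, 1}, output);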

inline void AveragePool16(const PoolParams &params, const Shape &input_shape,
                          const uint8_t *input_data, const Shape &output_shape,
                          uint8_t *output_data)
{
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since the depth can be large, and the
  // temporary storage would otherwise have to be arbitrarily large, we divide
  // the work into depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;
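  // For example, depth = 600 is processed as tranches of 256, 256 and 88 channels.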

  assert(params.quantized_activation_min <= params.quantized_activation_max);
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
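
  // uint16_t accumulators are sufficient here: this variant is only dispatched
  // for windows of at most 16x16 = 256 elements (see AveragePool below).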
  uint16_t acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch)
  {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
    {
      const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y)
      {
        for (int out_x = 0; out_x < output_width; ++out_x)
        {
          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
          const int filter_count =
            (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8_t *input_ptr =
            input_data + depth_base +
            depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++)
          {
            const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++)
            {
              const uint8_t *input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
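              // NEON path: vaddw_u8 widens eight uint8_t lanes and adds them into
              // the uint16_t accumulators, 16 channels per iteration, then 8, with
              // the scalar loop below handling any remainder.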
              for (; channel <= tranche_depth - 16; channel += 16)
              {
                uint16x8_t acc_reg[2];
                for (int i = 0; i < 2; i++)
                {
                  acc_reg[i] = vld1q_u16(acc + channel + 8 * i);
                }
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg[0] = vaddw_u8(acc_reg[0], vget_low_u8(input_reg));
                acc_reg[1] = vaddw_u8(acc_reg[1], vget_high_u8(input_reg));
                for (int i = 0; i < 2; i++)
                {
                  vst1q_u16(acc + channel + 8 * i, acc_reg[i]);
                }
              }
              for (; channel <= tranche_depth - 8; channel += 8)
              {
                uint16x8_t acc_reg = vld1q_u16(acc + channel);
                uint8x8_t input_reg = vld1_u8(input_channel_ptr);
                input_channel_ptr += 8;
                acc_reg = vaddw_u8(acc_reg, input_reg);
                vst1q_u16(acc + channel, acc_reg);
              }
#endif
              for (; channel < tranche_depth; ++channel)
              {
                acc[channel] += *input_channel_ptr++;
              }
              input_row_ptr += depth;
            }
          }
          uint8_t *output_ptr = output_data + Offset(output_shape, batch, out_y, out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
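          // (acc + FILTER_COUNT / 2) / FILTER_COUNT divides with rounding to
          // nearest. The macro stamps out the store loop for a compile-time
          // filter_count (9 and 15 below) so the compiler can turn division by
          // a constant into cheaper multiplies and shifts.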
#define AVGPOOL_DIVIDING_BY(FILTER_COUNT)                               \
  if (filter_count == FILTER_COUNT)                                     \
  {                                                                     \
    for (; channel <= tranche_depth - 8; channel += 8)                  \
    {                                                                   \
      uint16_t buf[8];                                                  \
      for (int i = 0; i < 8; i++)                                       \
      {                                                                 \
        buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT;  \
      }                                                                 \
      uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));                      \
      buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
      buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
      vst1_u8(output_ptr + channel, buf8);                              \
    }                                                                   \
  }
          AVGPOOL_DIVIDING_BY(9)
          AVGPOOL_DIVIDING_BY(15)
#undef AVGPOOL_DIVIDING_BY
          for (; channel <= tranche_depth - 8; channel += 8)
          {
            uint16_t buf[8];
            for (int i = 0; i < 8; i++)
            {
              buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
            }
            uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
            buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max));
            buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, buf8);
          }
#endif
          for (; channel < tranche_depth; ++channel)
          {
            uint16_t a = (acc[channel] + filter_count / 2) / filter_count;
            a = std::max<uint16_t>(a, params.quantized_activation_min);
            a = std::min<uint16_t>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8_t>(a);
          }
        }
      }
    }
  }
}

inline void AveragePool32(const PoolParams &params, const Shape &input_shape,
                          const uint8_t *input_data, const Shape &output_shape,
                          uint8_t *output_data)
{
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since the depth can be large, and the
  // temporary storage would otherwise have to be arbitrarily large, we divide
  // the work into depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;

  assert(params.quantized_activation_min <= params.quantized_activation_max);
  assert(input_shape.DimensionsCount() == 4);
  assert(output_shape.DimensionsCount() == 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
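
  // uint32_t accumulators: this variant handles windows larger than 16x16 = 256
  // elements, whose sums can overflow uint16_t (see AveragePool below).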
  uint32_t acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch)
  {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth; depth_base += kPoolingAccTrancheSize)
    {
      const int tranche_depth = std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y)
      {
        for (int out_x = 0; out_x < output_width; ++out_x)
        {
          const int in_x_origin = (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin = (out_y * stride_height) - params.padding_values.height;
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end = std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end = std::min(params.filter_height, input_height - in_y_origin);
          const int filter_count =
            (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8_t *input_ptr =
            input_data + depth_base +
            depth * (in_x_origin + input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++)
          {
            const uint8_t *input_row_ptr = input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++)
            {
              const uint8_t *input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
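              // NEON path: vmovl_u8 widens uint8_t lanes to uint16_t, and
              // vaddw_u16 then adds them into the uint32_t accumulators four
              // lanes at a time.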
              for (; channel <= tranche_depth - 16; channel += 16)
              {
                uint16x4_t acc_reg[4];
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg[0] = vget_low_u16(vmovl_u8(vget_low_u8(input_reg)));
                acc_reg[1] = vget_high_u16(vmovl_u8(vget_low_u8(input_reg)));
                acc_reg[2] = vget_low_u16(vmovl_u8(vget_high_u8(input_reg)));
                acc_reg[3] = vget_high_u16(vmovl_u8(vget_high_u8(input_reg)));
                for (int i = 0; i < 4; i++)
                {
                  vst1q_u32(acc + channel + 4 * i,
                            vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
              for (; channel <= tranche_depth - 8; channel += 8)
              {
                uint16x4_t acc_reg[2];
                uint16x8_t input_reg = vmovl_u8(vld1_u8(input_channel_ptr));
                input_channel_ptr += 8;
                acc_reg[0] = vget_low_u16(input_reg);
                acc_reg[1] = vget_high_u16(input_reg);
                for (int i = 0; i < 2; i++)
                {
                  vst1q_u32(acc + channel + 4 * i,
                            vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
#endif
              for (; channel < tranche_depth; ++channel)
              {
                acc[channel] += *input_channel_ptr++;
              }
              input_row_ptr += depth;
            }
          }
          uint8_t *output_ptr = output_data + Offset(output_shape, batch, out_y, out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
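          // The rounded averages are at most 255 by construction, so staging
          // them in uint16_t and narrowing with the saturating vqmovn_u16 is
          // safe.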
#define AVGPOOL_DIVIDING_BY(FILTER_COUNT)                               \
  if (filter_count == FILTER_COUNT)                                     \
  {                                                                     \
    for (; channel <= tranche_depth - 8; channel += 8)                  \
    {                                                                   \
      uint16_t buf[8];                                                  \
      for (int i = 0; i < 8; i++)                                       \
      {                                                                 \
        buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT;  \
      }                                                                 \
      uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));                      \
      buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
      buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
      vst1_u8(output_ptr + channel, buf8);                              \
    }                                                                   \
  }
          AVGPOOL_DIVIDING_BY(9)
          AVGPOOL_DIVIDING_BY(15)
#undef AVGPOOL_DIVIDING_BY
          for (; channel <= tranche_depth - 8; channel += 8)
          {
            uint16_t buf[8];
            for (int i = 0; i < 8; i++)
            {
              buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
            }
            uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
            buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max));
            buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, buf8);
          }
#endif
          for (; channel < tranche_depth; ++channel)
          {
            uint16_t a = (acc[channel] + filter_count / 2) / filter_count;
            a = std::max<uint16_t>(a, params.quantized_activation_min);
            a = std::min<uint16_t>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8_t>(a);
          }
        }
      }
    }
  }
}

inline void AveragePool(const PoolParams &params, const Shape &input_shape,
                        const uint8_t *input_data, const Shape &output_shape, uint8_t *output_data)
{
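  // A window of at most 16 * 16 = 256 elements sums to at most 256 * 255 = 65280,
  // which fits in uint16_t, so the 16-bit accumulator path is safe; larger
  // windows take the 32-bit path.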
  if (params.filter_height * params.filter_width > 16 * 16)
  {
    AveragePool32(params, input_shape, input_data, output_shape, output_data);
  }
  else
  {
    AveragePool16(params, input_shape, input_data, output_shape, output_data);
  }
}

} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_AVERAGE_POOL_H__