/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include <cker/operation/AveragePool.h>
19 #include "OperationUtil.h"
21 #include "interp/Registration.h"
22 #include "ir/operation/AvgPool2D.h"
23 #include "util/Utils.h"
24 #include "util/ShapeInference.h"
25 #include "misc/polymorphic_downcast.h"
34 void prepareAvgPool2D(ExecEnv *env, const ir::Operation &node)
36 const auto in_index = node.getInputs().at(0);
37 const auto out_index = node.getOutputs().at(0);
39 const auto in_tensor = env->tensorAt(in_index);
40 UNUSED_RELEASE(in_tensor);
42 assert(in_tensor->num_dimensions() == 4);
44 const auto output_info = env->graph().operands().at(out_index).info();
45 if (output_info.total_size() == 0)
47 // Handle unspecified output shape
48 const auto &avgpool_node =
49 nnfw::misc::polymorphic_downcast<const ir::operation::AvgPool2D &>(node);
50 const auto infered_output_shape =
51 shape_inference::inferAvgPoolShape(in_tensor->tensorInfo().shape(), avgpool_node.param());
52 env->allocateIfNeeded(
53 out_index, ir::OperandInfo::createStaticInfo(infered_output_shape, output_info.typeInfo()));
57 env->allocateIfNeeded(out_index, output_info);
60 auto out_tensor = env->tensorAt(out_index);
61 UNUSED_RELEASE(out_tensor);
63 // Handle same ifm & ofm data type only
64 assert(in_tensor->data_type() == out_tensor->data_type());
65 assert(out_tensor->num_dimensions() == 4);
68 void invoke(const ITensor *in_tensor, const ITensor *out_tensor,
69 const ir::operation::AvgPool2D::Param ¶m)
71 // TODO Support NCHW frontend
72 const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
73 const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
75 ir::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh);
77 nnfw::cker::PoolParams cker_param;
78 calculateActivationRange(param.activation, &cker_param.float_activation_min,
79 &cker_param.float_activation_max);
80 cker_param.filter_width = param.kw;
81 cker_param.filter_height = param.kh;
82 cker_param.padding_values.width = padding.left;
83 cker_param.padding_values.height = padding.top;
84 cker_param.stride_width = param.stride.horizontal;
85 cker_param.stride_height = param.stride.vertical;
87 const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
88 const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
89 const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
90 float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
92 nnfw::cker::AveragePool(cker_param, in_shape, in_ptr, out_shape, out_ptr);
95 void invokeAvgPool2D(const ExecEnv *env, const ir::Operation &node)
97 const auto &avgpool_node =
98 nnfw::misc::polymorphic_downcast<const ir::operation::AvgPool2D &>(node);
100 const auto in_index = node.getInputs().at(0);
101 const auto out_index = node.getOutputs().at(0);
103 // Check lhs shape is same with rhs (with broadcast)
104 const auto in_tensor = env->tensorAt(in_index);
105 const auto out_tensor = env->tensorAt(out_index);
107 const auto data_type = in_tensor->data_type();
108 if (data_type == ir::DataType::FLOAT32)
110 invoke(in_tensor, out_tensor, avgpool_node.param());
114 throw std::runtime_error{"NYI: Support float only"};
117 } // namespace avgpool2d
119 OpKernel *getAvgPool2D()
121 static OpKernel kernel = {avgpool2d::prepareAvgPool2D, avgpool2d::invokeAvgPool2D};
125 } // namespace interp