/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cassert>
#include <stdexcept>

#include <cker/operation/AveragePool.h>
#include <cker/operation/MaxPool.h>

#include "OperationUtil.h"

#include "interp/Registration.h"
#include "ir/operation/Pool2D.h"
#include "util/Utils.h"
#include "util/ShapeInference.h"
#include "misc/polymorphic_downcast.h"
35 void preparePool2D(ExecEnv *env, const ir::Operation &node)
37 const auto &pool_node = nnfw::misc::polymorphic_downcast<const ir::operation::Pool2D &>(node);
38 const auto in_index = node.getInputs().at(pool_node.INPUT);
39 const auto out_index = node.getOutputs().at(0);
41 const auto in_tensor = env->tensorAt(in_index);
42 UNUSED_RELEASE(in_tensor);
44 assert(in_tensor->num_dimensions() == 4);
46 const auto output_info = env->graph().operands().at(out_index).info();
47 if (output_info.total_size() == 0)
49 // Handle unspecified output shape
50 const auto infered_output_shape =
51 shape_inference::inferPoolShape(in_tensor->tensorInfo().shape(), pool_node.param());
52 env->allocateIfNeeded(
53 out_index, ir::OperandInfo::createStaticInfo(infered_output_shape, output_info.typeInfo()));
57 env->allocateIfNeeded(out_index, output_info);
60 auto out_tensor = env->tensorAt(out_index);
61 UNUSED_RELEASE(out_tensor);
63 // Handle same ifm & ofm data type only
64 assert(in_tensor->data_type() == out_tensor->data_type());
65 assert(out_tensor->num_dimensions() == 4);
69 void invoke(const nnfw::cker::PoolParams ¶ms, const nnfw::cker::Shape &in_shape,
70 const T *in_ptr, const nnfw::cker::Shape &out_shape, T *out_ptr,
71 ir::operation::Pool2D::PoolType op_type)
75 case ir::operation::Pool2D::PoolType::AVG:
76 nnfw::cker::AveragePool<T>(params, in_shape, in_ptr, out_shape, out_ptr);
78 case ir::operation::Pool2D::PoolType::MAX:
79 nnfw::cker::MaxPool<T>(params, in_shape, in_ptr, out_shape, out_ptr);
82 throw std::runtime_error{"Interp(Pool2D): NYI unsupported operation"};
87 void invokePool2DOps(const ExecEnv *env, const ir::Operation &node)
89 const auto &pool_node = nnfw::misc::polymorphic_downcast<const ir::operation::Pool2D &>(node);
91 const auto in_index = node.getInputs().at(0);
92 const auto out_index = node.getOutputs().at(0);
94 // Check lhs shape is same with rhs (with broadcast)
95 const auto in_tensor = env->tensorAt(in_index);
96 const auto out_tensor = env->tensorAt(out_index);
98 // TODO support NCHW frontend
99 const auto ifm_shape = in_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
100 const auto ofm_shape = out_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
101 const auto param = pool_node.param();
103 ir::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride, param.kw, param.kh);
105 nnfw::cker::PoolParams cker_param;
106 cker_param.filter_width = param.kw;
107 cker_param.filter_height = param.kh;
108 cker_param.padding_values.width = padding.left;
109 cker_param.padding_values.height = padding.top;
110 cker_param.stride_width = param.stride.horizontal;
111 cker_param.stride_height = param.stride.vertical;
113 const auto data_type = in_tensor->data_type();
114 if (data_type == ir::DataType::FLOAT32)
116 calculateActivationRange(param.activation, &cker_param.float_activation_min,
117 &cker_param.float_activation_max);
119 const auto in_shape = convertShape(in_tensor->tensorInfo().shape());
120 const auto out_shape = convertShape(out_tensor->tensorInfo().shape());
121 const float *in_ptr = reinterpret_cast<const float *>(in_tensor->bufferRO());
122 float *out_ptr = reinterpret_cast<float *>(out_tensor->buffer());
123 // Now, invoke() supports only Pool2D in float
124 invoke<float>(cker_param, in_shape, in_ptr, out_shape, out_ptr, param.op_type);
128 throw std::runtime_error{"NYI: Support float only"};
131 } // namespace pool2d
133 OpKernel *getPool2D()
135 static OpKernel kernel = {pool2d::preparePool2D, pool2d::invokePool2DOps};
139 } // namespace interp