/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cker/operation/Conv.h>

#include "OperationUtil.h"

#include "interp/Registration.h"
#include "ir/operation/Conv2D.h"
#include "util/Utils.h"
#include "util/ShapeInference.h"
#include "misc/polymorphic_downcast.h"

namespace onert
{
namespace interp
{
namespace conv2d
{
void prepareConv2D(ExecEnv *env, const ir::Operation &node)
{
  const auto in_index = node.getInputs().at(ir::operation::Conv2D::INPUT);
  const auto kernel_index = node.getInputs().at(ir::operation::Conv2D::KERNEL);
  const auto bias_index = node.getInputs().at(ir::operation::Conv2D::BIAS);
  const auto out_index = node.getOutputs().at(0);

  const auto in_tensor = env->tensorAt(in_index);
  const auto kernel_tensor = env->tensorAt(kernel_index);
  const auto bias_tensor = env->tensorAt(bias_index);
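
  // Conv2D here expects a 4-D input, a 4-D kernel and a 1-D bias.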
  assert(in_tensor->num_dimensions() == 4);
  assert(kernel_tensor->num_dimensions() == 4);
  assert(bias_tensor->num_dimensions() == 1);

  UNUSED_RELEASE(in_tensor);
  UNUSED_RELEASE(kernel_tensor);
  UNUSED_RELEASE(bias_tensor);
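
  // Allocate the output tensor, inferring its shape from the input/kernel
  // shapes and the convolution parameters when it was not specified.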
  const auto output_info = env->graph().operands().at(out_index).info();
  if (output_info.total_size() == 0)
  {
    // Handle unspecified output shape
    const auto &conv_node = nnfw::misc::polymorphic_downcast<const ir::operation::Conv2D &>(node);
    const auto infered_output_shape = shape_inference::inferConv2DShape(
        in_tensor->tensorInfo().shape(), kernel_tensor->tensorInfo().shape(), conv_node.param());
    env->allocateIfNeeded(
        out_index, ir::OperandInfo::createStaticInfo(infered_output_shape, output_info.typeInfo()));
  }
  else
  {
    env->allocateIfNeeded(out_index, output_info);
  }

  auto out_tensor = env->tensorAt(out_index);
  UNUSED_RELEASE(out_tensor);

  // Handle same ifm & ofm data type only
  assert(in_tensor->data_type() == out_tensor->data_type());
  assert(out_tensor->num_dimensions() == 4);
}
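
// Run the float32 convolution through the cker compute kernel.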
void invoke(const ITensor *ifm_tensor, const ITensor *ker_tensor, const ITensor *bias_tensor,
            const ITensor *ofm_tensor, const ir::operation::Conv2D::Param &param)
{
  // TODO Support NCHW frontend
  const auto ifm_shape = ifm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
  const auto ofm_shape = ofm_tensor->tensorInfo().shape().asFeature(ir::Layout::NHWC);
  // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
  const auto &ker_shape = ker_tensor->tensorInfo().shape();
  const auto ker_height = ker_shape.dim(1);
  const auto ker_width = ker_shape.dim(2);
  const auto padding = ir::calculatePadding(param.padding, ifm_shape, ofm_shape, param.stride,
                                            ker_width, ker_height);

  float activation_min, activation_max;
  calculateActivationRange(param.activation, &activation_min, &activation_max);
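
  // Fill the cker convolution parameters: padding offsets, strides, dilation
  // (fixed to 1 here) and the fused activation clamp range.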
  nnfw::cker::ConvParams cker_param;
  cker_param.padding_type = convertPaddingType(param.padding.type);
  cker_param.padding_values.width = padding.left;
  cker_param.padding_values.height = padding.top;
  cker_param.stride_width = param.stride.horizontal;
  cker_param.stride_height = param.stride.vertical;
  cker_param.dilation_width_factor = 1;
  cker_param.dilation_height_factor = 1;
  cker_param.float_activation_min = activation_min;
  cker_param.float_activation_max = activation_max;
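
  // Convert shapes to cker's representation and take raw float buffers.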
  const auto cker_ifm_shape = convertShape(ifm_tensor->tensorInfo().shape());
  const auto cker_ker_shape = convertShape(ker_tensor->tensorInfo().shape());
  const auto cker_bias_shape = convertShape(bias_tensor->tensorInfo().shape());
  const auto cker_ofm_shape = convertShape(ofm_tensor->tensorInfo().shape());
  const float *ifm_ptr = reinterpret_cast<const float *>(ifm_tensor->bufferRO());
  const float *ker_ptr = reinterpret_cast<const float *>(ker_tensor->bufferRO());
  const float *bias_ptr = reinterpret_cast<const float *>(bias_tensor->bufferRO());
  float *ofm_ptr = reinterpret_cast<float *>(ofm_tensor->buffer());

  nnfw::cker::Conv conv_kernel;
  conv_kernel(cker_param, cker_ifm_shape, ifm_ptr, cker_ker_shape, ker_ptr, cker_bias_shape,
              bias_ptr, cker_ofm_shape, ofm_ptr);
}
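
// Invoke entry point: fetch the operand tensors for this node and dispatch by
// the input data type.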
void invokeConv2D(const ExecEnv *env, const ir::Operation &node)
{
  const auto &conv_node = nnfw::misc::polymorphic_downcast<const ir::operation::Conv2D &>(node);

  const auto ifm_index = node.getInputs().at(ir::operation::Conv2D::INPUT);
  const auto ker_index = node.getInputs().at(ir::operation::Conv2D::KERNEL);
  const auto bias_index = node.getInputs().at(ir::operation::Conv2D::BIAS);
  const auto ofm_index = node.getOutputs().at(0);

  const auto ifm_tensor = env->tensorAt(ifm_index);
  const auto ker_tensor = env->tensorAt(ker_index);
  const auto bias_tensor = env->tensorAt(bias_index);
  const auto ofm_tensor = env->tensorAt(ofm_index);
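
  // Only float32 convolution is supported here; other data types throw.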
  const auto data_type = ifm_tensor->data_type();
  if (data_type == ir::DataType::FLOAT32)
  {
    invoke(ifm_tensor, ker_tensor, bias_tensor, ofm_tensor, conv_node.param());
  }
  else
  {
    throw std::runtime_error{"NYI: Support float32 only"};
  }
}

} // namespace conv2d
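
// Registration hook: returns the prepare/invoke kernel pair for Conv2D.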
OpKernel *getConv2D()
{
  static OpKernel kernel = {conv2d::prepareConv2D, conv2d::invokeConv2D};
  return &kernel;
}

} // namespace interp
} // namespace onert