// runtime/onert/backend/ruy/ops/ConvolutionLayer.cc
/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ConvolutionLayer.h"

#include "../Tensor.h"
#include "ir/Padding.h"

namespace onert
{
namespace backend
{
namespace ruy
{
namespace ops
{
ConvolutionLayer::ConvolutionLayer()
    : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
      _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
      _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
      _dilationHeightFactor(1), _activation(ir::Activation::NONE),
      _conv_kernel(new nnfw::ruy::Conv()), _prepare(false)
{
  // DO NOTHING
}

ConvolutionLayer::~ConvolutionLayer() = default;

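// Float32 convolution path: derives the min/max clamp range for the fused
// activation, packs the cached padding/stride/dilation settings into
// nnfw::ruy::ConvParams, and invokes the ruy-backed convolution kernel.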
void ConvolutionLayer::convFloat32()
{
  float output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);

  nnfw::ruy::ConvParams op_params;
  op_params.padding_type = getPaddingType(_paddingType);
  op_params.padding_values.width = _paddingLeft;
  op_params.padding_values.height = _paddingTop;
  op_params.stride_width = _strideWidth;
  op_params.stride_height = _strideHeight;
  op_params.dilation_width_factor = _dilationWidthFactor;
  op_params.dilation_height_factor = _dilationHeightFactor;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;

  nnfw::ruy::Conv &kernel = *_conv_kernel;
  // Bias is optional; pass nullptr through instead of dereferencing a missing tensor.
  kernel(op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
         getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
         getTensorShape(_bias),
         _bias ? reinterpret_cast<const float *>(_bias->buffer()) : nullptr,
         getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
         _external_context->ruy_context());
}

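// Stores operation parameters and tensor handles for later use. No kernel
// work happens here; preparation is deferred to prepare(), invoked from run().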
void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTensor *kernel,
                                 const IPortableTensor *bias, const ir::PaddingType paddingType,
                                 const uint32_t paddingLeft, const uint32_t paddingRight,
                                 const uint32_t paddingTop, const uint32_t paddingBottom,
                                 const uint32_t strideWidth, const uint32_t strideHeight,
                                 const uint32_t dilationWidthFactor,
                                 const uint32_t dilationHeightFactor,
                                 const ir::Activation activation, IPortableTensor *output,
                                 const std::shared_ptr<ExternalContext> &external_context)
{
  _input = input;
  _kernel = kernel;
  _bias = bias;
  _paddingType = paddingType;
  _paddingLeft = paddingLeft;
  _paddingRight = paddingRight;
  _paddingTop = paddingTop;
  _paddingBottom = paddingBottom;
  _strideWidth = strideWidth;
  _strideHeight = strideHeight;
  _dilationWidthFactor = dilationWidthFactor;
  _dilationHeightFactor = dilationHeightFactor;
  _activation = activation;
  _output = output;
  _external_context = external_context;
}

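// Executes the convolution: recomputes padding when shapes are dynamic, then
// dispatches on the input data type. Only FLOAT32 is supported by this backend.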
void ConvolutionLayer::run()
{
  prepare();

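  // Padding cached at configure() time is invalid once the input or kernel
  // shape changes, so recompute it from the current shapes.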
  if (_input->is_dynamic() || _kernel->is_dynamic())
  {
    const auto ifm_shape = _input->getShape().asFeature(_input->layout());
    const auto ofm_shape = _output->getShape().asFeature(_input->layout());
    // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
    const auto ker_shape = _kernel->getShape();
    const auto ker_height = ker_shape.dim(1);
    const auto ker_width = ker_shape.dim(2);

    ir::Stride stride;
    stride.vertical = _strideHeight; // vertical stride runs along the height axis
    stride.horizontal = _strideWidth;

    ir::Padding param_padding;
    param_padding.type = _paddingType;
    param_padding.param.left = _paddingLeft;
    param_padding.param.right = _paddingRight;
    param_padding.param.top = _paddingTop;
    param_padding.param.bottom = _paddingBottom;

    const auto padding =
        ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
                             _dilationWidthFactor, _dilationHeightFactor);

    _paddingLeft = padding.left;
    _paddingRight = padding.right;
    _paddingTop = padding.top;
    _paddingBottom = padding.bottom;
  }
  if (_input->data_type() == OperandType::FLOAT32)
  {
    convFloat32();
  }
  else
  {
    throw std::runtime_error{"Conv: unsupported data type"};
  }
}

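// One-time preparation, guarded by _prepare. For a constant float32 kernel,
// nnfw::ruy::Conv::prepare can pre-pack the filter data once so that repeated
// run() calls reuse it.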
void ConvolutionLayer::prepare()
{
  if (_prepare)
    return;

  nnfw::ruy::Conv &kernel = *_conv_kernel;
  if (_input->data_type() == OperandType::FLOAT32 && _kernel->is_constant())
  {
    kernel.prepare(getTensorShape(_input), getTensorShape(_kernel), getTensorShape(_output),
                   _strideWidth, _strideHeight, _dilationWidthFactor, _dilationHeightFactor);
  }
  _prepare = true;
}

} // namespace ops
} // namespace ruy
} // namespace backend
} // namespace onert
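
// Usage sketch (illustrative only, not part of this translation unit): a
// backend kernel generator would typically wire the layer up as below. The
// tensor handles and parameter values here are hypothetical.
//
//   auto fn = std::make_unique<ops::ConvolutionLayer>();
//   fn->configure(ifm, ker, bias, ir::PaddingType::SAME,
//                 /*paddingLeft=*/0, /*paddingRight=*/0, /*paddingTop=*/0,
//                 /*paddingBottom=*/0, /*strideWidth=*/1, /*strideHeight=*/1,
//                 /*dilationWidthFactor=*/1, /*dilationHeightFactor=*/1,
//                 ir::Activation::RELU, ofm, external_context);
//   fn->run(); // prepare() is called lazily on the first run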