2 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "ConvolutionLayer.h"
19 #include "../Tensor.h"
20 #include "ir/Padding.h"
// Default-construct with null tensor operands, explicit zero padding, zero
// strides, unit dilation, no fused activation, and a freshly allocated ruy
// Conv kernel. Real parameters are supplied later through configure();
// _prepare starts false so prepare() can perform one-time kernel setup.
ConvolutionLayer::ConvolutionLayer()
  : _input(nullptr), _kernel(nullptr), _bias(nullptr), _output(nullptr),
    _paddingType(ir::PaddingType::EXPLICIT), _paddingLeft(0), _paddingTop(0), _paddingRight(0),
    _paddingBottom(0), _strideWidth(0), _strideHeight(0), _dilationWidthFactor(1),
    _dilationHeightFactor(1), _activation(ir::Activation::NONE),
    _conv_kernel(new nnfw::ruy::Conv()), _prepare(false)
40 ConvolutionLayer::~ConvolutionLayer() = default;
42 void ConvolutionLayer::convFloat32()
44 float output_activation_min = 0, output_activation_max = 0;
45 CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
47 nnfw::ruy::ConvParams op_params;
48 op_params.padding_type = getPaddingType(_paddingType);
49 op_params.padding_values.width = _paddingLeft;
50 op_params.padding_values.height = _paddingTop;
51 op_params.stride_width = _strideWidth;
52 op_params.stride_height = _strideHeight;
53 op_params.dilation_width_factor = _dilationWidthFactor;
54 op_params.dilation_height_factor = _dilationHeightFactor;
55 op_params.float_activation_min = output_activation_min;
56 op_params.float_activation_max = output_activation_max;
58 nnfw::ruy::Conv &kernel = *_conv_kernel;
59 kernel(op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
60 getTensorShape(_kernel), reinterpret_cast<const float *>(_kernel->buffer()),
61 getTensorShape(_bias), reinterpret_cast<const float *>(_bias->buffer()),
62 getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
63 _external_context->ruy_context());
66 void ConvolutionLayer::configure(const IPortableTensor *input, const IPortableTensor *kernel,
67 const IPortableTensor *bias, const ir::PaddingType paddingType,
68 const uint32_t paddingLeft, const uint32_t paddingRight,
69 const uint32_t paddingTop, const uint32_t paddingBottom,
70 const uint32_t strideWidth, const uint32_t strideHeight,
71 const uint32_t dilationWidthFactor,
72 const uint32_t dilationHeightFactor,
73 const ir::Activation activation, IPortableTensor *output,
74 const std::shared_ptr<ExternalContext> &external_context)
79 _paddingType = paddingType;
80 _paddingLeft = paddingLeft;
81 _paddingRight = paddingRight;
82 _paddingTop = paddingTop;
83 _paddingBottom = paddingBottom;
84 _strideWidth = strideWidth;
85 _strideHeight = strideHeight;
86 _dilationWidthFactor = dilationWidthFactor;
87 _dilationHeightFactor = dilationHeightFactor;
88 _activation = activation;
90 _external_context = external_context;
93 void ConvolutionLayer::run()
97 if (_input->is_dynamic() || _kernel->is_dynamic())
99 const auto ifm_shape = _input->getShape().asFeature(_input->layout());
100 const auto ofm_shape = _output->getShape().asFeature(_input->layout());
101 // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
102 const auto ker_shape = _kernel->getShape();
103 const auto ker_height = ker_shape.dim(1);
104 const auto ker_width = ker_shape.dim(2);
107 stride.vertical = _strideWidth;
108 stride.horizontal = _strideWidth;
110 ir::Padding param_padding;
111 param_padding.type = _paddingType;
112 param_padding.param.left = _paddingLeft;
113 param_padding.param.right = _paddingRight;
114 param_padding.param.top = _paddingTop;
115 param_padding.param.bottom = _paddingBottom;
118 ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
119 _dilationWidthFactor, _dilationHeightFactor);
121 _paddingLeft = padding.left;
122 _paddingRight = padding.right;
123 _paddingTop = padding.top;
124 _paddingBottom = padding.bottom;
126 if (_input->data_type() == OperandType::FLOAT32)
132 throw std::runtime_error{"Conv: unsupported data type"};
136 void ConvolutionLayer::prepare()
141 nnfw::ruy::Conv &kernel = *_conv_kernel;
142 if (_input->data_type() == OperandType::FLOAT32 && _kernel->is_constant())
144 kernel.prepare(getTensorShape(_input), getTensorShape(_kernel), getTensorShape(_output),
145 _strideWidth, _strideHeight, _dilationWidthFactor, _dilationHeightFactor);
152 } // namespace backend