/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "KernelGenerator.h"

#include "ops/ConvolutionLayer.h"
#include "ops/FullyConnectedLayer.h"

#include <backend/Backend.h>
#include <backend/IConfig.h>
#include <memory>
#include <util/Utils.h>
#include <util/logging.h>
#include <exec/DynamicShapeInferer.h>

#include <stdexcept>

namespace onert
{
namespace backend
{
namespace ruy
{

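// Builds the FunctionSequence for the operation at `ind`: attaches a dynamic-tensor
// context so shapes can be re-inferred at run time, visits the operation to create its
// kernel, and bumps the reference counts of the operation's I/O tensors.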
std::unique_ptr<exec::FunctionSequence> KernelGenerator::generate(ir::OperationIndex ind)
{
  auto ret = std::make_unique<exec::FunctionSequence>();

  assert(_tensor_builder->dynamicTensorManager());
  assert(_tensor_reg);

  // Prepare to handle dynamic tensors later
  auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
  {
    dyn_ctx->op = &_operations_ctx.at(ind);
    dyn_ctx->dynamic_shape_inferer = std::make_shared<exec::DynamicShapeInferer>(_ctx, _tensor_reg);
  }
  ret->dynamic_tensor_ctx(dyn_ctx);

  auto &op = _graph.operations().at(ind);
  op.accept(*this);
  assert(_return_fn); // _return_fn must have been generated
  ret->append(std::move(_return_fn));

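  // For every defined input and every output: portable tensors are expected to be NHWC,
  // and native tensors get their reference count increased so they stay alive.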
  for (auto &&ind : (op.getInputs() | ir::Remove::UNDEFINED) + op.getOutputs())
  {
    auto portable_tensor = _tensor_reg->getPortableTensor(ind);
    if (portable_tensor)
    {
      assert(portable_tensor->layout() == ir::Layout::NHWC);
    }

    auto tensor = _tensor_reg->getNativeTensor(ind);
    if (tensor)
    {
      tensor->increase_ref();
    }
  }
  return ret;
}

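// Capture graph contexts and backend resources; kernels themselves are created per
// operation in generate().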
KernelGenerator::KernelGenerator(
  const ir::Graph &graph, const std::shared_ptr<TensorBuilder> &tensor_builder,
  const std::shared_ptr<basic::TensorRegistry> &tensor_reg,
  const std::shared_ptr<backend::custom::IKernelBuilder> &kernel_builder,
  const std::shared_ptr<ExternalContext> &external_context)
  : basic::KernelGeneratorBase{graph},
    _ctx(graph.operands()), _operations_ctx{graph.operations()}, _current_layout{graph.layout()},
    _tensor_builder(tensor_builder), _tensor_reg{tensor_reg}, _kernel_builder(kernel_builder),
    _external_context(external_context)
{
  // DO NOTHING
}

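// Creates a ruy ConvolutionLayer. Padding is pre-computed here only when the input and
// kernel shapes are static.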
void KernelGenerator::visit(const ir::operation::Conv2D &node)
{
  using ir::operation::Conv2D;

  const auto ofm_index{node.getOutputs().at(0)};
  const auto ifm_index{node.getInputs().at(Conv2D::Input::INPUT)};
  const auto ker_index{node.getInputs().at(Conv2D::Input::KERNEL)};
  const auto bias_index{node.getInputs().at(Conv2D::Input::BIAS)};

  auto ofm_tensor = _tensor_reg->getPortableTensor(ofm_index);
  auto ifm_tensor = _tensor_reg->getPortableTensor(ifm_index);
  auto ker_tensor = _tensor_reg->getPortableTensor(ker_index);
  auto bias_tensor = _tensor_reg->getPortableTensor(bias_index);

  const auto stride = node.param().stride;
  const auto activation = node.param().activation;
  const auto param_padding = node.param().padding;
  const auto dilation = node.param().dilation;
  auto fn = std::make_unique<ops::ConvolutionLayer>();

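  // With a dynamic input or kernel shape the padding cannot be pre-computed, so the raw
  // padding parameters are passed through unchanged.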
  if (_ctx.at(ifm_index).info().isDynamic() || _ctx.at(ker_index).info().isDynamic())
  {
    fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, param_padding.param.left,
                  param_padding.param.right, param_padding.param.top, param_padding.param.bottom,
                  stride.horizontal, stride.vertical, dilation.width_factor, dilation.height_factor,
                  activation, ofm_tensor, _external_context);

    _return_fn = std::move(fn);
    return;
  }
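  // Static shapes: derive the effective padding from the feature-map shapes, kernel size,
  // stride, and dilation before configuring the layer.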
  const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature(_current_layout);
  const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature(_current_layout);
  // Kernel format is [depth_out, kernel_height, kernel_width, depth_in].
  const auto &ker_shape = _ctx.at(ker_index).shape();
  const auto ker_height = ker_shape.dim(1);
  const auto ker_width = ker_shape.dim(2);

  const auto padding =
    ir::calculatePadding(param_padding, ifm_shape, ofm_shape, stride, ker_width, ker_height,
                         dilation.width_factor, dilation.height_factor);

  fn->configure(ifm_tensor, ker_tensor, bias_tensor, param_padding.type, padding.left,
                padding.right, padding.top, padding.bottom, stride.horizontal, stride.vertical,
                dilation.width_factor, dilation.height_factor, activation, ofm_tensor,
                _external_context);

  _return_fn = std::move(fn);
}

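// Creates a ruy FullyConnectedLayer for the given node.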
void KernelGenerator::visit(const ir::operation::FullyConnected &node)
{
  using ir::operation::FullyConnected;

  const auto output_index{node.getOutputs().at(0)};
  const auto input_index{node.getInputs().at(FullyConnected::Input::INPUT)};
  const auto weight_index{node.getInputs().at(FullyConnected::Input::WEIGHT)};
  const auto bias_index{node.getInputs().at(FullyConnected::Input::BIAS)};
  const auto activation = node.param().activation;
  const auto weights_format = node.param().weights_format;

  auto output_tensor = _tensor_reg->getPortableTensor(output_index);
  auto input_tensor = _tensor_reg->getPortableTensor(input_index);
  auto weight_tensor = _tensor_reg->getPortableTensor(weight_index);
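  // The bias input is optional; pass nullptr when it is undefined.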
  auto bias_tensor = bias_index.undefined() ? nullptr : _tensor_reg->getPortableTensor(bias_index);

  auto fn = std::make_unique<ops::FullyConnectedLayer>();

  fn->configure(input_tensor, weight_tensor, bias_tensor, activation, weights_format, output_tensor,
                _external_context);

  _return_fn = std::move(fn);
}

} // namespace ruy
} // namespace backend
} // namespace onert