/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "KernelGenerator.h"

#include <cassert>
#include <memory>
#include <vector>

#include <backend/BackendContext.h>
#include <util/Utils.h>

#include "exec/ExecutorBase.h"
#include "exec/FunctionSequence.h"
#include "kernel/IfLayer.h"
#include "kernel/PermuteLayer.h"
#include "kernel/WhileLayer.h"
34 KernelGenerator::KernelGenerator(const ir::Graph &graph, IDynamicTensorManager *dyn_tensor_manager,
35 const std::shared_ptr<TensorRegistry> &tensor_reg,
36 const std::shared_ptr<ExternalContext> &external_context)
37 : _graph{graph}, _dyn_tensor_manager{dyn_tensor_manager}, _tensor_reg{tensor_reg},
38 _tensor_registries{}, _executor_map{nullptr}, _external_context{external_context}
40 UNUSED_RELEASE(_graph);
41 UNUSED_RELEASE(_tensor_registries);
42 UNUSED_RELEASE(_executor_map);
45 void KernelGenerator::visit(const ir::OpSequence &op_seq)
47 assert(!_return_fn_seq);
48 assert(_dyn_tensor_manager);
51 auto dyn_shape_inferer =
52 std::make_unique<exec::DynamicShapeInferer>(_graph.operands(), _tensor_reg);
54 _return_fn_seq = std::make_unique<exec::FunctionSequence>();
56 // Prepare to handle dynamic tensors later
57 auto dyn_ctx = std::make_shared<exec::FunctionSequence::DynamicTensorCtx>();
59 dyn_ctx->op_seq = &op_seq;
60 dyn_ctx->operations = &_graph.operations();
61 dyn_ctx->dynamic_shape_inferer = std::move(dyn_shape_inferer);
62 dyn_ctx->dynamic_tensor_manager = _dyn_tensor_manager;
64 _return_fn_seq->dynamic_tensor_ctx(dyn_ctx);
67 for (const auto &op_idx : op_seq.operations())
69 const auto &node = _graph.operations().at(op_idx);
71 _return_fn_seq->append(releaseFunction());
75 void KernelGenerator::visit(const ir::operation::If &node)
77 const auto then_subg_index = node.param().then_subg_index;
78 const auto else_subg_index = node.param().else_subg_index;
80 std::vector<backend::ITensor *> input_tensors;
81 for (const auto input_index : node.getInputs())
83 auto input_tensor = getTensor(input_index);
85 input_tensors.emplace_back(input_tensor);
88 std::vector<backend::ITensor *> output_tensors;
89 for (const auto output_index : node.getOutputs())
91 auto output_tensor = getTensor(output_index);
92 output_tensors.emplace_back(output_tensor);
95 // IfLayer just set ExecutorMap instead of then and else executor to avoid complexity of
96 // creating executor recusively
97 const auto cond_tensor = input_tensors.front();
98 input_tensors.erase(input_tensors.begin());
99 auto fn = std::make_unique<::onert::backend::controlflow::kernel::IfLayer>(
100 cond_tensor, input_tensors, output_tensors, node.getOutputs(), _graph, then_subg_index,
101 else_subg_index, _executor_map, _external_context);
103 _return_fn = std::move(fn);
106 void KernelGenerator::visit(const ir::operation::Permute &node)
108 const auto output_index{node.getOutputs().at(0)};
109 const auto input_index{node.getInputs().at(0)};
112 std::vector<ITensor *> output_tensors{getTensor(output_index)};
113 std::vector<ITensor *> input_tensors{getTensor(input_index)};
116 std::make_unique<kernel::PermuteLayer>(input_tensors, output_tensors, _external_context);
117 _return_fn = std::move(fn);
120 void KernelGenerator::visit(const ir::operation::While &node)
122 const auto cond_subg_index = node.param().cond_subg_index;
123 const auto body_subg_index = node.param().body_subg_index;
125 // This op does not support input as a constant, because controlflow backend does not have
127 std::vector<backend::ITensor *> input_tensors;
128 for (const auto input_index : node.getInputs())
130 auto input_tensor = getTensor(input_index);
132 input_tensors.emplace_back(input_tensor);
135 std::vector<backend::ITensor *> output_tensors;
136 for (const auto output_index : node.getOutputs())
138 auto output_tensor = getTensor(output_index);
139 output_tensors.emplace_back(output_tensor);
142 // WhileLayer just set ExecutorMap instead of cond and body executor to avoid complexity of
143 // creating executor recusively
144 auto fn = std::make_unique<::onert::backend::controlflow::kernel::WhileLayer>(
145 input_tensors, output_tensors, node.getOutputs(), _graph, cond_subg_index, body_subg_index,
146 _executor_map, _external_context);
148 _return_fn = std::move(fn);
151 backend::ITensor *KernelGenerator::getTensor(const ir::OperandIndex &index)
153 backend::ITensor *ret = _tensor_registries.getITensor(index);
154 assert(ret != nullptr);
158 } // namespace controlflow
159 } // namespace backend