/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "BackendContext.h"

#include "KernelGenerator.h"
#include "backend/cpu_common/BackendContextHelpers.h"

namespace onert
{
namespace backend
{
namespace controlflow
{
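
// Initialize constant operands assigned to this backend: let the constant
// initializer visit each assigned operation (with that operation's layout),
// register a default initializer for any constant it did not claim, then run.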
void BackendContext::initConsts()
{
  for (auto &op : operation_list())
  {
    constant_initializer->setLayout(op.layout);
    graph()->operations().at(op.index).accept(*constant_initializer);
  }

  for (auto ind : operand_list())
  {
    const auto &obj = graph()->operands().at(ind);
    if (obj.isConstant() && !constant_initializer->exist(ind))
    {
      constant_initializer->registerDefaultInitializer(ind, obj);
    }
  }

  constant_initializer->run();
}
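
// Register a backend tensor for every operand assigned to this backend (model
// inputs/outputs are skipped; they are presumably provided from outside this
// backend), plan their memory, and prepare the tensor builder.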
ITensorRegistry *BackendContext::genTensors(const std::vector<onert::ir::OpSequenceIndex> &order,
                                            const ir::OpSequences &op_seqs,
                                            const ir::LowerInfoMap &lower_info)
{
  auto model_io = (graph()->getInputs() + graph()->getOutputs()) | ir::Remove::UNDEFINED |
                  ir::Remove::DUPLICATED;
  for (auto index : operand_list())
  {
    if (model_io.contains(index))
      continue;
    const auto &obj = graph()->operands().at(index);
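    // Derive the frontend layout from the operand's first use; fall back to
    // UNKNOWN when the operand is unused or no assigned operation uses it.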
    const auto frontend_layout = [&]() {
      if (obj.getUses().size() == 0)
        return ir::Layout::UNKNOWN;
      auto use_op_ind = *obj.getUses().begin(); // FIXME What if it has two or more uses?
      for (auto &operation_info : operation_list())
      {
        if (operation_info.index == use_op_ind)
          return operation_info.layout;
      }
      return ir::Layout::UNKNOWN;
    }();
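    // Register only operands that this backend defines; operands defined by
    // another backend are expected to be registered on that backend's side.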
    const auto &permute_factor = lower_info.operand.at(index)->def_factors().getOnlyElement();
    if (permute_factor.backend() != backend())
      continue;
    const auto backend_layout = permute_factor.layout();
    ir::OperandInfo backend_info{permuteShape(obj.shape(), frontend_layout, backend_layout),
                                 obj.typeInfo(), obj.info().memAllocType(), obj.isConstant()};
    tensor_builder->registerTensorInfo(index, backend_info, backend_layout);
  }

  // TODO Get compiler options from the compiler and use them rather than reading the Env
  if (util::getConfigString(util::config::EXECUTOR) == "Linear")
  {
    cpu_common::planTensors(*this, order, op_seqs, lower_info);
  }
  else
  {
    // For executors that do not have a fixed linear execution order:
    // as a workaround, mark every registered tensor as in use from the start so
    // that the static memory planner never deallocates it.
    for (auto ind : operand_list())
    {
      if (tensor_builder->isRegistered(ind))
        tensor_builder->notifyFirstUse(ind);
    }
  }

  tensor_builder->prepare();

  return tensor_registry.get();
}
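
// Generate a function sequence for every OpSequence that contains at least one
// operation assigned to this backend, initialize constants, release the source
// operand data, and run prepare() on every generated function.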
FunctionMap BackendContext::genKernels(const std::vector<ir::OpSequenceIndex> &order,
                                       const ir::OpSequences &op_seqs)
{
  FunctionMap ret;

  for (auto op_seq_ind : order)
  {
    const auto &op_seq = op_seqs.at(op_seq_ind);
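    // Skip op sequences that contain none of this backend's operations.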
    bool assigned = [&]() {
      for (auto op_info : operation_list())
        if (op_seq.exist(op_info.index))
          return true;
      return false;
    }();
    if (!assigned)
      continue;
    auto fn_seq = kernel_gen->generate(op_seqs.at(op_seq_ind));
    ret.emplace_back(op_seq_ind, std::move(fn_seq));
  }
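
  // Copy constant operand data into the corresponding backend tensors.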
  initConsts();

  // NOTE For memory optimization, we want to free some operand data
  for (auto ind : operand_list())
  {
    // TODO Remove const_cast
    auto &obj = const_cast<ir::Graph *>(graph())->operands().at(ind);
    obj.releaseData();
  }
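
  // Run each generated function sequence's one-time prepare() step before execution.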
  for (auto &it : ret)
  {
    auto &fn_seq = it.second;
    fn_seq->iterate([&](exec::IFunction &ifunc) { ifunc.prepare(); });
  }

  return ret;
}

} // namespace controlflow
} // namespace backend
} // namespace onert