/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ONERT_BACKEND_CPU_COMMON_BACKEND_CONTEXT_HELPERS_H__
#define __ONERT_BACKEND_CPU_COMMON_BACKEND_CONTEXT_HELPERS_H__

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

#include "ir/OperandIndexMap.h"
#include "ir/OperandIndexSequence.h"
#include "ir/OpSequences.h"
#include "ir/LowerInfoMap.h"
#include "util/logging.h"

namespace onert
{
namespace backend
{
namespace cpu_common
{
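
// planTensors drives memory planning for static tensors: it walks the lowered graph in the
// given linear execution order and notifies the tensor builder of each tensor's first
// definition and last use, so a memory planner can reuse buffers across tensors whose
// lifetimes do not overlap.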
// TODO Remove the template param BackendContext once unification of cpu backend context is done
template <typename T_BackendContext>
void planTensors(const T_BackendContext &ctx, const std::vector<onert::ir::OpSequenceIndex> &order,
                 const ir::OpSequences &op_seqs, const ir::LowerInfoMap &lower_info)
{
  auto graph = ctx.graph();
  auto tensor_builder = ctx.tensor_builder;

  ir::OperandIndexMap<uint32_t> uses_map;
  ir::OperandIndexMap<uint32_t> def_map;
  ir::OperandIndexSequence constants;
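
  // uses_map tracks the remaining use count of each operand; def_map holds 1 while the
  // operand's defining operation has not been visited yet and 0 afterwards. The
  // notify{First|Last}Use calls below are driven by these counters. constants collects
  // constant operands so they can be allocated first and released last.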

  auto model_io =
    (graph->getInputs() + graph->getOutputs()) | ir::Remove::UNDEFINED | ir::Remove::DUPLICATED;
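  // NOTE Operands that are model inputs/outputs are skipped throughout planning; their
  // tensors are managed outside this backend (e.g. buffers given by the user at execution).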

  // Prepare scanning
  for (auto ind : ctx.operand_list())
  {
    if (model_io.contains(ind))
      continue;
    const auto &obj = graph->operands().at(ind);
    const auto &li = lower_info.operand.at(ind);
    if (li->def_factors().getOnlyElement().backend() != ctx.backend())
      continue;

    // Ignore unused tensor
    if (li->def_factors().size() == 0 && li->use_factors().size() == 0)
    {
      VERBOSE_F() << "Operand #" << ind.value() << " will not be used. no more process."
                  << std::endl;
      continue;
    }

    uses_map[ind] = obj.getUses().size();
    def_map[ind] = obj.getDef().valid() ? 1 : 0;

    if (obj.isConstant())
      constants.append(ind);

    auto factor = li->def_factors().getOnlyElement();
    if (!tensor_builder->isRegistered(ind))
    {
      // These tensors do not exist in any op_seq (no use and no def)
      const auto info = obj.info();
      const auto backend_layout = factor.layout();
      // TODO Change tensor info to have permuted shape
      tensor_builder->registerTensorInfo(ind, info, backend_layout);
    }
  }

  // Start scanning to do notify{First|Last}Use for each tensor

  // If a tensor is a constant, increase its use count and allocate it first.
  // Increasing the use count here means the tensor is never deallocated early, i.e., it will
  // be deallocated last.
  for (const auto &ind : constants)
  {
    uses_map[ind]++;
    tensor_builder->notifyFirstUse(ind);
  }
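
  // The extra use added above is balanced by the "Dispose and validate" step at the end of
  // this function, so each constant is released only after all of its real uses are counted.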

  // At each operation:
  // 1. Scan DEF of outputs. If the DEF is still pending, allocate the tensor
  // 2. Scan inputs for variable tensors. If found, allocate them
  // 3. Scan USE of inputs. Decrease the USE count and deallocate when it reaches 0
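  // For example, an operand produced by one operation and consumed by two later operations
  // starts with def_map == 1 and uses_map == 2: notifyFirstUse fires when its producer is
  // visited, and notifyLastUse fires once the second consumer decrements uses_map to 0.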
  for (const auto op_seq_ind : order)
  {
    const auto &op_seq = op_seqs.at(op_seq_ind);
    for (const auto &op_idx : op_seq.operations())
    {
      auto op_inputs = graph->operations().at(op_idx).getInputs() | ir::Remove::DUPLICATED |
                       ir::Remove::UNDEFINED;
      auto op_outputs = graph->operations().at(op_idx).getOutputs() | ir::Remove::DUPLICATED |
                        ir::Remove::UNDEFINED;

      // Define outputs
      for (const auto &ind : op_outputs)
      {
        if (model_io.contains(ind))
          continue;
        if (!tensor_builder->isRegistered(ind))
          continue;
        assert(def_map.find(ind) != def_map.end());
        if (def_map[ind])
        {
          def_map[ind] = 0;
          tensor_builder->notifyFirstUse(ind);
        }
      }

      // Scan variable tensors
      // These tensors behave like constants, but OperandInfo and LowerInfo treat them as
      // non-constant so that the memory planning here can use less memory
      for (const auto &ind : op_inputs)
      {
        if (model_io.contains(ind))
          continue;
        if (!tensor_builder->isRegistered(ind))
          continue;
        const auto &operand = graph->operands().at(ind);
        if (operand.info().isVariable())
        {
          // A variable tensor with a buffer is not supported yet
          assert(operand.data() == nullptr);
          assert(operand.getUses().size() == 1 && !operand.getDef().valid());
          assert(lower_info.operand.at(ind)->def_factors().size() == 1 &&
                 lower_info.operand.at(ind)->use_factors().size() == 1);
          assert(uses_map[ind] == 1 && def_map[ind] == 0);
          tensor_builder->notifyFirstUse(ind);
        }
      }

      for (const auto &ind : op_inputs)
      {
        if (model_io.contains(ind))
          continue;
        if (!tensor_builder->isRegistered(ind))
          continue;
        assert(uses_map.find(ind) != uses_map.end());
        assert(uses_map[ind] > 0);
        uses_map[ind]--;
        if (uses_map[ind] == 0)
        {
          // Plan for deallocation of a static tensor
          tensor_builder->notifyLastUse(ind);

          // Plan for deallocation of a dynamic tensor
          auto dyn_tensor_manager = tensor_builder->dynamicTensorManager();
          auto *tensor = ctx.tensor_registry->getITensor(ind);
          assert(tensor);
          dyn_tensor_manager->planDealloc(op_idx, tensor);
        }
      }
    }
  }
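
  // NOTE planDealloc above ties each dynamic tensor to op_idx, the operation that consumes
  // it last, so its runtime-allocated buffer can be released right after that operation runs.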

  // Dispose and validate
  for (const auto &ind : constants)
  {
    --uses_map[ind];
    if (uses_map[ind] == 0) // To prevent notifyLastUse from being called twice
    {
      tensor_builder->notifyLastUse(ind);
    }
  }

  assert(
    std::all_of(uses_map.begin(), uses_map.end(),
                [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));

  assert(
    std::all_of(def_map.begin(), def_map.end(),
                [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
}
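
// Usage sketch (illustrative only, not part of this header's API): a backend whose context
// type provides graph(), backend(), operand_list(), tensor_builder and tensor_registry could
// plan all of its static tensors as below. `genTensors` is a hypothetical name, and the
// allocation call after planning depends on the backend's tensor builder.
//
//   template <typename Context>
//   void genTensors(Context &ctx, const std::vector<ir::OpSequenceIndex> &order,
//                   const ir::OpSequences &op_seqs, const ir::LowerInfoMap &lower_info)
//   {
//     // ... register tensor infos for ctx's operands on ctx.tensor_builder ...
//     planTensors(ctx, order, op_seqs, lower_info);
//     // After planning, the tensor builder can allocate memory for all static tensors
//     // (the exact call, e.g. allocate()/prepare(), depends on the backend).
//   }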

} // namespace cpu_common
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_CPU_COMMON_BACKEND_CONTEXT_HELPERS_H__