/*
 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ONERT_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
#define __ONERT_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__
#include <cassert>
#include <functional>
#include <memory>

#include <arm_compute/runtime/IMemoryManager.h>

#include "AclMemoryManager.h"
#include "AclInternalBufferManager.h"
#include "ir/OperandIndexMap.h"
33 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor> class AclTensorManager
36 using T_AclMemoryManager = AclMemoryManager<T_ITensor, T_Tensor, T_SubTensor>;
38 AclTensorManager(T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
39 IInternalBufferManager *inter_mgr);
41 virtual ~AclTensorManager() = default;
43 void allocateConsts(void);
44 void allocateNonconsts(void);
45 void deallocateConsts(void);
46 void deallocateNonconsts(void);
48 void allocateInternalBufferManager(void);
49 void deallocateInternalBufferManager(void);
51 void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
52 bool as_const, size_t num_uses);
53 void buildSubtensor(const ir::OperandIndex &parent, const ir::OperandIndex &child,
54 const ::arm_compute::TensorShape &shape,
55 const ::arm_compute::Coordinates &coordinates, size_t rank,
58 std::shared_ptr<T_ITensor> findTensorAsParent(const ir::OperandIndex &ind);
60 void startLifetime(const ir::OperandIndex &ind);
61 void finishLifetime(const ir::OperandIndex &ind);
63 std::shared_ptr<T_ITensor> at(const ir::OperandIndex &ind);
65 ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
66 ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
67 ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
69 std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void);
71 void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
73 void tryDeallocConstants(void);
76 std::unique_ptr<T_AclMemoryManager> _const_mgr;
77 std::unique_ptr<T_AclMemoryManager> _nonconst_mgr;
78 std::unique_ptr<IInternalBufferManager> _inter_mgr;
79 ir::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
} // namespace acl_common
} // namespace backend

#include "util/logging.h"
96 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
97 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::AclTensorManager(
98 T_AclMemoryManager *const_mgr, T_AclMemoryManager *nonconst_mgr,
99 IInternalBufferManager *inter_mgr)
100 : _const_mgr{const_mgr}, _nonconst_mgr{nonconst_mgr}, _inter_mgr{inter_mgr}
105 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
106 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::allocateConsts(void)
108 _const_mgr->allocate();
111 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
112 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::allocateNonconsts(void)
114 _nonconst_mgr->allocate();
117 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
118 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::deallocateConsts(void)
120 _const_mgr->deallocate();
123 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
124 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::deallocateNonconsts(void)
126 _nonconst_mgr->deallocate();
129 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
130 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::allocateInternalBufferManager(void)
132 _inter_mgr->allocate();
135 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
136 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::deallocateInternalBufferManager(void)
138 _inter_mgr->deallocate();
141 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
142 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::buildTensor(
143 const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, bool as_const,
146 assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
149 _const_mgr->buildTensor(ind, info, rank, num_uses);
150 _ind_to_mgr.insert({ind, *_const_mgr});
154 _nonconst_mgr->buildTensor(ind, info, rank, num_uses);
155 _ind_to_mgr.insert({ind, *_nonconst_mgr});
159 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
160 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::buildSubtensor(
161 const ir::OperandIndex &parent, const ir::OperandIndex &child,
162 const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates,
163 size_t rank, bool extent_parent)
165 assert(_ind_to_mgr.find(child) == _ind_to_mgr.end());
166 std::shared_ptr<T_ITensor> parent_tensor = findTensorAsParent(parent);
167 assert(parent_tensor);
168 _nonconst_mgr->buildSubtensor(parent_tensor, child, shape, coordinates, rank, extent_parent);
169 _ind_to_mgr.insert({child, *_nonconst_mgr});
172 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
173 std::shared_ptr<T_ITensor>
174 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::findTensorAsParent(const ir::OperandIndex &ind)
177 auto &tensors = _nonconst_mgr->tensors();
178 auto &subtensors = _nonconst_mgr->subtensors();
179 if (tensors.find(ind) != tensors.end())
181 // Parent is allocated as tensor
184 else if (subtensors.find(ind) != subtensors.end())
186 // Parent is allocated as subtensor
187 return subtensors[ind];
195 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
196 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::startLifetime(const ir::OperandIndex &ind)
198 assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
199 _ind_to_mgr.at(ind).startLifetime(ind);
202 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
203 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::finishLifetime(const ir::OperandIndex &ind)
205 assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
206 _ind_to_mgr.at(ind).finishLifetime(ind);
209 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
210 std::shared_ptr<T_ITensor>
211 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::at(const ir::OperandIndex &ind)
213 if (_ind_to_mgr.find(ind) == _ind_to_mgr.end())
216 auto &tensors = _ind_to_mgr.at(ind).tensors();
217 if (tensors.find(ind) != tensors.end())
219 return tensors.at(ind);
223 auto subtensors = _ind_to_mgr.at(ind).subtensors();
224 auto itr = subtensors.find(ind);
225 if (itr == subtensors.end())
232 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
233 ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &
234 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::constTensors(void)
236 return _const_mgr->tensors();
239 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
240 ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &
241 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::nonconstTensors(void)
243 return _nonconst_mgr->tensors();
246 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
247 ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
248 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::nonconstSubtensors(void)
250 return _nonconst_mgr->subtensors();
253 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
254 std::shared_ptr<::arm_compute::IMemoryManager>
255 AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::internal_buffer_manager(void)
257 return _inter_mgr->internal_buffer_manager();
260 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
261 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::iterate(
262 const std::function<void(const ir::OperandIndex &)> &fn)
264 for (auto it : _nonconst_mgr->tensors())
267 for (auto it : _nonconst_mgr->subtensors())
270 for (auto it : _const_mgr->tensors())
274 template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
275 void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::tryDeallocConstants(void)
277 auto &tensors = _const_mgr->tensors();
279 for (auto it = tensors.begin(); it != tensors.end();)
281 const auto &ind = it->first;
282 auto tensor = it->second;
283 // NOTE The condition "tensor->num_uses() < 2" is used to prevent deallocating a constant tensor
284 // used in several nodes.
285 if (tensor->handle() && !tensor->handle()->is_used() && tensor->num_uses() < 2)
287 VERBOSE(AclTensorManager) << "Tensor " << ind
288 << " will be deallocated as an unused constant tensor" << std::endl;
289 tensor->allocator()->free();
291 it = tensors.erase(it);
} // namespace acl_common
} // namespace backend

#endif // __ONERT_BACKEND_ACL_COMMON_TENSOR_MANAGER_H__