/*
 * Copyright (c) 2021 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TensorManager.h"

#include <util/logging.h>

#include <cassert>

namespace onert
{
namespace backend
{
namespace gpu_cl
{
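
// TensorManager routes tensor requests to two MemoryManager instances: one
// that owns constant tensors and one that owns non-constant tensors.
// _ind_to_mgr records which manager is responsible for each operand index.
//
// A minimal usage sketch (`ind`, `info`, `type` and createMemoryManager() are
// illustrative placeholders, not part of this file):
//
//   TensorManager tm{createMemoryManager(), createMemoryManager()};
//   tm.buildTensor(ind, info, type); // register the operand's tensor
//   tm.allocateConsts();             // back constant tensors with memory
//   tm.allocateNonconsts();          // back all remaining tensors
//   auto tensor = tm.at(ind);        // fetch the ICLTensor for this operand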
TensorManager::TensorManager(MemoryManager *const_mgr, MemoryManager *nonconst_mgr)
  : _const_mgr{const_mgr}, _nonconst_mgr{nonconst_mgr}
{
  // DO NOTHING
}
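
// Allocation and deallocation simply forward to the owning MemoryManager.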
void TensorManager::allocateConsts(void) { _const_mgr->allocate(); }

void TensorManager::allocateNonconsts(void) { _nonconst_mgr->allocate(); }

void TensorManager::deallocateConsts(void) { _const_mgr->deallocate(); }

void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
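
// Registers a tensor for operand `ind`: constant operands go to _const_mgr,
// all others to _nonconst_mgr, and the owning manager is recorded in
// _ind_to_mgr.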
void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info,
                                TensorType type)
{
  assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());

  if (info.isConstant())
  {
    _const_mgr->buildTensor(ind, info, type);
    _ind_to_mgr.insert({ind, *_const_mgr});
  }
  else
  {
    _nonconst_mgr->buildTensor(ind, info, type);
    _ind_to_mgr.insert({ind, *_nonconst_mgr});
  }
}
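
// Creates a new non-constant tensor from a bare shape and returns the operand
// index assigned to it.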
ir::OperandIndex TensorManager::addTensor(const ir::Shape &shape)
{
  auto ind = _nonconst_mgr->addTensor(shape);
  _ind_to_mgr.insert({ind, *_nonconst_mgr});

  return ind;
}
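
// Lifetime hooks forward to whichever manager owns the operand's tensor.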
void TensorManager::startLifetime(const ir::OperandIndex &ind)
{
  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
  _ind_to_mgr.at(ind).startLifetime(ind);
}

void TensorManager::finishLifetime(const ir::OperandIndex &ind)
{
  assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
  _ind_to_mgr.at(ind).finishLifetime(ind);
}
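
// Returns the tensor registered for `ind`, or nullptr if no manager owns one.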
std::shared_ptr<operand::ICLTensor> TensorManager::at(const ir::OperandIndex &ind)
{
  if (_ind_to_mgr.find(ind) == _ind_to_mgr.end())
    return nullptr;

  auto &tensors = _ind_to_mgr.at(ind).tensors();
  if (tensors.find(ind) != tensors.end())
  {
    return tensors.at(ind);
  }

  return nullptr;
}
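
// Direct access to each manager's operand-to-tensor map.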
ir::OperandIndexMap<std::shared_ptr<operand::CLTensor>> &TensorManager::constTensors(void)
{
  return _const_mgr->tensors();
}

ir::OperandIndexMap<std::shared_ptr<operand::CLTensor>> &TensorManager::nonconstTensors(void)
{
  return _nonconst_mgr->tensors();
}
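
// Applies `fn` to every registered operand index: non-constant tensors first,
// then constants.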
void TensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
{
  for (auto &&it : _nonconst_mgr->tensors())
    fn(it.first);

  for (auto &&it : _const_mgr->tensors())
    fn(it.first);
}
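
// Early deallocation of constants is not supported by this backend yet.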
void TensorManager::tryDeallocConstants(void)
{
  // NYI
}

} // namespace gpu_cl
} // namespace backend
} // namespace onert