/*
 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "TensorBuilder.h"

#include <util/logging.h>

#include <cassert>

namespace onert
{
namespace backend
{
namespace cpu
{
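// Note: both tensor managers below share a single cpu_common::TensorRegistry, so a
// tensor can be looked up through the same registry whether it was built as a static
// or a dynamic tensor.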
TensorBuilder::TensorBuilder()
    : _tensor_reg{new cpu_common::TensorRegistry()},
      _static_tensor_mgr{new cpu_common::StaticTensorManager(_tensor_reg)},
      _dynamic_tensor_mgr{new cpu_common::DynamicTensorManager(_tensor_reg)}
{
  /* empty */
}
void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
                                       ir::Layout layout)
{
  _tensor_info_map.emplace(ind, info);

  // The CPU backend supports only the NHWC layout
  assert(layout == ir::Layout::NHWC);

  if (info.isDynamic())
  {
    _dynamic_tensor_mgr->buildTensor(ind, info, layout);
  }
  else
  {
    _static_tensor_mgr->buildTensor(ind, info, layout, info.isConstant());
  }
}
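// notifyFirstUse()/notifyLastUse() bracket the liveness of each operand: the first
// use claims space in the static memory plan and the last use releases it. Dynamic
// tensors are skipped because their size is only known at run time.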
void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
{
  assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
  const auto tensor_info = _tensor_info_map.at(ind);

  if (!at(ind)->is_dynamic())
  {
    const auto size = tensor_info.total_size();
    _static_tensor_mgr->claimPlan(ind, size);
  }
}
void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind)
{
  if (!at(ind)->is_dynamic())
  {
    _static_tensor_mgr->releasePlan(ind);
  }
}
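// A minimal usage sketch of the builder lifecycle (the operand index `ind` and the
// `info` describing a static tensor are hypothetical, for illustration only):
//
//   TensorBuilder builder;
//   builder.registerTensorInfo(ind, info, ir::Layout::NHWC); // route to static/dynamic mgr
//   builder.notifyFirstUse(ind);                             // claim space in the static plan
//   builder.notifyLastUse(ind);                              // release the claim
//   builder.prepare();                                       // allocate consts and non-consts
//   auto tensor = builder.tensorAt(ind);                     // look up the built tensor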
bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
{
  return _tensor_info_map.find(ind) != _tensor_info_map.end();
}
void TensorBuilder::prepare(void)
{
  _static_tensor_mgr->allocateConsts();
  _static_tensor_mgr->allocateNonconsts();
}
void TensorBuilder::allocate()
{
  // NOTE For now there is nothing to do here. Allocation is done in the prepare stage,
  //      which is not ideal, because CPU kernels require `ITensor`s to be allocated
  //      before kernel generation.
}
std::shared_ptr<ITensor> TensorBuilder::tensorAt(const ir::OperandIndex &ind)
{
  return _tensor_reg->getITensor(ind);
}
std::shared_ptr<IPortableTensor> TensorBuilder::portableAt(const ir::OperandIndex &ind)
{
  return _tensor_reg->getPortableTensor(ind);
}
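// Note: tensorAt() exposes a tensor through the generic ITensor interface, while
// portableAt() returns the more specific IPortableTensor view of the same registry
// entry.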
bool TensorBuilder::setExternalTensor(const ir::OperandIndex &ind,
                                      const std::shared_ptr<IPortableTensor> &tensor)
{
  return _tensor_reg->setExternalTensor(ind, tensor);
}
void TensorBuilder::iterate(const IterateFunction &fn) { _static_tensor_mgr->iterate(fn); }
std::shared_ptr<cpu_common::Tensor> TensorBuilder::at(const ir::OperandIndex &ind)
{
  return _tensor_reg->getManagedTensor(ind);
}
std::unique_ptr<ITensorManager> TensorBuilder::releaseStaticTensorManager(void)
{
  return std::move(_static_tensor_mgr);
}
std::unique_ptr<ITensorManager> TensorBuilder::releaseDynamicTensorManager(void)
{
  return std::move(_dynamic_tensor_mgr);
}
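// Note: the two release*TensorManager() methods move the managers out of the builder;
// after either call the corresponding member unique_ptr is empty, so the builder must
// not be asked to manage tensors of that kind afterwards.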
} // namespace cpu
} // namespace backend
} // namespace onert