Remove `wrapTensor` methods from the `ITensorBuilder` interface and its TensorBuilder, TensorManager, and MemoryManager implementations (ACL, cpu, srcn), along with the now-unused `_objects` maps; call sites use `tensorAt` instead.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
_subtensors[child_ind] = subtensor;
}
- std::shared_ptr<T_Object> wrapTensor(const model::OperandIndex &ind)
- {
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- if (_tensors.find(ind) != _tensors.end())
- {
- return _objects[ind] = std::make_shared<T_Object>(_tensors.at(ind));
- }
- else
- {
- return _objects[ind] = std::make_shared<T_Object>(_subtensors.at(ind));
- }
- }
- }
-
model::OperandIndexMap<std::shared_ptr<T_Tensor>> &tensors(void) { return _tensors; }
model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &subtensors(void) { return _subtensors; }
- model::OperandIndexMap<std::shared_ptr<T_Object>> &objects(void) { return _objects; }
-
private:
model::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
model::OperandIndexMap<std::shared_ptr<T_SubTensor>> _subtensors;
- model::OperandIndexMap<std::shared_ptr<T_Object>> _objects;
};
} // namespace acl_common
void startLifetime(const model::OperandIndex &ind);
void finishLifetime(const model::OperandIndex &ind);
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
model::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
-std::shared_ptr<backend::operand::IObject>
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::wrapTensor(
- const model::OperandIndex &ind)
-{
- assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
- return _ind_to_mgr.at(ind).wrapTensor(ind);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
std::shared_ptr<T_ITensor> AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::at(
const ::neurun::model::OperandIndex &ind)
{
void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor, T_Object>::tryDeallocConstants(void)
{
auto &tensors = _const_mgr->tensors();
- auto &objects = _const_mgr->objects();
for (auto it = tensors.begin(); it != tensors.end();)
{
tensor->allocator()->free();
tensor.reset();
it = tensors.erase(it);
- objects.erase(ind);
}
else
{
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
void preVisit(const model::Operation &node) override;
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
-std::shared_ptr<backend::operand::IObject>
-TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::wrapTensor(
- const model::OperandIndex &ind)
-{
- return _tensor_mgr->wrapTensor(ind);
-}
-
-template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::iterate(
const IterateFunction &fn)
{
}
}
-std::shared_ptr<backend::operand::IObject> MemoryManager::wrapTensor(const model::OperandIndex &ind)
-{
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- return _objects[ind] = std::make_shared<::neurun::backend::operand::Object>(_tensors.at(ind));
- }
-}
-
} // namespace cpu
} // namespace backend
} // namespace neurun
model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
-
private:
IMemoryPlanner *createMemoryPlanner();
private:
model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
- model::OperandIndexMap<std::shared_ptr<::neurun::backend::operand::Object>> _objects;
model::OperandIndexMap<Block> _tensor_mem_map;
std::shared_ptr<IMemoryPlanner> _mem_planner;
std::shared_ptr<Allocator> _mem_alloc;
return _tensor_mgr->at(ind);
}
-std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
-{
- return _tensor_mgr->wrapTensor(ind);
-}
-
void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
-
void iterate(const IterateFunction &fn) override;
void preVisit(const model::Operation &) override { /* DO NOTHING */}
_ind_to_mgr.at(ind).releasePlan(ind);
}
-std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
-{
- assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
- return _ind_to_mgr.at(ind).wrapTensor(ind);
-}
-
std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
void claimPlan(const model::OperandIndex &ind, uint32_t size);
void releasePlan(const model::OperandIndex &ind);
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
const auto &kernel_obj = _operands.at(kernel_index);
util::Coordinates permutation{0, 1, 2, 3};
const auto frontend_layout = _current_subg_layout;
- const auto backend_layout = _tensor_builder->wrapTensor(kernel_index)->ptr()->layout();
+ const auto backend_layout = _tensor_builder->tensorAt(kernel_index)->layout();
assert(frontend_layout == neurun::model::Layout::NHWC ||
frontend_layout == neurun::model::Layout::NCHW);
assert(backend_layout == neurun::model::Layout::NHWC ||
const auto &kernel_obj = _operands.at(kernel_index);
util::Coordinates permutation{0, 1, 2, 3};
const auto frontend_layout = _current_subg_layout;
- const auto backend_layout = _tensor_builder->wrapTensor(kernel_index)->ptr()->layout();
+ const auto backend_layout = _tensor_builder->tensorAt(kernel_index)->layout();
assert(frontend_layout == neurun::model::Layout::NHWC ||
frontend_layout == neurun::model::Layout::NCHW);
assert(backend_layout == neurun::model::Layout::NHWC ||
const auto &kernel_index = node.getInputs().at(model::operation::TransposeConv::KERNEL);
const auto &kernel_obj = _operands.at(kernel_index);
const auto frontend_layout = _current_subg_layout;
- const auto backend_layout = _tensor_builder->wrapTensor(kernel_index)->ptr()->layout();
+ const auto backend_layout = _tensor_builder->tensorAt(kernel_index)->layout();
assert(frontend_layout == neurun::model::Layout::NHWC ||
frontend_layout == neurun::model::Layout::NCHW);
assert(backend_layout == neurun::model::Layout::NHWC ||
}
}
-std::shared_ptr<backend::operand::IObject> MemoryManager::wrapTensor(const model::OperandIndex &ind)
-{
- if (_objects.find(ind) != _objects.end())
- {
- return _objects.at(ind);
- }
- else
- {
- return _objects[ind] = std::make_shared<::neurun::backend::operand::Object>(_tensors.at(ind));
- }
-}
-
} // namespace srcn
} // namespace backend
} // namespace neurun
model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
-
private:
IMemoryPlanner *createMemoryPlanner();
private:
model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
- model::OperandIndexMap<std::shared_ptr<::neurun::backend::operand::Object>> _objects;
model::OperandIndexMap<Block> _tensor_mem_map;
std::shared_ptr<IMemoryPlanner> _mem_planner;
std::shared_ptr<Allocator> _mem_alloc;
return _tensor_mgr->at(ind);
}
-std::shared_ptr<backend::operand::IObject> TensorBuilder::wrapTensor(const model::OperandIndex &ind)
-{
- return _tensor_mgr->wrapTensor(ind);
-}
-
void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
-
void iterate(const IterateFunction &fn) override;
void preVisit(const model::Operation &) override { /* DO NOTHING */}
_ind_to_mgr.at(ind).releasePlan(ind);
}
-std::shared_ptr<backend::operand::IObject> TensorManager::wrapTensor(const model::OperandIndex &ind)
-{
- assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
- return _ind_to_mgr.at(ind).wrapTensor(ind);
-}
-
std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
void claimPlan(const model::OperandIndex &ind, uint32_t size);
void releasePlan(const model::OperandIndex &ind);
- std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind);
std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
virtual std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) = 0;
- virtual std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) = 0;
virtual void iterate(const IterateFunction &fn) = 0;
virtual void preVisit(const model::Operation &) = 0;