This commit replaces registering tensors directly in the compiler with using TensorRegister.
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
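
Each backend now exposes a TensorRegister through its BackendContext, and the compiler delegates per-subgraph tensor registration to it instead of registering tensors itself. For context, the interface is roughly shaped as follows (a minimal sketch inferred from the registerTensors() call sites in this diff; the parameter types are assumptions, not the actual header):

  // Hypothetical sketch of backend/ITensorRegister.h, inferred from the
  // tensor_register->registerTensors(subg, lower_info) call sites below.
  struct ITensorRegister
  {
    virtual ~ITensorRegister() = default;

    // Register every tensor (and SubTensor) used by the given subgraph
    // with the backend's TensorBuilder.
    virtual void registerTensors(const model::Subgraph &subg,
                                 const graph::LowerInfoMap *lower_info_map) = 0;
  };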
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
- const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout frontend_layout,
+ const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout,
model::Layout backend_layout, bool as_const)
{
assert(_tensor_mgr->constTensors().size() == 0);
_tensor_info_map.emplace(ind, info);
_apply_dim_correction_map.emplace(ind, true);
- _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
+ _tensor_layouts_map.insert({ind, std::make_pair(model::Layout::UNKNOWN, backend_layout)});
if (as_const)
_constants.append(ind);
const auto &info = entry.second;
// NOTE A SubTensor's layout must be the same as its parent tensor's layout
const auto &root_parent = findRootParent(ind);
- const auto &frontend_layout = _tensor_layouts_map[root_parent].first;
const auto &backend_layout = _tensor_layouts_map[root_parent].second;
- auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), frontend_layout, backend_layout,
- _apply_dim_correction_map[ind]);
+ auto tensor_info = asTensorInfo(info.shape(), info.typeInfo(), model::Layout::UNKNOWN,
+ backend_layout, _apply_dim_correction_map[ind]);
_tensor_mgr->buildTensor(ind, tensor_info, info.shape().rank(), _constants.contains(ind));
}
}
// NOTE A SubTensor's layout must be the same as its parent tensor's layout
const auto &root_parent = findRootParent(parent);
- const auto &frontend_layout = _tensor_layouts_map[root_parent].first;
const auto &backend_layout = _tensor_layouts_map[root_parent].second;
- auto shape = asTensorShape(info.shape(), frontend_layout, backend_layout,
+ auto shape = asTensorShape(info.shape(), model::Layout::UNKNOWN, backend_layout,
_apply_dim_correction_map[current]);
::arm_compute::Coordinates coordinates =
- asTensorCoordinate(info.offset(), frontend_layout, backend_layout);
+ asTensorCoordinate(info.offset(), model::Layout::UNKNOWN, backend_layout);
_tensor_mgr->buildSubtensor(parent, current, shape, coordinates, info.shape().rank(), true);
stack.pop();
}
}
void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
- const model::OperandInfo &info,
- model::Layout frontend_layout, model::Layout backend_layout,
+ const model::OperandInfo &info, model::Layout, model::Layout,
bool as_const)
{
_tensor_info_map.emplace(ind, info);
- _tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
if (as_const)
_constants.append(ind);
void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
- const auto tensor_info = asTensorInfo(_tensor_info_map.at(ind), _tensor_layouts_map[ind].first);
+ const auto tensor_info = asTensorInfo(_tensor_info_map.at(ind), model::Layout::UNKNOWN);
const auto size = tensor_info.total_size();
_tensor_mgr->buildTensor(ind, tensor_info, _constants.contains(ind));
_tensor_mgr->claimPlan(ind, size);
private:
std::unique_ptr<TensorManager> _tensor_mgr;
model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
- model::OperandIndexMap<std::pair<model::Layout, model::Layout>> _tensor_layouts_map;
model::OperandIndexSequence _constants;
};
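
The isRegistered() check used below is assumed to be a plain membership test on the builder's info map; a minimal sketch under that assumption:

  // Hypothetical implementation: a registered operand shows up as an entry
  // in _tensor_info_map, filled by registerTensorInfo().
  bool TensorBuilder::isRegistered(const model::OperandIndex &ind) const
  {
    return _tensor_info_map.find(ind) != _tensor_info_map.end();
  }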
#include "backend/IConstantInitializer.h"
#include "backend/IKernelGenerator.h"
#include "backend/IShapeFixer.h"
+#include "backend/ITensorRegister.h"
#include "cpp14/memory.h"
namespace neurun
subg.accept(subtensor_analyzer);
});
- // Fix shapes
+ // Fix shapes and register tensors
graph.subgraphs().iterate(
[&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
auto backend = graph.getLowerInfo(subg_index)->backend();
auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer;
shape_fixer->setLowerInfoMap(graph.getLowerInfo());
shape_fixer->fix(subg);
+ const auto tensor_register =
+ graph.backend_resolver()->getBackendContext(backend)->tensor_register;
+ tensor_register->registerTensors(subg, graph.getLowerInfo());
});
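
The branch that used to choose between registerSubTensorInfo() and registerTensorInfo() below now lives inside each backend's TensorRegister. A rough sketch of what such a registerTensors() implementation is expected to do per operand (the helper names and layout lookups are assumptions):

  // Hypothetical per-operand body of a backend TensorRegister, mirroring
  // the registration logic this commit removes from the compiler.
  void registerOperand(const model::OperandIndex &ind, const model::Operand &obj)
  {
    if (tensor_builder()->isRegistered(ind))
      return;
    if (supportSubTensor() && obj.parent_info() != nullptr)
    {
      // A SubTensor shares its parent tensor's allocation
      tensor_builder()->registerSubTensorInfo(ind, compiler::SubTensorInfo(obj));
    }
    else
    {
      tensor_builder()->registerTensorInfo(ind, obj.info(), frontend_layout(ind),
                                           backend_layout(ind), obj.isConstant());
    }
  }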
graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
const auto lower_info = graph.getLowerInfo(ind);
for (auto factor : lower_info->def_factors())
{
- bool isSubTensor = false;
auto backend = factor.backend();
auto tensor_builder = graph.backend_resolver()->getBackendContext(backend)->tensor_builder;
- if (backend->config()->SupportSubTensorAlloc())
+ if (!tensor_builder->isRegistered(ind))
{
- const auto parentInfo = obj.parent_info();
- if (parentInfo != nullptr)
- {
- isSubTensor = true;
- }
- }
+      // These tensors do not appear in any subgraph (no uses and no defs),
+      // so they cannot be SubTensors
+ assert(obj.parent_info() == nullptr);
- if (isSubTensor)
- {
- const compiler::SubTensorInfo info(obj);
- tensor_builder->registerSubTensorInfo(ind, info);
- }
- else
- {
const auto info = obj.info();
- // NOTE This assumes an operand can have one layout, and only Permutate can have
- // different layouts for input and output
- const auto &def = *obj.getDef().list().cbegin();
- auto frontend_layout =
- graph.subgraphs().at(graph.subgraphs().getOperation(def)).getLayout();
- if (frontend_layout == model::Layout::UNKNOWN)
- {
- const auto &use = *obj.getUses().list().cbegin();
- frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
- }
+      // TODO Get the layout of this operand from the frontend
+ const auto frontend_layout = model::Layout::UNKNOWN;
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout,
obj.isConstant());
+ }
+
+    // Only tensors that are not SubTensors need their own allocation here
+ if (!backend->config()->SupportSubTensorAlloc() || obj.parent_info() == nullptr)
+ {
// Workaround for the static memory planner: notify first use here so that this tensor is never deallocated
tensor_builder->notifyFirstUse(ind);
}
#include "backend/IShapeFixer.h"
#include "backend/IConfig.h"
#include "backend/IConstantInitializer.h"
+#include "backend/ITensorRegister.h"
#include "backend/Backend.h"
#include "compiler/SubTensorInfo.h"
model::OperandIndexMap<uint32_t> def_map;
model::OperandIndexSequence constants;
+ iterate([&](const neurun::compiler::Linear::Element &element) {
+ const auto backend = element.lower_info->backend();
+ const auto tensor_register =
+ _graph.backend_resolver()->getBackendContext(backend)->tensor_register;
+ tensor_register->registerTensors(*element.subgraph, _graph.getLowerInfo());
+ });
+
// Prepare scanning
_graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
const auto lower_info = _graph.getLowerInfo(ind);
constants.append(ind);
}
- model::Subgraphs &subgraphs = _graph.subgraphs();
for (auto factor : lower_info->def_factors())
{
- bool isSubTensor = false;
auto backend = factor.backend();
auto tensor_builder = _graph.backend_resolver()->getBackendContext(backend)->tensor_builder;
- if (backend->config()->SupportSubTensorAlloc())
+ if (!tensor_builder->isRegistered(ind))
{
- const auto parentInfo = obj.parent_info();
- if (parentInfo != nullptr)
- {
- isSubTensor = true;
- }
- }
+      // These tensors do not appear in any subgraph (no uses and no defs),
+      // so they cannot be SubTensors
+ assert(obj.parent_info() == nullptr);
- if (isSubTensor)
- {
- const compiler::SubTensorInfo info(obj);
- tensor_builder->registerSubTensorInfo(ind, info);
- }
- else
- {
const auto info = obj.info();
-
- // NOTE This assumes an operand can have one layout, and only Permutate can have
- // different layouts for input and output
- const auto &def = *obj.getDef().list().cbegin();
+      // TODO Get the layout of this operand from the frontend
auto frontend_layout = model::Layout::UNKNOWN;
-
- if (subgraphs.containsOperation(def))
- {
- frontend_layout = subgraphs.at(subgraphs.getOperation(def)).getLayout();
- if (frontend_layout == model::Layout::UNKNOWN)
- {
- const auto &use = *obj.getUses().list().cbegin();
- frontend_layout = subgraphs.at(subgraphs.getOperation(use)).getLayout();
- }
- }
-
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, is_const);
}