* @param[in] layout Tensor data layout
+ * @param[in] as_const Whether to register the tensor as a constant
 */
void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
- model::Layout frontend_layout, model::Layout backend_layout) override;
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const) override;
/**
* @brief Register subtensor information to allocate on ACL-CL backend
* @param[in] ind Operand index
void prepare(void) override;
void allocate(void) override;
+ // TODO Implement these methods (currently no-op stubs)
+ void allocateConsts() override {}
+ void allocateNonconsts() override {}
+ void postFunctionPrepare() override {}
+ void finalize() override {}
+
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerTensorInfo(
const model::OperandIndex &ind, const model::OperandInfo &info, model::Layout frontend_layout,
- model::Layout backend_layout)
+ model::Layout backend_layout, bool /*as_const*/)
{
+ // TODO Add handling of tensors registered as const
assert(_mem_mgr->tensors().size() == 0);
_tensor_info_map.emplace(ind, info);
void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
const model::OperandInfo &info,
- model::Layout frontend_layout, model::Layout backend_layout)
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool /*as_const*/)
{
+ // TODO Add handling of tensors registered as const
_tensor_info_map.emplace(ind, info);
_tensor_layouts_map.insert({ind, std::make_pair(frontend_layout, backend_layout)});
}
* @param[in] layout Operand data layout
+ * @param[in] as_const Whether to register the tensor as a constant
 */
void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
- model::Layout frontend_layout, model::Layout backend_layout) override;
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const) override;
/**
* @brief Register subtensor information to allocate on CPU backend
* @param[in] ind Operand index
void allocate(void) override;
+ // TODO Implement these methods (currently no-op stubs)
+ void allocateConsts() override {}
+ void allocateNonconsts() override {}
+ void postFunctionPrepare() override {}
+ void finalize() override {}
+
std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) override;
* @brief Register tensor information to allocate on backend
+ * @param[in] as_const Whether to register the tensor as a constant
 */
virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &,
- model::Layout frontend_layout, model::Layout backend_layout) = 0;
+ model::Layout frontend_layout, model::Layout backend_layout,
+ bool as_const) = 0;
/**
* @brief Register subtensor information to allocate on backend
*/
virtual void notifyLastUse(const model::OperandIndex &) = 0;
virtual void prepare(void) = 0;
+ // TODO Remove allocate() once all of the newly appended APIs below have landed
virtual void allocate(void) = 0;
+ virtual void allocateConsts() = 0;
+ virtual void allocateNonconsts() = 0;
+ virtual void postFunctionPrepare() = 0;
+ virtual void finalize() = 0;
+
virtual std::shared_ptr<::neurun::backend::operand::ITensor>
tensorAt(const model::OperandIndex &ind) = 0;
virtual std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) = 0;
frontend_layout = graph.subgraphs().at(graph.subgraphs().getOperation(use)).getLayout();
}
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
- tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout);
+ tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, false);
// To make this never be deallocated, this is a workaround to use static memory planner
tensor_builder->notifyFirstUse(ind);
}
frontend_layout = _subgraphs->at(_subgraphs->getOperation(use)).getLayout();
}
const auto backend_layout = lower_info->def_factors().getOnlyElement().layout();
- tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout);
+ tensor_builder->registerTensorInfo(ind, info, frontend_layout, backend_layout, false);
}
tensor_builder_map[ind] = tensor_builder;