// DO NOTHING
}
-void TensorBuilder::registerTensorInfo(const ::neurun::graph::operand::Index &ind,
+void TensorBuilder::registerTensorInfo(const graph::operand::Index &ind,
const ::arm_compute::TensorInfo &info)
{
+  // Registration is only legal before any tensor has been materialized:
+  // _tensors is populated later (during prepare/allocate), so it must
+  // still be empty at this point.
assert(_tensors.size() == 0);
_tensor_info_map.insert({ind, info});
}
+// Record sub-tensor metadata keyed by operand index so a later allocation
+// phase can create the corresponding sub-tensors. Like registerTensorInfo,
+// this must run before any tensor has been allocated (hence the
+// empty-_tensors assert).
+void TensorBuilder::registerSubTensorInfo(const graph::operand::Index &ind,
+ const backend::operand::SubTensorInfo &info)
+{
+ assert(_tensors.size() == 0);
+
+ _subtensor_info_map.insert({ind, info});
+}
+
void TensorBuilder::notifyFirstUse(const graph::operand::Index &)
{
// DO NOTHING
// TODO Handle SubTensor(subsumption)
// Currently this TensorBuilder does not have subsumption info yet
+ assert(_subtensor_info_map.size() == 0);
for (auto &entry : _tensor_info_map)
{
*/
virtual void registerTensorInfo(const graph::operand::Index &ind,
const ::arm_compute::TensorInfo &info) override;
+  /**
+   * @brief Register subtensor information to allocate on ACL-CL backend
+   * @param[in] ind  Operand index of the subtensor
+   * @param[in] info SubTensor information to be registered
+   */
+ virtual void registerSubTensorInfo(const graph::operand::Index &ind,
+ const backend::operand::SubTensorInfo &info) override;
+
virtual void notifyFirstUse(const graph::operand::Index &) override;
virtual void notifyLastUse(const graph::operand::Index &) override;
private:
std::unordered_map<graph::operand::Index, ::arm_compute::TensorInfo> _tensor_info_map;
+ std::unordered_map<graph::operand::Index, backend::operand::SubTensorInfo> _subtensor_info_map;
std::unordered_map<graph::operand::Index, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
};
_tensor_info_map.insert({ind, info});
}
+// NOTE(review): this backend has no sub-tensor (subsumption) support yet;
+// reaching this override is a programming error, so fail loudly in debug
+// builds rather than silently dropping the registration.
+void TensorBuilder::registerSubTensorInfo(const graph::operand::Index &,
+ const backend::operand::SubTensorInfo &)
+{
+ // Not supported yet
+ assert(false);
+}
+
void TensorBuilder::notifyFirstUse(const graph::operand::Index &ind)
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
*/
virtual void registerTensorInfo(const graph::operand::Index &ind,
const ::arm_compute::TensorInfo &info) override;
+  /**
+   * @brief Register subtensor information to allocate on CPU backend
+   * @param[in] ind  Operand index of the subtensor
+   * @param[in] info SubTensor information to be registered
+   */
+ virtual void registerSubTensorInfo(const graph::operand::Index &ind,
+ const backend::operand::SubTensorInfo &info) override;
+
virtual void notifyFirstUse(const graph::operand::Index &) override;
virtual void notifyLastUse(const graph::operand::Index &) override;
+
virtual void prepare(void) override;
virtual void allocate(void) override;
#include "graph/operand/Index.h"
#include "operand/IObject.h"
+#include "backend/common/operand/SubTensorInfo.h"
namespace neurun
{
*/
virtual void registerTensorInfo(const graph::operand::Index &,
const ::arm_compute::TensorInfo &) = 0;
+  /**
+   * @brief Register subtensor information to allocate on backend
+   * @param[in] ind  Operand index of the subtensor
+   * @param[in] info SubTensor information to be registered
+   */
+ virtual void registerSubTensorInfo(const graph::operand::Index &,
+ const backend::operand::SubTensorInfo &) = 0;
+
virtual void notifyFirstUse(const graph::operand::Index &) = 0;
virtual void notifyLastUse(const graph::operand::Index &) = 0;
- // TODO Add an interface for adding subsumption info
virtual void prepare(void) = 0;
virtual void allocate(void) = 0;