#include <cassert>
#include <memory>

#include "MemoryAllocator.h"

#include "operand/Object.h"
namespace neurun
{
{
  // NOTE(review): the signature line for this body is not visible in this chunk;
  // from the TODOs below it appears to be TensorBuilder::prepare() — confirm.
  //
  // Creates one operand::Tensor per registered tensor info, reserves a memory
  // block for each through the (global) BumpAllocator, then finalizes the
  // allocator and points every tensor at its slice of the arena.
  assert(_tensors.size() == 0);

  // TODO Do not use global var for accessing MemoryAllocator
  auto &mem_alloc = BumpAllocator::instance();

  // TODO Remove this when okay to pass tests in runtime_android_nn_test
  mem_alloc.reset();

  for (auto &entry : _tensor_info_map)
  {
    auto ind = entry.first;
    const auto &info = entry.second;
    auto tensor = std::make_shared<operand::Tensor>(info);
    _tensors[ind] = tensor;
    // If we do not make tensor here currently, stages would cause segment fault

    const auto size = info.total_size(); // NOTE This size may not be accurate
    auto mem_id = mem_alloc.allocate(size);
    _tensor_mem_map[ind] = mem_id;
  }
  // Every registered tensor must have received a memory block.
  assert(_tensor_info_map.size() == _tensor_mem_map.size());

  // TODO below code can be moved in TensorBuilder::allocate()
  // if StageGenerator was modified like
  // from
  // fn->configure(ifm_alloc->buffer(), param.ifm_shape, ker_alloc->buffer(), param.ker_shape,
  // to
  // fn->configure(ifm_alloc, param.ifm_shape, ker_alloc, param.ker_shape,
  mem_alloc.finalize();
  assert(mem_alloc.base());

  // Finalize fixes the arena base, so buffer addresses can now be handed out.
  for (const auto &entry : _tensor_mem_map)
  {
    auto ind = entry.first;
    auto mem_id = entry.second;
    auto mem_blk = mem_alloc.getMemoryBlock(mem_id);
    auto &tensor = _tensors[ind];
    tensor->setBuffer(mem_alloc.base() + mem_blk.base_offset);
  }
}
assert(_tensor_info_map.size() == _tensors.size());
// NOTE For now nothing to do. Allocation is done in prepare stage, which is wrong
- // See also: comment in `prepare()`
}
std::shared_ptr<::arm_compute::ITensor> TensorBuilder::tensorAt(const graph::operand::Index &ind)