// Abstract interface for CPU-backend memory allocators.
// Implementations (e.g. BumpAllocator) manage one contiguous arena and hand
// out integer block ids rather than raw pointers.
struct IMemoryAllocator
{
virtual ~IMemoryAllocator() = default;
- // FIXME Remove this when instance() is removed
- virtual void reset() = 0;
// Reserve a block of the given byte size; returns an id identifying the block.
virtual uint32_t allocate(size_t) = 0;
// Release the block previously returned by allocate().
virtual void free(uint32_t) = 0;
// Finish the allocation phase; afterwards base() is expected to be valid
// (see TensorBuilder::prepare, which asserts base() right after finalize()).
virtual void finalize() = 0;
{
public:
virtual ~BumpAllocator() override;
- virtual void reset() override;
// IMemoryAllocator overrides (bump-pointer strategy; see definitions in the .cc).
virtual uint32_t allocate(size_t size) override;
virtual void free(uint32_t mem_id) override;
virtual void finalize() override;
// Current bump offset — presumably the next byte offset to hand out; confirm in .cc.
uint32_t _curr_pos = 0;
// Next block id to assign from allocate() — presumably; confirm in .cc.
uint32_t _mem_idx = 0;
// Maps block id -> its MemoryBlock record (offset/size within the arena).
std::unordered_map<uint32_t, MemoryBlock> _mem_blk_map;
-
-public:
- // This should be moved into something class in backend, not as global var
- static IMemoryAllocator &instance()
- {
- static BumpAllocator inst;
- return inst;
- }
};
} // namespace cpu
namespace cpu
{
-TensorBuilder::TensorBuilder()
+// TODO Apply FirstFitAllocator in the future
+// Each builder now owns its allocator instance, replacing the removed
+// process-wide BumpAllocator::instance() singleton (and its reset() hack).
+TensorBuilder::TensorBuilder() : _mem_alloc(std::make_shared<BumpAllocator>())
{
// DO NOTHING
}
// Allocate backing memory for every registered tensor and bind each tensor's
// buffer to its slice of the allocator's arena. Must be called once, before
// any tensor is used (asserts _tensors is still empty).
void TensorBuilder::prepare(void)
{
assert(_tensors.size() == 0);
-
- // TODO Do not use global var for accessing MemoryAllocator
- auto &mem_alloc = BumpAllocator::instance();
-
- // TODO Remove this when okay to pass tests in runtime_android_nn_test
- mem_alloc.reset();
+ // The allocator is injected via the constructor; a fresh builder implies a
+ // fresh allocator, so the old global reset() is no longer needed.
+ assert(_mem_alloc);
// Pass 1: reserve a memory block per operand.
// NOTE(review): `info` and `ind` come from `entry` in elided context lines —
// presumably ind = entry.first, info = entry.second; confirm in full source.
for (auto &entry : _tensor_info_map)
{
// If we do not make tensor here currently, stages would cause segment fault
const auto size = info.total_size(); // NOTE This size may not be accurate
- auto mem_id = mem_alloc.allocate(size);
+ auto mem_id = _mem_alloc->allocate(size);
_tensor_mem_map[ind] = mem_id;
}
assert(_tensor_info_map.size() == _tensor_mem_map.size());
// fn->configure(ifm_alloc->buffer(), param.ifm_shape, ker_alloc->buffer(), param.ker_shape,
// to
// fn->configure(ifm_alloc, param.ifm_shape, ker_alloc, param.ker_shape,
// Finalize fixes the arena; base() must be valid from here on.
- mem_alloc.finalize();
- assert(mem_alloc.base());
+ _mem_alloc->finalize();
+ assert(_mem_alloc->base());
// Pass 2: point each tensor at its block inside the finalized arena.
for (auto &entry : _tensor_mem_map)
{
auto ind = entry.first;
auto mem_id = entry.second;
- auto mem_blk = mem_alloc.getMemoryBlock(mem_id);
+ auto mem_blk = _mem_alloc->getMemoryBlock(mem_id);
auto &tensor = _tensors[ind];
- tensor->setBuffer(mem_alloc.base() + mem_blk.base_offset);
+ tensor->setBuffer(_mem_alloc->base() + mem_blk.base_offset);
}
}
namespace cpu
{
+// Forward declaration only; the concrete allocator (BumpAllocator) is chosen
+// in the .cc, keeping this header free of the implementation's dependencies.
+struct IMemoryAllocator;
+
class TensorBuilder : public ITensorBuilder
{
public:
// Operand index -> ACL tensor metadata, registered before prepare().
std::unordered_map<graph::operand::Index, ::arm_compute::TensorInfo> _tensor_info_map;
// Operand index -> constructed tensor object.
std::unordered_map<graph::operand::Index, std::shared_ptr<operand::Tensor>> _tensors;
// Operand index -> allocator block id, filled in by prepare().
std::unordered_map<graph::operand::Index, uint32_t> _tensor_mem_map;
+ // Builder-owned allocator; replaces the former global BumpAllocator singleton.
+ std::shared_ptr<IMemoryAllocator> _mem_alloc;
};
} // namespace cpu
// Checks that successive allocations return usable block ids.
TEST(BumpAllocator, allocate_test)
{
- auto &allocator = ::neurun::backend::cpu::BumpAllocator::instance();
-
- allocator.reset();
+ // Local instance: tests no longer share (or need to reset) singleton state.
+ ::neurun::backend::cpu::BumpAllocator allocator;
size_t mem_sz0 = 10;
auto mem_id0 = allocator.allocate(mem_sz0);
// Checks allocator behavior around finalize() (body continues past this view).
TEST(BumpAllocator, finalize_test)
{
- auto &allocator = ::neurun::backend::cpu::BumpAllocator::instance();
-
- allocator.reset();
+ // Local instance: tests no longer share (or need to reset) singleton state.
+ ::neurun::backend::cpu::BumpAllocator allocator;
size_t mem_sz0 = 10;
allocator.allocate(mem_sz0);