Now IMemoryPlanner and Allocator play the role of the former IMemoryAllocator.
This commit also includes some bug fixes applied to backend/cpu.
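
For reference, a rough sketch of the new split, inferred from the changes below
(the exact member names and signatures in MemoryPlanner.h may differ; the
return type of memory_plans() is assumed from how TensorBuilder iterates it):

    // Planner decides where each operand lives; it owns no memory itself.
    struct IMemoryPlanner
    {
      virtual ~IMemoryPlanner() = default;
      // Reserve a region of `size` bytes for the operand at `index`.
      virtual void claim(const graph::operand::Index &index, size_t size) = 0;
      // Mark the operand's region as reusable by later claims.
      virtual void release(const graph::operand::Index &index) = 0;
      // Total buffer size required once planning is done.
      virtual uint32_t capacity() = 0;
      // Final offset/size (Block) assigned to each operand (assumed container type).
      virtual std::unordered_map<graph::operand::Index, Block> &memory_plans() = 0;
    };

    // Allocator only holds the single backing buffer of the planned capacity.
    class Allocator
    {
    public:
      explicit Allocator(uint32_t capacity) : _base{new uint8_t[capacity]} {}
      uint8_t *base() const { return _base.get(); }

    private:
      std::unique_ptr<uint8_t[]> _base;
    };

With this split, TensorBuilder first runs the planning phase (claim on
registration, release on last use) and only allocates one contiguous buffer of
capacity() bytes in prepare().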
Signed-off-by: Yongseop Kim <yons.kim@samsung.com>
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "MemoryAllocator.h"
-#include "logging.h"
-#include <cassert>
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-BumpAllocator::~BumpAllocator()
-{
- if (_base)
- delete[] _base;
-}
-
-Block BumpAllocator::allocate(const graph::operand::Index &index, size_t size)
-{
- assert(size != 0);
-
- Block blk{_pos, size};
- _pos += size;
-
- VERBOSE(BP_ALLOC) << "alloc(#" << index.value() << "): " << blk.offset << ", " << blk.size
- << std::endl;
-
- return blk;
-}
-
-void BumpAllocator::finalize()
-{
- assert(!_base && _pos != 0);
-
- _base = new uint8_t[_pos];
-
- VERBOSE(BP_ALLOC) << "final position: " << _pos << std::endl;
- VERBOSE(BP_ALLOC) << "base pointer: " << static_cast<void *>(_base) << std::endl;
-}
-
-void BumpAllocator::free(const graph::operand::Index &index)
-{
- VERBOSE(BP_ALLOC) << "free(#" << index.value() << "): "
- << "NOTHING does" << std::endl;
-}
-
-FirstFitAllocator::~FirstFitAllocator() { delete[] _base; }
-
-// There are some assumptions for allocating(exactly making a reservation for memory).
-// 1. About _alloc_table(std::map).
-// - The table's data structure is std::map so that it always sorts
-// values(Alloc(pair<Index,size_t>)) by key(base_offset).
-// - This allocate() inserts key/value into _alloc_table and the free() removes the key/value from
-// _alloc_table.
-// - _alloc_table shows the memory status at a certain point in time. Therefore,
-// - If _alloc_table has an offset and a certain size at a certain point in time,
-// it means the place has been already allocated(can't allocate now. need to find someplace
-// new).
-// - If _alloc_table doesn't have any element for an offset and a certain size at a certain
-// point in time, it means the place can be allocated.
-// 2. In the loop for _alloc_table, we can assume the current alloc_base_offset value is bigger than
-// the previous alloc_base_offset.
-Block FirstFitAllocator::allocate(const graph::operand::Index &index, size_t size)
-{
- assert(size != 0);
-
- // Find the right position for allocating
- uint32_t next_offset = 0;
- for (auto &mem_alloc : _alloc_table)
- {
- auto alloc_base_offset = mem_alloc.first;
- auto alloc_size = mem_alloc.second.second;
- if (next_offset + size <= alloc_base_offset)
- {
- break;
- }
- else
- {
- next_offset = alloc_base_offset + alloc_size;
- }
- }
-
- // Now next_offset is set to the proper offset
- auto alloc = std::make_pair(index, size);
- _alloc_table[next_offset] = alloc;
-
- VERBOSE(FF_ALLOC) << "alloc(#" << index.value() << "): [+" << next_offset << ", sz " << size
- << "]" << std::endl;
-
- if (_pos < next_offset + size)
- {
- _pos = next_offset + size;
- }
-
- Block blk{next_offset, size};
- return blk;
-}
-
-void FirstFitAllocator::finalize()
-{
- assert(!_base && _pos != 0);
-
- _base = new uint8_t[_pos];
-
- VERBOSE(FF_ALLOC) << "final position: " << _pos << std::endl;
- VERBOSE(FF_ALLOC) << "base pointer: " << static_cast<void *>(_base) << std::endl;
-}
-
-void FirstFitAllocator::free(const graph::operand::Index &index)
-{
- for (auto it = _alloc_table.cbegin(); it != _alloc_table.cend(); ++it)
- {
- auto alloc = it->second;
- if (alloc.first == index)
- {
- it = _alloc_table.erase(it);
- VERBOSE(FF_ALLOC) << "free(#" << index.value() << "): [+" << it->first << ", sz "
- << alloc.second << "]" << std::endl;
- return;
- }
- }
- assert(false && "can't enter here");
-}
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_MEMORY_ALLOCATOR_H__
-#define __NEURUN_BACKEND_CPU_MEMORY_ALLOCATOR_H__
-
-#include <map>
-
-#include "graph/operand/Index.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-struct Block
-{
- uint32_t offset;
- uint32_t size;
-};
-
-struct IMemoryAllocator
-{
- virtual ~IMemoryAllocator() = default;
- virtual Block allocate(const graph::operand::Index &, size_t) = 0;
- virtual void free(const graph::operand::Index &) = 0;
- virtual void finalize() = 0;
- virtual uint8_t *base() const = 0;
-};
-
-class BumpAllocator : public IMemoryAllocator
-{
-public:
- virtual ~BumpAllocator() override;
- virtual Block allocate(const graph::operand::Index &index, size_t size) override;
- virtual void free(const graph::operand::Index &index) override;
- virtual void finalize() override;
- virtual uint8_t *base() const override { return _base; }
-
-private:
- uint8_t *_base = nullptr;
- uint32_t _pos = 0;
-};
-
-class FirstFitAllocator : public IMemoryAllocator
-{
-public:
- virtual ~FirstFitAllocator() override;
- virtual Block allocate(const graph::operand::Index &index, size_t size) override;
- virtual void free(const graph::operand::Index &index) override;
- virtual void finalize() override;
- virtual uint8_t *base() const override { return _base; }
-
-private:
- uint8_t *_base = nullptr;
- uint32_t _pos = 0;
-
- using Alloc = std::pair<graph::operand::Index, size_t>; // <index, size>
- // Use std::map because allocate() assumes that _alloc_table is sorted by uint32_t(base_offset)
- std::map<uint32_t, Alloc> _alloc_table; // <base_offset, Alloc>
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_MEMORY_ALLOCATOR_H__
_claim_table.erase(it);
- VERBOSE(FF_ALLOC) << "release(#" << index << "): [+" << offset << ", " << size << "sz]"
- << std::endl;
+ VERBOSE(FF_PLANNER) << "release(#" << index << "): [+" << offset << ", " << size << "sz]"
+ << std::endl;
return;
}
}
- assert(false && "CAN'T ENTER HERE");
+  assert(!"Cannot release for given index. It has not been claimed, or it has already been released.");
}
} // namespace cpu
#include <cassert>
#include "operand/Object.h"
-#include "MemoryAllocator.h"
#include "logging.h"
namespace neurun
namespace cpu
{
-TensorBuilder::TensorBuilder() : _mem_alloc(std::make_shared<FirstFitAllocator>())
+TensorBuilder::TensorBuilder() : _mem_planner(std::make_shared<FirstFitPlanner>())
{
// DO NOTHING
}
void TensorBuilder::registerTensorInfo(const graph::operand::Index &ind,
const ::arm_compute::TensorInfo &info)
{
- assert(_mem_alloc);
-
_tensor_info_map.insert({ind, info});
}
const auto &info = _tensor_info_map.at(ind);
const auto size = info.total_size();
- auto mem_blk = _mem_alloc->allocate(ind, size);
- _tensor_mem_map[ind] = mem_blk;
-
- VERBOSE(CPU_TENSORBUILDER) << "ASSIGN(#" << ind.value() << "): mem_blk[" << mem_blk.offset << ", "
- << mem_blk.size << "]" << std::endl;
+ _mem_planner->claim(ind, size);
}
-void TensorBuilder::notifyLastUse(const graph::operand::Index &ind)
-{
- assert(_mem_alloc);
-
- _mem_alloc->free(ind);
-
- VERBOSE(CPU_TENSORBUILDER) << "UNASSIGN(#" << ind.value() << ")" << std::endl;
-}
+void TensorBuilder::notifyLastUse(const graph::operand::Index &ind) { _mem_planner->release(ind); }
void TensorBuilder::prepare(void)
{
assert(_tensors.size() == 0);
- assert(_mem_alloc);
- _mem_alloc->finalize();
+ _mem_alloc = std::make_shared<Allocator>(_mem_planner->capacity());
assert(_mem_alloc->base());
- for (auto &entry : _tensor_mem_map)
+ for (auto &mem_plan : _mem_planner->memory_plans())
{
- auto ind = entry.first;
- auto mem_blk = entry.second;
+ auto ind = mem_plan.first;
+ auto mem_blk = mem_plan.second;
const auto &info = _tensor_info_map[ind];
uint8_t *buffer = _mem_alloc->base() + mem_blk.offset;
#include "backend/interface/ITensorBuilder.h"
#include "backend/cpu/operand/Tensor.h"
#include "graph/operand/Index.h"
-#include "MemoryAllocator.h"
+#include "MemoryPlanner.h"
namespace neurun
{
std::unordered_map<graph::operand::Index, ::arm_compute::TensorInfo> _tensor_info_map;
std::unordered_map<graph::operand::Index, std::shared_ptr<operand::Tensor>> _tensors;
std::unordered_map<graph::operand::Index, Block> _tensor_mem_map;
- std::shared_ptr<IMemoryAllocator> _mem_alloc;
+ std::shared_ptr<IMemoryPlanner> _mem_planner;
+ std::shared_ptr<Allocator> _mem_alloc;
};
} // namespace cpu
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-
-#include "backend/cpu/MemoryAllocator.h"
-#include "graph/operand/Index.h"
-
-TEST(BumpAllocator, allocate_test)
-{
- ::neurun::backend::cpu::BumpAllocator allocator;
-
- auto allocate = [&allocator](uint32_t index, size_t size, uint32_t expected_offset) {
- ::neurun::graph::operand::Index mem_idx(index);
- auto mem_blk = allocator.allocate(mem_idx, size);
- ASSERT_EQ(mem_blk.offset, expected_offset);
- ASSERT_EQ(mem_blk.size, size);
- };
-
- allocate(0, 10, 0);
- allocate(1, 20, 10);
- allocate(2, 30, 30);
-
- ASSERT_EQ(allocator.base(), nullptr);
-}
-
-TEST(BumpAllocator, finalize_test)
-{
- ::neurun::backend::cpu::BumpAllocator allocator;
-
- auto allocate = [&allocator](uint32_t index, size_t size) {
- ::neurun::graph::operand::Index mem_idx(index);
- auto mem_blk = allocator.allocate(mem_idx, size);
- };
-
- allocate(0, 10);
- allocate(1, 20);
- allocate(2, 30);
-
- allocator.finalize();
-
- ASSERT_NE(allocator.base(), nullptr);
-}
-
-TEST(FirstFitAllocator, allocate_free_test)
-{
- ::neurun::backend::cpu::FirstFitAllocator allocator;
-
- auto allocate = [&allocator](uint32_t index, size_t size, uint32_t expected_offset) {
- ::neurun::graph::operand::Index mem_idx(index);
- auto mem_blk = allocator.allocate(mem_idx, size);
- ASSERT_EQ(mem_blk.offset, expected_offset);
- ASSERT_EQ(mem_blk.size, size);
- };
-
- auto free = [&allocator](uint32_t index) {
- ::neurun::graph::operand::Index mem_idx(index);
- allocator.free(mem_idx);
- };
-
- // 0 ALLOC - 10
- allocate(0, 10, 0);
-
- // 1 ALLOC - 20
- allocate(1, 20, 10);
-
- // 2 ALLOC - 30
- allocate(2, 30, 30);
-
- // 0 FREE - 10
- free(0);
-
- // 3 ALLOC - 20
- allocate(3, 20, 60);
-
- // 4 ALLOC - 5
- allocate(4, 5, 0);
-
- // 5 ALLOC - 10
- allocate(5, 10, 80);
-
- // 6 ALLOC - 5
- allocate(6, 5, 5);
-
- // 2 FREE - 30
- free(2);
-
- // 7 ALLOC - 35
- allocate(7, 35, 90);
-
- // 8 ALLOC - 10
- allocate(8, 10, 30);
-
- // 4 FREE - 5
- free(4);
-
- // 9 ALLOC - 10
- allocate(9, 10, 40);
-
- // 10 ALLOC - 10
- allocate(10, 10, 50);
-
- // 6 FREE
- free(6);
-
- // 1 FREE
- free(1);
-
- // 8 FREE
- free(8);
-
- // 9 FREE
- free(9);
-
- // 10 FREE
- free(10);
-
- // 3 FREE
- free(3);
-
- // 5 FREE
- free(5);
-
- // 7 FREE
- free(7);
-}
claim(2, 30, 30);
}
-TEST(FirstFitAllocator, claim_release_test)
+TEST(FirstFitPlanner, claim_release_test)
{
::neurun::backend::cpu::FirstFitPlanner planner;