* limitations under the License.
*/
-//#include "internal/cpu/MemoryAllocator.h"
+#include "MemoryAllocator.h"
+#include <cassert>
+
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
+
+BumpAllocator::~BumpAllocator()
+{
+ if (_base)
+ delete[] _base;
+}
+
+void BumpAllocator::reset()
+{
+ if (_base)
+ delete[] _base;
+
+ _base = nullptr;
+ _curr_pos = 0;
+ _reserved_size = 0;
+ _mem_idx = 0;
+ _mem_blk_map.clear();
+}
+
+uint32_t BumpAllocator::allocate(size_t size)
+{
+ assert(size != 0);
+
+ MemoryBlock blk{_curr_pos, size};
+ _mem_blk_map[_mem_idx] = blk;
+ _curr_pos += size;
+ return _mem_idx++;
+}
+
+void BumpAllocator::finalize()
+{
+ assert(!_base && !_reserved_size && _curr_pos != 0);
+ _reserved_size = _curr_pos;
+ _base = new uint8_t[_reserved_size];
+}
+
+void BumpAllocator::free(uint32_t)
+{
+ assert(_base && _reserved_size > 0);
+
+ // DO NOTHING
+ // In the case of this BumpAllocator, ignore the case like where reallocations are needed
+}
+
+MemoryBlock BumpAllocator::getMemoryBlock(uint32_t mem_id)
+{
+ assert(_mem_blk_map.size() > 0);
+
+ return _mem_blk_map[mem_id];
+}
+
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
* limitations under the License.
*/
-#ifndef __INTERNAL_CPU_MEMORY_ALLOCATOR_H__
-#define __INTERNAL_CPU_MEMORY_ALLOCATOR_H__
+#ifndef __BACKEND_CPU_MEMORY_ALLOCATOR_H__
+#define __BACKEND_CPU_MEMORY_ALLOCATOR_H__
#include <cstddef>
#include <cstdint>

#include <unordered_map>
+namespace neurun
+{
+namespace backend
+{
+namespace cpu
+{
-namespace arm_compute
// Descriptor of a chunk handed out by BumpAllocator: an offset into the
// finalized backing storage plus the chunk's size in bytes.
struct MemoryBlock
{
  uint32_t base_offset;
  uint32_t size;
};

// FIXME Should extract MemoryAllocator from this BumpAllocator
//
// Two-phase bump ("linear") allocator:
//  1. allocate() only records {offset, size} pairs and advances a cursor;
//  2. finalize() creates one contiguous buffer covering every request.
// free() is a no-op: individual blocks are never returned to the pool.
class BumpAllocator
{
public:
  // Re-declared explicitly: declaring the copy operations below would
  // otherwise suppress the implicit default constructor.
  BumpAllocator() = default;
  virtual ~BumpAllocator();

  // The destructor releases _base, so copying would double-delete.
  BumpAllocator(const BumpAllocator &) = delete;
  BumpAllocator &operator=(const BumpAllocator &) = delete;

  // FIXME Remove this when instance() is removed
  virtual void reset();
  // Record a block of `size` bytes and return its id (no real allocation yet)
  virtual uint32_t allocate(size_t size);
  // No-op: a bump allocator never releases individual blocks
  virtual void free(uint32_t mem_id);
  // Create the backing buffer sized to everything allocate()d so far
  virtual void finalize();
  // Start of the finalized buffer (nullptr before finalize())
  virtual uint8_t *base() const { return _base; }
  // Offset/size of the block registered under `mem_id`
  virtual MemoryBlock getMemoryBlock(uint32_t mem_id);

private:
  uint8_t *_base = nullptr;    // owned backing storage, created by finalize()
  uint32_t _reserved_size = 0; // total bytes reserved by finalize()
  uint32_t _curr_pos = 0;      // bump cursor / offset of the next block
  uint32_t _mem_idx = 0;       // next id to hand out
  std::unordered_map<uint32_t, MemoryBlock> _mem_blk_map;

public:
  // This should be moved into something class in backend, not as global var
  static BumpAllocator &instance()
  {
    static BumpAllocator inst;
    return inst;
  }
};
} // namespace cpu
-} // namespace internal
+} // namespace backend
+} // namespace neurun
-#endif // __INTERNAL_CPU_MEMORY_ALLOCATOR_H__
+#endif // __BACKEND_CPU_MEMORY_ALLOCATOR_H__
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "backend/cpu/MemoryAllocator.h"
+
+TEST(BumpAllocator, allocate_test)
+{
+ auto &allocator = ::neurun::backend::cpu::BumpAllocator::instance();
+
+ allocator.reset();
+
+ size_t mem_sz0 = 10;
+ auto mem_id0 = allocator.allocate(mem_sz0);
+ auto mem_blk0 = allocator.getMemoryBlock(mem_id0);
+ ASSERT_EQ(mem_blk0.base_offset, 0);
+ ASSERT_EQ(mem_blk0.size, mem_sz0);
+
+ size_t mem_sz1 = 20;
+ auto mem_id1 = allocator.allocate(mem_sz1);
+ auto mem_blk1 = allocator.getMemoryBlock(mem_id1);
+ ASSERT_EQ(mem_blk1.base_offset, mem_sz0);
+ ASSERT_EQ(mem_blk1.size, mem_sz1);
+
+ size_t mem_sz2 = 30;
+ auto mem_id2 = allocator.allocate(mem_sz2);
+ auto mem_blk2 = allocator.getMemoryBlock(mem_id2);
+ ASSERT_EQ(mem_blk2.base_offset, mem_sz0 + mem_sz1);
+ ASSERT_EQ(mem_blk2.size, mem_sz2);
+
+ ASSERT_EQ(allocator.base(), nullptr);
+}
+
+TEST(BumpAllocator, finalize_test)
+{
+ auto &allocator = ::neurun::backend::cpu::BumpAllocator::instance();
+
+ allocator.reset();
+
+ size_t mem_sz0 = 10;
+ allocator.allocate(mem_sz0);
+
+ size_t mem_sz1 = 20;
+ allocator.allocate(mem_sz1);
+
+ size_t mem_sz2 = 30;
+ allocator.allocate(mem_sz2);
+
+ allocator.finalize();
+
+ ASSERT_NE(allocator.base(), nullptr);
+}