1 // Copyright 2020 The Pigweed Authors
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
7 // https://www.apache.org/licenses/LICENSE-2.0
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
// WARNING: This code is an experimental WIP & exploration only, and is far from
21 #include "pw_assert/assert.h"
22 #include "pw_status/status.h"
24 namespace pw::allocator {
26 #if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
27 // Add poison offset of sizeof(void*) bytes before and after usable space in all
29 #define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
// Set the poison offset to 0 bytes; will not add poison space before and
32 // after usable space in all Blocks.
33 #define PW_ALLOCATOR_POISON_OFFSET static_cast<size_t>(0)
34 #endif // PW_ALLOCATOR_POISON_ENABLE
36 // The "Block" type is intended to be a building block component for
// allocators. In this design, there is an explicit pointer to next and
38 // prev from the block header; the size is not encoded. The below diagram shows
39 // what this would look like for two blocks.
41 // .------+---------------------------------.-----------------------------
42 // | Block A (first) | Block B (second)
44 // +------+------+--------------------------+------+------+---------------
45 // | Next | Prev | usable space | Next | Prev | usable space..
46 // +------+------+--------------------------+------+--+---+---------------
48 // | '-------------------------------------' |
50 // '----------- Block B's prev points to Block A -----'
52 // One use for these blocks is to use them as allocations, where each block
53 // represents an allocation handed out by malloc(). These blocks could also be
54 // used as part of a slab or buddy allocator.
56 // Each block also contains flags for whether it is the last block (i.e. whether
57 // the "next" pointer points to a valid block, or just denotes the end of this
58 // block), and whether the block is in use. These are encoded into the last two
59 // bits of the "next" pointer, as follows:
61 // .-----------------------------------------------------------------------.
63 // +-----------------------------------------------------------------------+
64 // | Next | Prev | usable space |
65 // +----------------+------+------+ + |
66 // | Ptr[N..2] | Last | Used | | |
67 // +----------------+------+------+------+---------------------------------+
70 // '----------- Next() = Next & ~0x3 --------------------------------->
72 // The first block in a chain is denoted by a nullptr "prev" field, and the last
73 // block is denoted by the "Last" bit being set.
// Note: This block class requires that the given block is aligned to an
76 // alignof(Block*) boundary. Because of this alignment requirement, each
// returned block will also be aligned to an alignof(Block*) boundary, and the
78 // size will always be rounded up to a multiple of alignof(Block*).
80 // This class must be constructed using the static Init call.
// Blocks live in place inside the memory region they describe (the header
// is the start of the allocation itself), so copying or moving a Block
// would corrupt the chain; all four operations are deleted.
Block(const Block& other) = delete;
Block& operator=(const Block& other) = delete;
Block(Block&& other) = delete;
Block& operator=(Block&& other) = delete;
// Create the first block for a given memory region.
// Note that the start of the given memory region must be aligned to an
// alignof(Block) boundary.
// INVALID_ARGUMENT if the given region is unaligned or too small, or
static Status Init(const std::span<std::byte> region, Block** block);
97 // Returns a pointer to a Block, given a pointer to the start of the usable
98 // space inside the block (i.e. the opposite operation to UsableSpace()). In
99 // reality, this method just subtracts the appropriate amount from
100 // usable_space to point to the start of the owning block.
102 // Be aware that this method does not do any sanity checking; passing a random
103 // pointer will return a non-null pointer.
static Block* FromUsableSpace(std::byte* usable_space) {
  // Step back over the leading poison guard (if enabled) and the header.
  return reinterpret_cast<Block*>(usable_space - sizeof(Block) -
                                  PW_ALLOCATOR_POISON_OFFSET);
// Size including the header.
size_t OuterSize() const {
  // The block's footprint spans from its own header to the start of the
  // next block; Next() returns next_ with the Last/Used bits masked off.
  return reinterpret_cast<intptr_t>(Next()) -
         reinterpret_cast<intptr_t>(this);
// Usable bytes inside the block.
size_t InnerSize() const {
  // Footprint minus the header and the poison guards on both sides.
  return OuterSize() - sizeof(*this) - 2 * PW_ALLOCATOR_POISON_OFFSET;
// Return the usable space inside this block.
std::byte* UsableSpace() {
  // Usable space begins just past the header and the leading poison bytes.
  return reinterpret_cast<std::byte*>(this) + sizeof(*this) +
         PW_ALLOCATOR_POISON_OFFSET;
// Split this block, such that this block has an inner size of
// `head_block_inner_size`, and return a new block in the remainder of the
// space in `new_block`.
// The "remainder" block will be aligned to an alignof(Block*) boundary (and
// `head_block_inner_size` will be rounded up). If the remaining space is not
// large enough to store a new `Block` after rounding, no splitting will
// occur.
// This may return the following:
// OK: The split completed successfully.
// INVALID_ARGUMENT: new_block is null
// FAILED_PRECONDITION: This block is in use and cannot be split.
// OUT_OF_RANGE: The requested size for "this" block is greater than the
// current inner_size.
// RESOURCE_EXHAUSTED: The split cannot occur because the "remainder" block
// would not be large enough to store a block header.
Status Split(size_t head_block_inner_size, Block** new_block);
145 // Merge this block with the one that comes after it.
146 // This function will not merge blocks if either are in use.
148 // This may return the following:
149 // OK: Merge was successful.
150 // OUT_OF_RANGE: Attempting to merge the "last" block.
151 // FAILED_PRECONDITION: The blocks could not be merged because one of them
155 // Merge this block with the one that comes before it.
156 // This function will not merge blocks if either are in use.
158 // Warning: merging with a previous block will invalidate this block instance.
159 // do not perform any operations on this instance after merging.
161 // This may return the following:
162 // OK: Merge was successful.
163 // OUT_OF_RANGE: Attempting to merge the "first" block.
164 // FAILED_PRECONDITION: The blocks could not be merged because one of them
168 // Returns whether this block is in-use or not
169 bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }
171 // Returns whether this block is the last block or
172 // not (i.e. whether NextBlock points to a valid block or not).
173 // This is needed because NextBlock points to the end of this block,
174 // whether there is a valid block there or not.
175 bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }
// Mark this block as in-use
// (sets the kInUseFlag bit carried in the low bits of next_).
next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
// Mark this block as free
// (clears the kInUseFlag bit carried in the low bits of next_).
next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
// Mark this block as the last one in the chain.
// (sets the kLastFlag bit carried in the low bits of next_).
next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
// Clear the "last" bit from this block.
// (clears the kLastFlag bit carried in the low bits of next_).
next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
// Fetch the block immediately after this one.
// Note: you should also check Last(); this function may return a valid
// block, even if one does not exist.
Block* Next() const {
  // Mask off both flag bits to recover the plain pointer value.
  return reinterpret_cast<Block*>(
      (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
205 // Return the block immediately before this one. This will return nullptr
206 // if this is the "first" block.
207 Block* Prev() const { return prev_; }
209 // Return true if the block is aligned, the prev/next field matches with the
// previous and next block, and the poisoned bytes are not damaged. Otherwise,
211 // return false to indicate this block is corrupted.
212 bool IsValid() const { return CheckStatus() == BlockStatus::VALID; }
// Uses PW_DCHECK to log information about the reason if a block is invalid.
// This function will do nothing if the block is valid.
void CrashIfInvalid();
// Bit 0 of next_: set while this block's usable space is allocated.
static constexpr uintptr_t kInUseFlag = 0x1;
// Bit 1 of next_: set when this block is the last one in the chain.
static constexpr uintptr_t kLastFlag = 0x2;
221 static constexpr std::byte POISON_PATTERN[8] = {std::byte{0x92},
239 // Helper to reduce some of the casting nesting in the block management
241 uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }
// Verifies the poison bytes around the usable space (see the validity
// comment on IsValid()); implementation lives out-of-line.
bool CheckPoisonBytes() const;
// Full consistency check; IsValid() compares its result to
// BlockStatus::VALID.
BlockStatus CheckStatus() const;
247 // Note: Consider instead making these next/prev offsets from the current
248 // block, with templated type for the offset size. There are some interesting
249 // tradeoffs here; perhaps a pool of small allocations could use 1-byte
250 // next/prev offsets to reduce size further.
255 } // namespace pw::allocator