// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-void FreeListNode::set_size(int size_in_bytes) {
+void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
ASSERT(size_in_bytes > 0);
ASSERT(IsAligned(size_in_bytes, kPointerSize));
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > ByteArray::kHeaderSize) {
- set_map(HEAP->raw_unchecked_byte_array_map());
+ set_map(heap->raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
} else if (size_in_bytes == kPointerSize) {
- set_map(HEAP->raw_unchecked_one_pointer_filler_map());
+ set_map(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map(HEAP->raw_unchecked_two_pointer_filler_map());
+ set_map(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
}
-Address FreeListNode::next() {
+Address FreeListNode::next(Heap* heap) {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_byte_array_map()) {
+ if (map() == heap->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return Memory::Address_at(address() + kNextOffset);
} else {
}
-void FreeListNode::set_next(Address next) {
+void FreeListNode::set_next(Heap* heap, Address next) {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_byte_array_map()) {
+ if (map() == heap->raw_unchecked_byte_array_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) = next;
} else {
}
-OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) {
+OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
+ : heap_(heap),
+ owner_(owner) {
Reset();
}
Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
#endif
FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(size_in_bytes);
+ node->set_size(heap_, size_in_bytes);
// We don't use the freelists in compacting mode. This makes it more like a
// GC that only has mark-sweep-compact and doesn't have a mark-sweep
// Insert other blocks at the head of an exact free list.
int index = size_in_bytes >> kPointerSizeLog2;
- node->set_next(free_[index].head_node_);
+ node->set_next(heap_, free_[index].head_node_);
free_[index].head_node_ = node->address();
available_ += size_in_bytes;
needs_rebuild_ = true;
if (free_[index].head_node_ != NULL) {
FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
// If this was the last block of its size, remove the size.
- if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index);
+ if ((free_[index].head_node_ = node->next(heap_)) == NULL)
+ RemoveSize(index);
available_ -= size_in_bytes;
*wasted_bytes = 0;
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
finger_ = prev;
free_[prev].next_size_ = rem;
// If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+ if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
free_[rem].next_size_ = free_[cur].next_size_;
} else {
free_[rem].next_size_ = cur;
}
// Add the remainder block.
- rem_node->set_size(rem_bytes);
- rem_node->set_next(free_[rem].head_node_);
+ rem_node->set_size(heap_, rem_bytes);
+ rem_node->set_next(heap_, free_[rem].head_node_);
free_[rem].head_node_ = rem_node->address();
} else {
// If this was the last block of size cur, remove the size.
- if ((free_[cur].head_node_ = cur_node->next()) == NULL) {
+ if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
finger_ = prev;
free_[prev].next_size_ = free_[cur].next_size_;
}
if (rem_bytes < kMinBlockSize) {
// Too-small remainder is wasted.
- rem_node->set_size(rem_bytes);
+ rem_node->set_size(heap_, rem_bytes);
available_ -= size_in_bytes + rem_bytes;
*wasted_bytes = rem_bytes;
return cur_node;
}
// Add the remainder block and, if needed, insert its size.
- rem_node->set_size(rem_bytes);
- rem_node->set_next(free_[rem].head_node_);
+ rem_node->set_size(heap_, rem_bytes);
+ rem_node->set_next(heap_, free_[rem].head_node_);
free_[rem].head_node_ = rem_node->address();
- if (rem_node->next() == NULL) InsertSize(rem);
+ if (rem_node->next(heap_) == NULL) InsertSize(rem);
}
available_ -= size_in_bytes;
*wasted_bytes = 0;
Address cur_addr = free_[i].head_node_;
while (cur_addr != NULL) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next();
+ cur_addr = cur_node->next(heap_);
cur_node->SetMark();
}
}
while (cur_addr != NULL) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
if (cur_node == node) return true;
- cur_addr = cur_node->next();
+ cur_addr = cur_node->next(heap_);
}
}
return false;
#endif
-FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
- : owner_(owner), object_size_(object_size) {
+FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
+ AllocationSpace owner,
+ int object_size)
+ : heap_(heap), owner_(owner), object_size_(object_size) {
Reset();
}
// We only use the freelists with mark-sweep.
ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(object_size_);
- node->set_next(NULL);
+ node->set_size(heap_, object_size_);
+ node->set_next(heap_, NULL);
if (head_ == NULL) {
tail_ = head_ = node->address();
} else {
- FreeListNode::FromAddress(tail_)->set_next(node->address());
+ FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
tail_ = node->address();
}
available_ += object_size_;
ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
FreeListNode* node = FreeListNode::FromAddress(head_);
- head_ = node->next();
+ head_ = node->next(heap_);
available_ -= object_size_;
return node;
}
Address cur_addr = head_;
while (cur_addr != NULL && cur_addr != tail_) {
FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
- cur_addr = cur_node->next();
+ cur_addr = cur_node->next(heap_);
cur_node->SetMark();
}
}
// function also writes a map to the first word of the block so that it
// looks like a heap object to the garbage collector and heap iteration
// functions.
- void set_size(int size_in_bytes);
+ void set_size(Heap* heap, int size_in_bytes);
// Accessors for the next field.
- inline Address next();
- inline void set_next(Address next);
+ inline Address next(Heap* heap);
+ inline void set_next(Heap* heap, Address next);
private:
static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
public:
- explicit OldSpaceFreeList(AllocationSpace owner);
+ OldSpaceFreeList(Heap* heap, AllocationSpace owner);
// Clear the free list.
void Reset();
static const int kMinBlockSize = 2 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+ Heap* heap_;
+
// The identity of the owning space, for building allocation Failure
// objects.
AllocationSpace owner_;
// The free list for the map space.
class FixedSizeFreeList BASE_EMBEDDED {
public:
- FixedSizeFreeList(AllocationSpace owner, int object_size);
+ FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
// Clear the free list.
void Reset();
void MarkNodes();
private:
+ Heap* heap_;
+
// Available bytes on the free list.
intptr_t available_;
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : PagedSpace(heap, max_capacity, id, executable), free_list_(id) {
+ : PagedSpace(heap, max_capacity, id, executable),
+ free_list_(heap, id) {
page_extra_ = 0;
}
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
name_(name),
- free_list_(id, object_size_in_bytes) {
+ free_list_(heap, id, object_size_in_bytes) {
page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
}