MemoryChunk* chunk =
Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+ desc.instr_size,
EXECUTABLE,
NULL);
if (chunk == NULL) {
PagedSpace* map_space = Heap::map_space();
PagedSpace* cell_space = Heap::cell_space();
LargeObjectSpace* lo_space = Heap::lo_space();
+ bool one_old_space_gc_has_been_performed = false;
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
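+ // Set when a GC is performed for one of the old spaces in the current
+ // loop iteration; reset at the top of each iteration.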
+ bool old_space_gc_performed;
+
while (gc_performed && counter++ < kThreshold) {
+ old_space_gc_performed = false;
gc_performed = false;
if (!new_space->ReserveSpace(new_space_size)) {
Heap::CollectGarbage(NEW_SPACE);
gc_performed = true;
}
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
Heap::CollectGarbage(OLD_POINTER_SPACE);
gc_performed = true;
+ old_space_gc_performed = true;
}
if (!(old_data_space->ReserveSpace(data_space_size))) {
Heap::CollectGarbage(OLD_DATA_SPACE);
gc_performed = true;
+ old_space_gc_performed = true;
}
if (!(code_space->ReserveSpace(code_space_size))) {
Heap::CollectGarbage(CODE_SPACE);
gc_performed = true;
+ old_space_gc_performed = true;
}
if (!(map_space->ReserveSpace(map_space_size))) {
Heap::CollectGarbage(MAP_SPACE);
gc_performed = true;
+ old_space_gc_performed = true;
}
if (!(cell_space->ReserveSpace(cell_space_size))) {
Heap::CollectGarbage(CELL_SPACE);
gc_performed = true;
+ old_space_gc_performed = true;
}
// We add a slack-factor of 2 in order to have space for a series of
// large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
// allocation in the other spaces.
large_object_size += cell_space_size + map_space_size + code_space_size +
data_space_size + pointer_space_size;
- if (!(lo_space->ReserveSpace(large_object_size))) {
+
+ // If we already did one GC in order to make space in old space, there is
+ // no sense in doing another one. We will attempt to force through the
+ // large object space allocation, which comes directly from the OS,
+ // regardless of any soft limit.
+ if (!one_old_space_gc_has_been_performed &&
+ !(lo_space->ReserveSpace(large_object_size))) {
Heap::CollectGarbage(LO_SPACE);
gc_performed = true;
}
+ if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
}
if (gc_performed) {
// Failed to reserve the space after several attempts.
V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
}
}
// It's difficult to filter out slots recorded for large objects.
if (chunk->owner()->identity() == LO_SPACE &&
- chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+ chunk->size() > Page::kPageSize &&
is_compacting) {
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
for ( ; live_objects != 0; live_objects--) {
Address free_end = object_address + offsets[live_index++] * kPointerSize;
if (free_end != free_start) {
- space->Free(free_start, static_cast<int>(free_end - free_start));
+ space->AddToFreeLists(free_start,
+ static_cast<int>(free_end - free_start));
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
cells[cell_index] = 0;
}
if (free_start != p->ObjectAreaEnd()) {
- space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+ space->AddToFreeLists(free_start,
+ static_cast<int>(p->ObjectAreaEnd() - free_start));
}
p->ResetLiveBytes();
}
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+ space->AddToFreeLists(
+ p->ObjectAreaStart(),
+ static_cast<int>(p->ObjectAreaEnd() - p->ObjectAreaStart()));
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
}
size_t size = block_address - p->ObjectAreaStart();
if (cell_index == last_cell_index) {
- freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
- static_cast<int>(size)));
+ freed_bytes += static_cast<int>(space->AddToFreeLists(
+ p->ObjectAreaStart(), static_cast<int>(size)));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->ObjectAreaStart();
- freed_bytes += space->Free(p->ObjectAreaStart(),
- static_cast<int>(size));
+ freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
+ static_cast<int>(size));
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
// started. Now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(free_end - free_start));
+ freed_bytes += space->AddToFreeLists(
+ free_start, static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
// Handle the free space at the end of the page.
if (block_address - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(block_address - free_start));
+ freed_bytes += space->AddToFreeLists(
+ free_start, static_cast<int>(block_address - free_start));
}
p->ResetLiveBytes();
pages_[LO_SPACE].Add(address);
}
last_object_address_ = address;
+ ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
return address;
}
int offset = source_->GetInt();
ASSERT(!SpaceIsLarge(space));
offset <<= kObjectAlignmentBits;
- return HeapObject::FromAddress(high_water_[space] - offset);
+ Address address = high_water_[space] - offset;
+ // This assert will fail if kMinimumSpaceSizes is too small for a space,
+ // because we rely on the fact that all allocation is linear when the VM
+ // is very young.
+ ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
+ return HeapObject::FromAddress(address);
}
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "isolate.h"
+#include "spaces.h"
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
};
+
} } // namespace v8::internal
#endif // V8_SNAPSHOT_H_
Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
- ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+ ASSERT(chunk->size() <= kPageSize);
ASSERT(chunk->owner() == owner);
- owner->IncreaseCapacity(Page::kObjectAreaSize);
- owner->Free(page->ObjectAreaStart(),
- static_cast<int>(page->ObjectAreaEnd() -
- page->ObjectAreaStart()));
+ int object_bytes =
+ static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
+ owner->IncreaseCapacity(object_bytes);
+ owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
if (new_top > allocation_info_.limit) return NULL;
allocation_info_.top = new_top;
+ ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
return HeapObject::FromAddress(current_top);
}
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"
+#include "snapshot.h"
namespace v8 {
namespace internal {
: isolate_(isolate),
capacity_(0),
capacity_executable_(0),
- size_(0),
+ memory_allocator_reserved_(0),
size_executable_(0) {
}
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);
- size_ = 0;
+ memory_allocator_reserved_ = 0;
size_executable_ = 0;
return true;
void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
- ASSERT(size_ == 0);
+ CHECK_EQ(memory_allocator_reserved_, 0);
// TODO(gc) this will be true again when we fix FreeMemory.
// ASSERT(size_executable_ == 0);
capacity_ = 0;
// TODO(gc) make code_range part of memory allocator?
ASSERT(reservation->IsReserved());
size_t size = reservation->size();
- ASSERT(size_ >= size);
- size_ -= size;
+ ASSERT(memory_allocator_reserved_ >= size);
+ memory_allocator_reserved_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
- ASSERT(size_ >= size);
- size_ -= size;
+ ASSERT(memory_allocator_reserved_ >= size);
+ memory_allocator_reserved_ -= size;
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
VirtualMemory reservation(size, alignment);
if (!reservation.IsReserved()) return NULL;
- size_ += reservation.size();
+ memory_allocator_reserved_ += reservation.size();
Address base = RoundUp(static_cast<Address>(reservation.address()),
alignment);
controller->TakeControl(&reservation);
Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+ size_t reserved_size,
size_t alignment,
Executability executable,
VirtualMemory* controller) {
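+ // Reserve address space for the full reserved_size, but commit only the
+ // first size bytes of it.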
+ ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
+ RoundUp(size, OS::CommitPageSize()));
VirtualMemory reservation;
- Address base = ReserveAlignedMemory(size, alignment, &reservation);
+ Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
if (base == NULL) return NULL;
if (!reservation.Commit(base,
size,
}
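+// Commit more of this page's reserved but as-yet uncommitted memory, growing
+// the object area and putting the newly committed memory on the free lists.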
+void Page::CommitMore(intptr_t space_needed) {
+ intptr_t reserved_page_size = reservation_.IsReserved() ?
+ reservation_.size() :
+ Page::kPageSize;
+ ASSERT(size() + space_needed <= reserved_page_size);
+ // Increase the page size by at least 64k (this also rounds to OS page
+ // size).
+ int expand = Min(reserved_page_size - size(),
+ RoundUp(size() + space_needed, Page::kGrowthUnit) - size());
+ ASSERT(expand <= kPageSize - size());
+ ASSERT(expand <= reserved_page_size - size());
+ Executability executable =
+ IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ Address old_end = ObjectAreaEnd();
+ if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
+
+ set_size(size() + expand);
+
+ PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
+ paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
+ paged_space,
+ old_end,
+ 0, // No new memory was reserved.
+ expand, // New memory committed.
+ executable);
+ paged_space->IncreaseCapacity(expand);
+
+ // In spaces with alignment requirements (e.g. map space) we have to align
+ // the expanded area with the correct object alignment.
+ uintptr_t object_area_size = old_end - ObjectAreaStart();
+ uintptr_t aligned_object_area_size =
+ object_area_size - object_area_size % paged_space->ObjectAlignment();
+ if (aligned_object_area_size != object_area_size) {
+ aligned_object_area_size += paged_space->ObjectAlignment();
+ }
+ Address new_area =
+ reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
+ // In spaces with alignment requirements, this will waste the space for one
+ // object per doubling of the page size until the next GC.
+ paged_space->AddToFreeLists(old_end, new_area - old_end);
+
+ expand -= (new_area - old_end);
+
+ paged_space->AddToFreeLists(new_area, expand);
+}
+
+
NewSpacePage* NewSpacePage::Initialize(Heap* heap,
Address start,
SemiSpace* semi_space) {
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+ intptr_t committed_body_size,
Executability executable,
Space* owner) {
- size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+ ASSERT(body_size >= committed_body_size);
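+ // body_size determines how much address space is reserved for the chunk,
+ // while only committed_body_size of it is actually committed up front.
+ // Both are rounded up to the OS commit page size.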
+ size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
+ OS::CommitPageSize());
+ intptr_t committed_chunk_size =
+ committed_body_size + MemoryChunk::kObjectStartOffset;
+ committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
Heap* heap = isolate_->heap();
Address base = NULL;
VirtualMemory reservation;
ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
MemoryChunk::kAlignment));
if (base == NULL) return NULL;
- size_ += chunk_size;
- // Update executable memory size.
- size_executable_ += chunk_size;
+ // AllocateAlignedMemory updates the memory allocator's reserved-memory
+ // counter, but we are not using that method when we have a code range, so
+ // we update the counter here.
+ memory_allocator_reserved_ += chunk_size;
} else {
- base = AllocateAlignedMemory(chunk_size,
+ base = AllocateAlignedMemory(committed_chunk_size,
+ chunk_size,
MemoryChunk::kAlignment,
executable,
&reservation);
if (base == NULL) return NULL;
- // Update executable memory size.
- size_executable_ += reservation.size();
}
} else {
- base = AllocateAlignedMemory(chunk_size,
+ base = AllocateAlignedMemory(committed_chunk_size,
+ chunk_size,
MemoryChunk::kAlignment,
executable,
&reservation);
if (base == NULL) return NULL;
}
-#ifdef DEBUG
- ZapBlock(base, chunk_size);
-#endif
- isolate_->counters()->memory_allocated()->
- Increment(static_cast<int>(chunk_size));
-
- LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
- if (owner != NULL) {
- ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
- PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
- }
+ AllocationBookkeeping(
+ owner, base, chunk_size, committed_chunk_size, executable);
MemoryChunk* result = MemoryChunk::Initialize(heap,
base,
- chunk_size,
+ committed_chunk_size,
executable,
owner);
result->set_reserved_memory(&reservation);
}
-Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
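+// Shared bookkeeping for a newly allocated chunk: updates the executable
+// memory counter (for executable chunks), zaps the committed area in debug
+// builds, bumps the allocated-memory counter, logs the allocation and
+// notifies allocation callbacks.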
+void MemoryAllocator::AllocationBookkeeping(Space* owner,
+ Address base,
+ intptr_t reserved_chunk_size,
+ intptr_t committed_chunk_size,
+ Executability executable) {
+ if (executable == EXECUTABLE) {
+ // Update executable memory size.
+ size_executable_ += reserved_chunk_size;
+ }
+
+#ifdef DEBUG
+ ZapBlock(base, committed_chunk_size);
+#endif
+ isolate_->counters()->memory_allocated()->
+ Increment(static_cast<int>(committed_chunk_size));
+
+ LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
+ if (owner != NULL) {
+ ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+ PerformAllocationCallback(
+ space, kAllocationActionAllocate, committed_chunk_size);
+ }
+}
+
+
+Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
+ PagedSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+ ASSERT(committed_object_area_size <= Page::kObjectAreaSize);
+
+ MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
+ committed_object_area_size,
+ executable,
+ owner);
if (chunk == NULL) return NULL;
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Executability executable,
Space* owner) {
- MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+ MemoryChunk* chunk =
+ AllocateChunk(object_size, object_size, executable, owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
if (reservation->IsReserved()) {
FreeMemory(reservation, chunk->executable());
} else {
+ // When we do not have a reservation, it is because this allocation is
+ // part of the huge chunk of memory reserved for code on x64. In that
+ // case the size was rounded up to the page size on allocation, so we do
+ // the same now when freeing.
FreeMemory(chunk->address(),
- chunk->size(),
+ RoundUp(chunk->size(), Page::kPageSize),
chunk->executable());
}
}
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
+ float pct =
+ static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
PrintF(" capacity: %" V8_PTR_PREFIX "d"
", used: %" V8_PTR_PREFIX "d"
", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct*100));
+ capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
}
#endif
bool PagedSpace::CanExpand() {
ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
if (Capacity() == max_capacity_) return false;
return true;
}
-bool PagedSpace::Expand() {
+bool PagedSpace::Expand(intptr_t size_in_bytes) {
if (!CanExpand()) return false;
+ Page* last_page = anchor_.prev_page();
+ if (last_page != &anchor_) {
+ // We have run out of linear allocation space. This may be because the
+ // most recently allocated page (stored last in the list) is a small one
+ // that starts on a page-aligned boundary but does not have a full
+ // kPageSize of committed memory. In that case, commit more memory for
+ // the page.
+ intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
+ last_page->reserved_memory()->size() :
+ Page::kPageSize;
+ if (last_page->size() < reserved_page_size &&
+ (reserved_page_size - last_page->size()) >= size_in_bytes &&
+ !last_page->IsEvacuationCandidate() &&
+ last_page->WasSwept()) {
+ last_page->CommitMore(size_in_bytes);
+ return true;
+ }
+ }
+
+ // We initially only commit a part of the page, but the deserialization
+ // of the initial snapshot makes the assumption that it can deserialize
+ // into linear memory of a certain size per space, so some of the spaces
+ // need to have a little more committed memory.
+ intptr_t initial =
+ Max(OS::CommitPageSize(), static_cast<intptr_t>(Page::kGrowthUnit));
+
+ ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
+
+ intptr_t expansion_size =
+ Max(initial,
+ RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
+ MemoryChunk::kObjectStartOffset;
+
Page* p = heap()->isolate()->memory_allocator()->
- AllocatePage(this, executable());
+ AllocatePage(expansion_size, this, executable());
if (p == NULL) return false;
ASSERT(Capacity() <= max_capacity_);
allocation_info_.top = allocation_info_.limit = NULL;
}
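+ // Pages may be only partially committed, so record the committed object
+ // area size before the page is unlinked and freed.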
+ intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
+
page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
}
ASSERT(Capacity() > 0);
- ASSERT(Capacity() % Page::kObjectAreaSize == 0);
- accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+ accounting_stats_.ShrinkSpace(size);
}
// is big enough to be a FreeSpace with at least one extra word (the next
// pointer), we set its map to be the free space map and its size to an
// appropriate array length for the desired size from HeapObject::Size().
- // If the block is too small (eg, one or two words), to hold both a size
+ // If the block is too small (e.g. one or two words) to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
}
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
+ int* node_size,
+ int minimum_size) {
FreeListNode* node = *list;
if (node == NULL) return NULL;
+ ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
+
while (node != NULL &&
Page::FromAddress(node->address())->IsEvacuationCandidate()) {
available_ -= node->Size();
node = node->next();
}
- if (node != NULL) {
- *node_size = node->Size();
- *list = node->next();
- } else {
+ if (node == NULL) {
*list = NULL;
+ return NULL;
}
+ // Gets the size without checking the map. When we are booting we have
+ // a FreeListNode before we have created its map.
+ intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
+
+ // We don't search the list for one that fits, preferring to look in the
+ // list of larger nodes, but we do check the first in the list, because
+ // if we had to expand the space or page we may have placed an entry that
+ // was just long enough at the head of one of the lists.
+ if (size < minimum_size) return NULL;
+
+ *node_size = size;
+ available_ -= size;
+ *list = node->next();
+
return node;
}
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
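+// Returns the head of the given free list if it starts exactly at limit
+// (i.e. it abuts the current linear allocation area) and is big enough for
+// size_in_bytes; otherwise returns NULL and leaves the list untouched.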
+FreeListNode* FreeList::FindAbuttingNode(
+ int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
+ FreeListNode* first_node = *list_head;
+ if (first_node != NULL &&
+ first_node->address() == limit &&
+ reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
+ !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
+ FreeListNode* answer = first_node;
+ int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
+ available_ -= size;
+ *node_size = size;
+ *list_head = first_node->next();
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ return answer;
+ }
+ return NULL;
+}
+
+
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
+ int* node_size,
+ Address limit) {
FreeListNode* node = NULL;
- if (size_in_bytes <= kSmallAllocationMax) {
- node = PickNodeFromList(&small_list_, node_size);
+ if (limit != NULL) {
+ // We may have a memory area at the head of the free list, which abuts the
+ // old linear allocation area. This happens if the linear allocation area
+ // has been shortened to allow an incremental marking step to be performed.
+ // In that case we prefer to return the free memory area that is contiguous
+ // with the old linear allocation area.
+ node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
if (node != NULL) return node;
- }
-
- if (size_in_bytes <= kMediumAllocationMax) {
- node = PickNodeFromList(&medium_list_, node_size);
+ node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
if (node != NULL) return node;
}
- if (size_in_bytes <= kLargeAllocationMax) {
- node = PickNodeFromList(&large_list_, node_size);
- if (node != NULL) return node;
- }
+ node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ if (node != NULL) return node;
+
+ node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ if (node != NULL) return node;
+
+ node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ if (node != NULL) return node;
+ // The tricky third clause in this for statement is due to the fact that
+ // PickNodeFromList can cut pages out of the list if they are unavailable for
+ // new allocation (e.g. if they are on a page that has been scheduled for
+ // evacuation).
for (FreeListNode** cur = &huge_list_;
*cur != NULL;
- cur = (*cur)->next_address()) {
- FreeListNode* cur_node = *cur;
- while (cur_node != NULL &&
- Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
- cur_node = cur_node->next();
- }
-
- *cur = cur_node;
- if (cur_node == NULL) break;
-
- ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- node = *cur;
- *node_size = size;
- *cur = node->next();
- break;
- }
+ cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
+ node = PickNodeFromList(cur, node_size, size_in_bytes);
+ ASSERT(IsVeryLong() || available_ == SumFreeLists());
+ if (node != NULL) return node;
}
return node;
ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ FreeListNode* new_node =
+ FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
if (new_node == NULL) return NULL;
- available_ -= new_node_size;
+ if (new_node->address() == owner_->limit()) {
+ // The new freelist node we were given is an extension of the one we had
+ // last. This commonly happens when we extend a small page by
+ // committing more memory. In this case we just add the new node to the
+ // linear allocation area and recurse.
+ owner_->Allocate(new_node_size);
+ owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
+ MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
+ Object* answer;
+ if (!allocation->ToObject(&answer)) return NULL;
+ return HeapObject::cast(answer);
+ }
+
ASSERT(IsVeryLong() || available_ == SumFreeLists());
int bytes_left = new_node_size - size_in_bytes;
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
- owner_->Free(owner_->top(), old_linear_size);
+ if (old_linear_size != 0) {
+ owner_->AddToFreeLists(owner_->top(), old_linear_size);
+ }
#ifdef DEBUG
for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
- owner_->Free(new_node->address() + size_in_bytes + linear_size,
- new_node_size - size_in_bytes - linear_size);
+ owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
+ new_node_size - size_in_bytes - linear_size);
owner_->SetTop(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left > 0) {
owner_->SetTop(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
} else {
+ ASSERT(bytes_left == 0);
// TODO(gc) Try not freeing linear allocation region when bytes_left
// are zero.
owner_->SetTop(NULL, NULL);
HeapObject* allocation = HeapObject::cast(object);
Address top = allocation_info_.top;
if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
+ Address new_top = allocation->address();
+ ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
+ allocation_info_.top = new_top;
return true;
}
// There may be a borderline case here where the allocation succeeded, but
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
+ AddToFreeLists(top(), old_linear_size);
SetTop(NULL, NULL);
// Stop lazy sweeping and clear marking bits for unswept pages.
// Mark the old linear allocation area with a free space so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
- Free(top(), old_linear_size);
+ AddToFreeLists(top(), old_linear_size);
SetTop(new_area->address(), new_area->address() + size_in_bytes);
- Allocate(size_in_bytes);
+ // The AddToFreeLists call above will reduce the size of the space in the
+ // allocation stats. We don't need to add this linear area to the size
+ // with an Allocate(size_in_bytes) call here, because the
+ // free_list_.Allocate() call above already accounted for this memory.
return true;
}
}
// Try to expand the space and allocate in the new next page.
- if (Expand()) {
+ if (Expand(size_in_bytes)) {
return free_list_.Allocate(size_in_bytes);
}
heap()->mark_compact_collector()->ReportDeleteIfNeeded(
object, heap()->isolate());
size_ -= static_cast<int>(page->size());
+ ASSERT(size_ >= 0);
objects_size_ -= object->Size();
page_count_--;
static const int kObjectStartOffset = kBodyOffset - 1 +
(kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
- size_t size() const { return size_; }
+ intptr_t size() const { return size_; }
- void set_size(size_t size) {
- size_ = size;
- }
+ void set_size(size_t size) { size_ = size; }
Executability executable() {
return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
- Address ObjectAreaEnd() { return address() + Page::kPageSize; }
+ Address ObjectAreaEnd() { return address() + size(); }
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return address() + offset;
}
+ // Expand the committed area for pages that are small.
+ void CommitMore(intptr_t space_needed);
+
// ---------------------------------------------------------------------
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
+ // For a 1Mbyte page grow 64k at a time.
+ static const int kGrowthUnit = 1 << (kPageSizeBits - 4);
+
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
FreeBlock(Address start_arg, size_t size_arg)
: start(start_arg), size(size_arg) {
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
}
FreeBlock(void* start_arg, size_t size_arg)
: start(static_cast<Address>(start_arg)), size(size_arg) {
ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
- ASSERT(size >= static_cast<size_t>(Page::kPageSize));
}
Address start;
void TearDown();
- Page* AllocatePage(PagedSpace* owner, Executability executable);
+ Page* AllocatePage(intptr_t object_area_size,
+ PagedSpace* owner,
+ Executability executable);
LargePage* AllocateLargePage(intptr_t object_size,
Executability executable,
void Free(MemoryChunk* chunk);
// Returns the maximum available bytes of heaps.
- intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+ intptr_t Available() {
+ return capacity_ < memory_allocator_reserved_ ?
+ 0 :
+ capacity_ - memory_allocator_reserved_;
+ }
// Returns allocated spaces in bytes.
- intptr_t Size() { return size_; }
+ intptr_t Size() { return memory_allocator_reserved_; }
// Returns the maximum available executable bytes of heaps.
intptr_t AvailableExecutable() {
#endif
MemoryChunk* AllocateChunk(intptr_t body_size,
+ intptr_t committed_body_size,
Executability executable,
Space* space);
size_t alignment,
VirtualMemory* controller);
Address AllocateAlignedMemory(size_t requested,
+ size_t committed,
size_t alignment,
Executability executable,
VirtualMemory* controller);
// and false otherwise.
bool UncommitBlock(Address start, size_t size);
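+ // Counter updates, debug zapping and allocation callbacks for a newly
+ // allocated chunk. reserved_size and committed_size may differ when only
+ // part of the chunk is committed up front.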
+ void AllocationBookkeeping(Space* owner,
+ Address base,
+ intptr_t reserved_size,
+ intptr_t committed_size,
+ Executability executable);
+
// Zaps a contiguous block of memory [start..(start+size)[ thus
// filling it up with a recognizable non-NULL bit pattern.
void ZapBlock(Address start, size_t size);
size_t capacity_executable_;
// Allocated space size in bytes.
- size_t size_;
+ size_t memory_allocator_reserved_;
// Allocated executable space size in bytes.
size_t size_executable_;
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
- FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+ FreeListNode* PickNodeFromList(FreeListNode** list,
+ int* node_size,
+ int minimum_size);
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
+ FreeListNode* FindAbuttingNode(int size_in_bytes,
+ int* node_size,
+ Address limit,
+ FreeListNode** list_head);
PagedSpace* owner_;
Heap* heap_;
// free bytes that were not found at all due to lazy sweeping.
virtual intptr_t Waste() { return accounting_stats_.Waste(); }
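+ // The allocation granularity of objects in this space. Spaces with
+ // fixed-size objects (such as the map space) override this so that a
+ // partially committed page can be extended on an object boundary.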
+ virtual int ObjectAlignment() { return kObjectAlignment; }
+
// Returns the allocation pointer in this space.
Address top() { return allocation_info_.top; }
Address limit() { return allocation_info_.limit; }
// the free list or accounted as waste.
// Returns the number of bytes that were actually added to the free list
// (the remainder is accounted as waste).
- int Free(Address start, int size_in_bytes) {
+ int AddToFreeLists(Address start, int size_in_bytes) {
int wasted = free_list_.Free(start, size_in_bytes);
accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
return size_in_bytes - wasted;
// Set space allocation info.
void SetTop(Address top, Address limit) {
+ ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
allocation_info_.top = top;
void IncreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
- unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
+ unswept_free_bytes_ +=
+ (p->ObjectAreaEnd() - p->ObjectAreaStart()) - p->LiveBytes();
}
void DecreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
- unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+ unswept_free_bytes_ -=
+ (p->ObjectAreaEnd() - p->ObjectAreaStart() - p->LiveBytes());
}
bool AdvanceSweeper(intptr_t bytes_to_sweep);
return !first_unswept_page_->is_valid();
}
+ inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
FreeList::SizeStats sizes;
free_list_.CountFreeListItems(p, &sizes);
+ intptr_t object_area_size = p->ObjectAreaEnd() - p->ObjectAreaStart();
+
intptr_t ratio;
intptr_t ratio_threshold;
if (identity() == CODE_SPACE) {
ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
- Page::kObjectAreaSize;
+ object_area_size;
ratio_threshold = 10;
} else {
ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
- Page::kObjectAreaSize;
+ object_area_size;
ratio_threshold = 15;
}
identity(),
static_cast<int>(sizes.small_size_),
static_cast<double>(sizes.small_size_ * 100) /
- Page::kObjectAreaSize,
+ object_area_size,
static_cast<int>(sizes.medium_size_),
static_cast<double>(sizes.medium_size_ * 100) /
- Page::kObjectAreaSize,
+ object_area_size,
static_cast<int>(sizes.large_size_),
static_cast<double>(sizes.large_size_ * 100) /
- Page::kObjectAreaSize,
+ object_area_size,
static_cast<int>(sizes.huge_size_),
static_cast<double>(sizes.huge_size_ * 100) /
- Page::kObjectAreaSize,
+ object_area_size,
(ratio > ratio_threshold) ? "[fragmented]" : "");
}
- if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+ if (FLAG_always_compact && sizes.Total() != object_area_size) {
return 1;
}
if (ratio <= ratio_threshold) return 0; // Not fragmented.
// Normal allocation information.
AllocationInfo allocation_info_;
- // Bytes of each page that cannot be allocated. Possibly non-zero
- // for pages in spaces with only fixed-size objects. Always zero
- // for pages in spaces with variable sized objects (those pages are
- // padded with free-list nodes).
- int page_extra_;
-
bool was_swept_conservatively_;
// The first page to be swept when the lazy sweeper advances. Is set
// done conservatively.
intptr_t unswept_free_bytes_;
- // Expands the space by allocating a fixed number of pages. Returns false if
- // it cannot allocate requested number of pages from OS, or if the hard heap
- // size limit has been hit.
- bool Expand();
+ // Expands the space by allocating a page. Returns false if it cannot
+ // allocate a page from the OS, or if the hard heap size limit has been
+ // hit. The new page will have at least enough committed space to satisfy
+ // the object size indicated by the allocation_size argument.
+ bool Expand(intptr_t allocation_size);
// Generic fast case allocation function that tries linear allocation at the
// address denoted by top in allocation_info_.
anchor_(this),
current_page_(NULL) { }
- // Sets up the semispace using the given chunk.
+ // Sets up the semispace using the given chunk. After this, call Commit()
+ // to make the semispace usable.
void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
intptr_t max_capacity,
AllocationSpace id,
Executability executable)
- : PagedSpace(heap, max_capacity, id, executable) {
- page_extra_ = 0;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->ObjectAreaEnd();
- }
+ : PagedSpace(heap, max_capacity, id, executable) { }
public:
TRACK_MEMORY("OldSpace")
const char* name)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
object_size_in_bytes_(object_size_in_bytes),
- name_(name) {
- page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
- }
-
- // The limit of allocation for a page in this space.
- virtual Address PageAllocationLimit(Page* page) {
- return page->ObjectAreaEnd() - page_extra_;
- }
+ name_(name) { }
int object_size_in_bytes() { return object_size_in_bytes_; }
+ virtual int ObjectAlignment() { return object_size_in_bytes_; }
+
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
Address map_aligned_end = MapEndAlign(end);
ASSERT(map_aligned_start == start);
- ASSERT(map_aligned_end == end);
FindPointersToNewSpaceInMaps(map_aligned_start,
map_aligned_end,
RegionCallback region_callback,
ObjectSlotCallback slot_callback) {
Address visitable_start = page->ObjectAreaStart();
- Address end_of_page = page->ObjectAreaEnd();
Address visitable_end = visitable_start;
Object* free_space_map = heap_->free_space_map();
Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
- while (visitable_end < end_of_page) {
- Object* o = *reinterpret_cast<Object**>(visitable_end);
- // Skip fillers but not things that look like fillers in the special
- // garbage section which can contain anything.
- if (o == free_space_map ||
- o == two_pointer_filler_map ||
- (visitable_end == space->top() && visitable_end != space->limit())) {
- if (visitable_start != visitable_end) {
- // After calling this the special garbage section may have moved.
- (this->*region_callback)(visitable_start,
- visitable_end,
- slot_callback);
- if (visitable_end >= space->top() && visitable_end < space->limit()) {
- visitable_end = space->limit();
- visitable_start = visitable_end;
- continue;
+ while (true) { // While the page grows (doesn't normally happen).
+ Address end_of_page = page->ObjectAreaEnd();
+ while (visitable_end < end_of_page) {
+ Object* o = *reinterpret_cast<Object**>(visitable_end);
+ // Skip fillers but not things that look like fillers in the special
+ // garbage section which can contain anything.
+ if (o == free_space_map ||
+ o == two_pointer_filler_map ||
+ (visitable_end == space->top() && visitable_end != space->limit())) {
+ if (visitable_start != visitable_end) {
+ // After calling this the special garbage section may have moved.
+ (this->*region_callback)(visitable_start,
+ visitable_end,
+ slot_callback);
+ if (visitable_end >= space->top() && visitable_end < space->limit()) {
+ visitable_end = space->limit();
+ visitable_start = visitable_end;
+ continue;
+ }
+ }
+ if (visitable_end == space->top() && visitable_end != space->limit()) {
+ visitable_start = visitable_end = space->limit();
+ } else {
+ // At this point we are either at the start of a filler or we are at
+ // the point where the space->top() used to be before the
+ // visit_pointer_region call above. Either way we can skip the
+ // object at the current spot: We don't promise to visit objects
+ // allocated during heap traversal, and if space->top() moved then it
+ // must be because an object was allocated at this point.
+ visitable_start =
+ visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+ visitable_end = visitable_start;
}
- }
- if (visitable_end == space->top() && visitable_end != space->limit()) {
- visitable_start = visitable_end = space->limit();
} else {
- // At this point we are either at the start of a filler or we are at
- // the point where the space->top() used to be before the
- // visit_pointer_region call above. Either way we can skip the
- // object at the current spot: We don't promise to visit objects
- // allocated during heap traversal, and if space->top() moved then it
- // must be because an object was allocated at this point.
- visitable_start =
- visitable_end + HeapObject::FromAddress(visitable_end)->Size();
- visitable_end = visitable_start;
+ ASSERT(o != free_space_map);
+ ASSERT(o != two_pointer_filler_map);
+ ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+ visitable_end += kPointerSize;
}
- } else {
- ASSERT(o != free_space_map);
- ASSERT(o != two_pointer_filler_map);
- ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
- visitable_end += kPointerSize;
}
+ ASSERT(visitable_end >= end_of_page);
+ // If the page did not grow we are done.
+ if (end_of_page == page->ObjectAreaEnd()) break;
}
- ASSERT(visitable_end == end_of_page);
+ ASSERT(visitable_end == page->ObjectAreaEnd());
if (visitable_start != visitable_end) {
(this->*region_callback)(visitable_start,
visitable_end,
}
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
+template<typename int_type>
+inline int_type RoundUpToPowerOf2(int_type x_argument) {
+ uintptr_t x = static_cast<uintptr_t>(x_argument);
ASSERT(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >> 16);
- return x + 1;
+ return static_cast<int_type>(x + 1);
}
obj = iterator.next()) {
size_of_objects_2 += obj->Size();
}
- // Delta must be within 5% of the larger result.
- // TODO(gc): Tighten this up by distinguishing between byte
- // arrays that are real and those that merely mark free space
- // on the heap.
+ // Delta must be within 1% of the larger result.
if (size_of_objects_1 > size_of_objects_2) {
intptr_t delta = size_of_objects_1 - size_of_objects_2;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
"Iterator: %" V8_PTR_PREFIX "d, "
"delta: %" V8_PTR_PREFIX "d\n",
size_of_objects_1, size_of_objects_2, delta);
- CHECK_GT(size_of_objects_1 / 20, delta);
+ CHECK_GT(size_of_objects_1 / 100, delta);
} else {
intptr_t delta = size_of_objects_2 - size_of_objects_1;
PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
intptr_t booted_memory = MemoryInUse();
if (sizeof(initial_memory) == 8) {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6444.
+ CHECK_LE(booted_memory - initial_memory, 3050 * 1024); // 2984.
} else {
- CHECK_LE(booted_memory - initial_memory, 6777 * 1024); // 6596.
+ CHECK_LE(booted_memory - initial_memory, 3050 * 1024); // 3008.
}
} else {
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(booted_memory - initial_memory, 6500 * 1024); // 6356.
+ CHECK_LE(booted_memory - initial_memory, 2000 * 1024); // 1940.
} else {
- CHECK_LE(booted_memory - initial_memory, 6654 * 1024); // 6424
+ CHECK_LE(booted_memory - initial_memory, 2000 * 1024); // 1948
}
}
}
heap->MaxReserved(),
OLD_POINTER_SPACE,
NOT_EXECUTABLE);
- Page* first_page =
- memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+ Page* first_page = memory_allocator->AllocatePage(
+ Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
first_page->InsertAfter(faked_space.anchor()->prev_page());
CHECK(first_page->is_valid());
// Again, we should get n or n - 1 pages.
Page* other =
- memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+ memory_allocator->AllocatePage(
+ Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
CHECK(other->is_valid());
total_pages++;
other->InsertAfter(first_page);