// Mark the new block as FreeSpace to make sure the heap is iterable
// while we are capturing stack trace.
- FreeListNode::FromAddress(addr)->set_size(heap, size);
- DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
- DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+ heap->CreateFillerObjectAt(addr, size);
Isolate* isolate = heap->isolate();
int length = 0;
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
+#include "src/full-codegen.h"
#include "src/global-handles.h"
#include "src/heap-profiler.h"
#include "src/heap-snapshot-generator-inl.h"
}
+// Debug-only check that the default (no custom script) snapshot stays within
+// the hard-coded per-space reservation limits below. These limits mirror the
+// sizes previously enforced by PagedSpace::SizeOfFirstPage.
+void CheckDefaultReservationSizes(const i::StartupSerializer& startup_ser,
+ const i::PartialSerializer& context_ser) {
+#ifdef DEBUG
+ i::List<i::SerializedData::Reservation> startup_reservations;
+ i::List<i::SerializedData::Reservation> context_reservations;
+ startup_ser.EncodeReservations(&startup_reservations);
+ context_ser.EncodeReservations(&context_reservations);
+ for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
+ // Exactly one chunk per space.
+ CHECK(startup_reservations[space].is_last());
+ CHECK(context_reservations[space].is_last());
+ uint32_t sum = startup_reservations[space].chunk_size() +
+ context_reservations[space].chunk_size();
+ uint32_t limit = 0;
+ // With an out-of-line constant pool, transfer the constant pool allowance
+ // from the code space to the old pointer space.
+ const int constant_pool_delta = i::FLAG_enable_ool_constant_pool ? 48 : 0;
+ switch (space) {
+ case i::NEW_SPACE:
+ limit = 3 * i::kPointerSize;
+ break;
+ case i::OLD_POINTER_SPACE:
+ limit = (128 + constant_pool_delta) * i::kPointerSize * i::KB;
+ break;
+ case i::OLD_DATA_SPACE:
+ limit = 192 * i::KB;
+ break;
+ case i::MAP_SPACE:
+ limit = 16 * i::kPointerSize * i::KB;
+ break;
+ case i::CELL_SPACE:
+ limit = 16 * i::kPointerSize * i::KB;
+ break;
+ case i::PROPERTY_CELL_SPACE:
+ limit = 8 * i::kPointerSize * i::KB;
+ break;
+ case i::CODE_SPACE:
+ limit = RoundUp((480 - constant_pool_delta) * i::KB *
+ i::FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+ i::kPointerSize);
+ break;
+ default:
+ break;
+ }
+ CHECK_LE(sum, limit);
+ }
+#endif // DEBUG
+}
+
+
+
+
StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
Isolate::CreateParams params;
params.enable_serializer = true;
i::SnapshotData sd(snapshot_sink, ser);
i::SnapshotData csd(context_sink, context_ser);
+ if (custom_source == NULL) CheckDefaultReservationSizes(ser, context_ser);
+
result = i::Snapshot::CreateSnapshotBlob(sd.RawData(), csd.RawData(),
metadata);
}
static const int kBootCodeSizeMultiplier = 120;
#elif V8_TARGET_ARCH_MIPS64
static const int kCodeSizeMultiplier = 149;
- static const int kBootCodeSizeMultiplier = 120;
+ static const int kBootCodeSizeMultiplier = 170;
#else
#error Unsupported target architecture.
#endif
}
-void Heap::RepairFreeListsAfterBoot() {
+void Heap::RepairFreeListsAfterDeserialization() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
- space->RepairFreeListsAfterBoot();
+ space->RepairFreeListsAfterDeserialization();
}
}
} else {
allocation = paged_space(space)->AllocateRaw(size);
}
- FreeListNode* node;
- if (allocation.To(&node)) {
+ HeapObject* free_space;
+ if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
- node->set_size(this, size);
+ Address free_space_address = free_space->address();
+ CreateFillerObjectAt(free_space_address, size);
DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
- chunk.start = node->address();
- chunk.end = node->address() + size;
+ chunk.start = free_space_address;
+ chunk.end = free_space_address + size;
} else {
perform_gc = true;
break;
void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
+ // At this point, we may be deserializing the heap from a snapshot, and
+ // none of the maps have been created yet and are NULL.
if (size == kPointerSize) {
- filler->set_map_no_write_barrier(one_pointer_filler_map());
+ filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
+ DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
- filler->set_map_no_write_barrier(two_pointer_filler_map());
+ filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
+ DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
} else {
- filler->set_map_no_write_barrier(free_space_map());
- FreeSpace::cast(filler)->set_size(size);
+ filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
+ DCHECK(filler->map() == NULL || filler->map() == free_space_map());
+ FreeSpace::cast(filler)->nobarrier_set_size(size);
}
}
// Iterates the whole code space to clear all ICs of the given kind.
void ClearAllICsByKind(Code::Kind kind);
- // For use during bootup.
- void RepairFreeListsAfterBoot();
+ // FreeSpace objects have a null map after deserialization. Update the map.
+ void RepairFreeListsAfterDeserialization();
template <typename T>
static inline bool IsOneByte(T t, int chars);
return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
-
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
- Map* map = object->map();
- Heap* heap = object->GetHeap();
- return map == heap->raw_unchecked_free_space_map() ||
- map == heap->raw_unchecked_one_pointer_filler_map() ||
- map == heap->raw_unchecked_two_pointer_filler_map();
-}
}
} // namespace v8::internal
intptr_t size = AreaSize();
- if (anchor_.next_page() == &anchor_) {
- size = SizeOfFirstPage();
- }
-
Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
executable());
if (p == NULL) return false;
}
-intptr_t PagedSpace::SizeOfFirstPage() {
- // If the snapshot contains a custom script, all size guarantees are off.
- if (Snapshot::EmbedsScript()) return AreaSize();
- // If using an ool constant pool then transfer the constant pool allowance
- // from the code space to the old pointer space.
- static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
- int size = 0;
- switch (identity()) {
- case OLD_POINTER_SPACE:
- size = (128 + constant_pool_delta) * kPointerSize * KB;
- break;
- case OLD_DATA_SPACE:
- size = 192 * KB;
- break;
- case MAP_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case CELL_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case PROPERTY_CELL_SPACE:
- size = 8 * kPointerSize * KB;
- break;
- case CODE_SPACE: {
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- // When code range exists, code pages are allocated in a special way
- // (from the reserved code range). That part of the code is not yet
- // upgraded to handle small pages.
- size = AreaSize();
- } else {
- size = RoundUp((480 - constant_pool_delta) * KB *
- FullCodeGenerator::kBootCodeSizeMultiplier / 100,
- kPointerSize);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- return Min(size, AreaSize());
-}
-
-
int PagedSpace::CountTotalPages() {
PageIterator it(this);
int count = 0;
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
- DCHECK(size_in_bytes > 0);
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
-
- // We write a map and possibly size information to the block. If the block
- // is big enough to be a FreeSpace with at least one extra word (the next
- // pointer), we set its map to be the free space map and its size to an
- // appropriate array length for the desired size from HeapObject::Size().
- // If the block is too small (eg, one or two words), to hold both a size
- // field and a next pointer, we give it a filler map that gives it the
- // correct size.
- if (size_in_bytes > FreeSpace::kHeaderSize) {
- // Can't use FreeSpace::cast because it fails during deserialization.
- // We have to set the size first with a release store before we store
- // the map because a concurrent store buffer scan on scavenge must not
- // observe a map with an invalid size.
- FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->nobarrier_set_size(size_in_bytes);
- synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
- } else if (size_in_bytes == kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
- } else if (size_in_bytes == 2 * kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
- } else {
- UNREACHABLE();
- }
- // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
- // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
- DCHECK(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kNextOffset));
- } else {
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kPointerSize));
- }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
- DCHECK(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
- } else {
- return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
- }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
- DCHECK(IsFreeListNode(this));
- // While we are booting the VM the free space map will actually be null. So
- // we have to make sure that we don't try to use it for anything at that
- // stage.
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
- base::NoBarrier_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
- reinterpret_cast<base::AtomicWord>(next));
- } else {
- base::NoBarrier_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
- reinterpret_cast<base::AtomicWord>(next));
- }
-}
-
-
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top() != NULL) {
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode* t = top();
- FreeListNode** n = &t;
+ FreeSpace* t = top();
+ FreeSpace** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ FreeSpace* free_space = *n;
sum += free_space->Size();
*n = (*n)->next();
} else {
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode* node = top();
+ FreeSpace* node = top();
while (node != NULL) {
if (Page::FromAddress(node->address()) == p) return true;
node = node->next();
}
-FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
- FreeListNode* node = top();
+FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+ FreeSpace* node = top();
if (node == NULL) return NULL;
while (node != NULL &&
Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+ available_ -= node->Size();
node = node->next();
}
if (node != NULL) {
set_top(node->next());
- *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+ *node_size = node->Size();
available_ -= *node_size;
} else {
set_top(NULL);
}
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
- int* node_size) {
- FreeListNode* node = PickNodeFromList(node_size);
+FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
+ int* node_size) {
+ FreeSpace* node = PickNodeFromList(node_size);
if (node != NULL && *node_size < size_in_bytes) {
Free(node, *node_size);
*node_size = 0;
}
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top());
- set_top(node);
+void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
+ DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
+ free_space->set_next(top());
+ set_top(free_space);
if (end_ == NULL) {
- end_ = node;
+ end_ = free_space;
}
available_ += size_in_bytes;
}
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top();
+ FreeSpace* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, size_in_bytes);
+ heap_->CreateFillerObjectAt(start, size_in_bytes);
+
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
return size_in_bytes;
}
+ FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
if (size_in_bytes <= kSmallListMax) {
- small_list_.Free(node, size_in_bytes);
+ small_list_.Free(free_space, size_in_bytes);
page->add_available_in_small_free_list(size_in_bytes);
} else if (size_in_bytes <= kMediumListMax) {
- medium_list_.Free(node, size_in_bytes);
+ medium_list_.Free(free_space, size_in_bytes);
page->add_available_in_medium_free_list(size_in_bytes);
} else if (size_in_bytes <= kLargeListMax) {
- large_list_.Free(node, size_in_bytes);
+ large_list_.Free(free_space, size_in_bytes);
page->add_available_in_large_free_list(size_in_bytes);
} else {
- huge_list_.Free(node, size_in_bytes);
+ huge_list_.Free(free_space, size_in_bytes);
page->add_available_in_huge_free_list(size_in_bytes);
}
}
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeListNode* node = NULL;
+FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+ FreeSpace* node = NULL;
Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
}
int huge_list_available = huge_list_.available();
- FreeListNode* top_node = huge_list_.top();
- for (FreeListNode** cur = &top_node; *cur != NULL;
+ FreeSpace* top_node = huge_list_.top();
+ for (FreeSpace** cur = &top_node; *cur != NULL;
cur = (*cur)->next_address()) {
- FreeListNode* cur_node = *cur;
+ FreeSpace* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ int size = cur_node->Size();
huge_list_available -= size;
page = Page::FromAddress(cur_node->address());
page->add_available_in_huge_free_list(-size);
break;
}
- DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
+ int size = cur_node->Size();
if (size >= size_in_bytes) {
// Large enough node found. Unlink it from the list.
node = *cur;
old_linear_size);
int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
owner_->SetTopAndLimit(NULL, NULL);
return NULL;
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top();
+ FreeSpace* cur = top();
while (cur != NULL) {
DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->nobarrier_size();
+ sum += cur->nobarrier_size();
cur = cur->next();
}
return sum;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top();
+ FreeSpace* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
-void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
+void PagedSpace::RepairFreeListsAfterDeserialization() {
+ free_list_.RepairLists(heap());
+ // Each page may have a small free space that is not tracked by a free list.
+ // Update the maps for those free space objects.
+ PageIterator iterator(this);
+ while (iterator.has_next()) {
+ Page* page = iterator.next();
+ int size = static_cast<int>(page->non_available_small_blocks());
+ if (size == 0) continue;
+ // The untracked block sits at the very end of the page; rewrite its map
+ // by re-creating the filler object at that address.
+ Address address = page->OffsetToAddress(Page::kPageSize - size);
+ heap()->CreateFillerObjectAt(address, size);
+ }
+}
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
// -----------------------------------------------------------------------------
// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap. They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object). They have a size and a next pointer. The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode : public HeapObject {
- public:
- // Obtain a free-list node from a raw address. This is not a cast because
- // it does not check nor require that the first word at the address is a map
- // pointer.
- static FreeListNode* FromAddress(Address address) {
- return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
- }
-
- static inline bool IsFreeListNode(HeapObject* object);
-
- // Set the size in bytes, which can be read with HeapObject::Size(). This
- // function also writes a map to the first word of the block so that it
- // looks like a heap object to the garbage collector and heap iteration
- // functions.
- void set_size(Heap* heap, int size_in_bytes);
-
- // Accessors for the next field.
- inline FreeListNode* next();
- inline FreeListNode** next_address();
- inline void set_next(FreeListNode* next);
-
- inline void Zap();
-
- static inline FreeListNode* cast(Object* object) {
- return reinterpret_cast<FreeListNode*>(object);
- }
-
- private:
- static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
void Reset();
- void Free(FreeListNode* node, int size_in_bytes);
+ void Free(FreeSpace* node, int size_in_bytes);
- FreeListNode* PickNodeFromList(int* node_size);
- FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+ FreeSpace* PickNodeFromList(int* node_size);
+ FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
intptr_t EvictFreeListItemsInList(Page* p);
bool ContainsPageFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
- FreeListNode* top() const {
- return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
+ FreeSpace* top() const {
+ return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
}
- void set_top(FreeListNode* top) {
+ void set_top(FreeSpace* top) {
base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
}
- FreeListNode** GetEndAddress() { return &end_; }
- FreeListNode* end() const { return end_; }
- void set_end(FreeListNode* end) { end_ = end; }
+ FreeSpace* end() const { return end_; }
+ void set_end(FreeSpace* end) { end_ = end; }
int* GetAvailableAddress() { return &available_; }
int available() const { return available_; }
#endif
private:
- // top_ points to the top FreeListNode* in the free list category.
+ // top_ points to the top FreeSpace* in the free list category.
base::AtomicWord top_;
- FreeListNode* end_;
+ FreeSpace* end_;
base::Mutex mutex_;
// Total available bytes in all blocks of this free list category.
FreeListCategory* large_list() { return &large_list_; }
FreeListCategory* huge_list() { return &huge_list_; }
+ static const int kSmallListMin = 0x20 * kPointerSize;
+
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
- FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
PagedSpace* owner_;
Heap* heap_;
- static const int kSmallListMin = 0x20 * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
static const int kLargeListMax = 0x3fff * kPointerSize;
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
- void RepairFreeListsAfterBoot();
+ void RepairFreeListsAfterDeserialization();
// Prepares for a mark-compact GC.
void PrepareForMarkCompact();
// Maximum capacity of this space.
intptr_t max_capacity_;
- intptr_t SizeOfFirstPage();
-
// Accounting information for this space.
AllocationStats accounting_stats_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
- friend class IsolateInitializer;
friend class OptimizingCompilerThread;
friend class SweeperThread;
friend class ThreadManager;
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(GlobalObject)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(JSArray)
SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
+// Returns the raw next-pointer stored at kNextOffset. During deserialization
+// the free space map does not exist yet, so a NULL map is tolerated until
+// deserialization_complete() is true.
+FreeSpace* FreeSpace::next() {
+ DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ (!GetHeap()->deserialization_complete() && map() == NULL));
+ DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ return reinterpret_cast<FreeSpace*>(
+ Memory::Address_at(address() + kNextOffset));
+}
+
+
+// Address of the next-pointer slot, for in-place list surgery (see
+// FreeList::FindNodeFor). Same NULL-map caveat as next().
+FreeSpace** FreeSpace::next_address() {
+ DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ (!GetHeap()->deserialization_complete() && map() == NULL))
+ DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
+}
+
+
+// Stores the next-pointer with a relaxed atomic store; concurrent sweeper
+// threads may read the slot.
+void FreeSpace::set_next(FreeSpace* next) {
+ DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ (!GetHeap()->deserialization_complete() && map() == NULL));
+ DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+ base::NoBarrier_Store(
+ reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+ reinterpret_cast<base::AtomicWord>(next));
+}
+
+
+// Cast that cannot use IsFreeSpace() before deserialization completes,
+// because the map word may still be NULL at that point.
+FreeSpace* FreeSpace::cast(HeapObject* o) {
+ SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
+ return reinterpret_cast<FreeSpace*>(o);
+}
+
+
+
+
uint32_t Name::hash_field() {
return READ_UINT32_FIELD(this, kHashFieldOffset);
}
};
-// FreeSpace represents fixed sized areas of the heap that are not currently in
-// use. Used by the heap and GC.
+// FreeSpace are fixed-size free memory blocks used by the heap and GC.
+// They look like heap objects (are heap object tagged and have a map) so that
+// the heap remains iterable. They have a size and a next pointer.
+// The next pointer is the raw address of the next FreeSpace object (or NULL)
+// in the free list.
class FreeSpace: public HeapObject {
public:
// [size]: size of the free space including the header.
inline int Size() { return size(); }
- DECLARE_CAST(FreeSpace)
+ // Accessors for the next field.
+ inline FreeSpace* next();
+ inline FreeSpace** next_address();
+ inline void set_next(FreeSpace* next);
+
+ inline static FreeSpace* cast(HeapObject* obj);
// Dispatched behavior.
DECLARE_PRINTER(FreeSpace)
// Layout description.
// Size is smi tagged when it is stored.
static const int kSizeOffset = HeapObject::kHeaderSize;
- static const int kHeaderSize = kSizeOffset + kPointerSize;
-
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+ static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
isolate_->heap()->IterateSmiRoots(this);
isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
- isolate_->heap()->RepairFreeListsAfterBoot();
+ isolate_->heap()->RepairFreeListsAfterDeserialization();
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
isolate_->heap()->set_native_contexts_list(
v8::internal::AllocationResult allocation =
space->AllocateRaw(v8::internal::Page::kMaxRegularHeapObjectSize);
if (allocation.IsRetry()) return false;
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), v8::internal::Page::kMaxRegularHeapObjectSize);
+ v8::internal::HeapObject* free_space = NULL;
+ CHECK(allocation.To(&free_space));
+ space->heap()->CreateFillerObjectAt(
+ free_space->address(), v8::internal::Page::kMaxRegularHeapObjectSize);
return true;
}
-static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
- int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
+// Helper function that simulates a fill new-space in the heap.
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+ int extra_bytes) {
+ int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
- if (new_linear_size > 0) {
- // Fill up the current page.
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), new_linear_size);
+ CHECK(space_remaining >= extra_bytes);
+ int new_linear_size = space_remaining - extra_bytes;
+ if (new_linear_size == 0) return;
+ v8::internal::AllocationResult allocation =
+ space->AllocateRaw(new_linear_size);
+ v8::internal::HeapObject* free_space = NULL;
+ CHECK(allocation.To(&free_space));
+ space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
+}
+
+
+static inline void FillCurrentPage(v8::internal::NewSpace* space) {
+ AllocateAllButNBytes(space, 0);
+}
+
+
+static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
+ FillCurrentPage(space);
+ while (FillUpOnePage(space)) {
}
- // Fill up all remaining pages.
- while (FillUpOnePage(space))
- ;
}
}
-// Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
- int extra_bytes) {
- int space_remaining = static_cast<int>(
- *space->allocation_limit_address() - *space->allocation_top_address());
- CHECK(space_remaining >= extra_bytes);
- int new_linear_size = space_remaining - extra_bytes;
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), new_linear_size);
-}
-
-
TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_crankshaft = false;
}
+// Builds a one-byte source string of the form: head + body * repeats + tail.
+// The returned vector owns a NewArray allocation; the caller is responsible
+// for releasing it (e.g. via Vector::Dispose()).
+Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
+ Vector<const uint8_t> body,
+ Vector<const uint8_t> tail, int repeats) {
+ int source_length = head.length() + body.length() * repeats + tail.length();
+ uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
+ CopyChars(source, head.start(), head.length());
+ for (int i = 0; i < repeats; i++) {
+ CopyChars(source + head.length() + i * body.length(), body.start(),
+ body.length());
+ }
+ CopyChars(source + head.length() + repeats * body.length(), tail.start(),
+ tail.length());
+ return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
+ source_length);
+}
+
+
+
+
// Test that the whole heap can be serialized.
UNINITIALIZED_TEST(Serialize) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
params.enable_serializer = true;
v8::Isolate* v8_isolate = v8::Isolate::New(params);
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- Heap* heap = isolate->heap();
{
v8::Isolate::Scope isolate_scope(v8_isolate);
"var r = Math.random() + Math.cos(0);"
"var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
"var s = parseInt('12345');");
+
+ Vector<const uint8_t> source = ConstructSource(
+ STATIC_CHAR_VECTOR("function g() { return [,"),
+ STATIC_CHAR_VECTOR("1,"),
+ STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
+ v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
+ v8_isolate, source.start(), v8::String::kNormalString,
+ source.length());
+ CompileRun(source_str);
+ source.Dispose();
}
// Make sure all builtin scripts are cached.
{
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of env.
- heap->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->heap()->CollectAllAvailableGarbage("snapshotting");
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
CHECK_EQ(5, f);
v8::Handle<v8::String> s = CompileRun("s")->ToString(v8_isolate);
CHECK(s->Equals(v8_str("12345")));
+ int a = CompileRun("a.length")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(100001, a);
+ int b = CompileRun("b.length")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(100002, b);
}
}
v8_isolate->Dispose();
}
-Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
- Vector<const uint8_t> body,
- Vector<const uint8_t> tail, int repeats) {
- int source_length = head.length() + body.length() * repeats + tail.length();
- uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
- CopyChars(source, head.start(), head.length());
- for (int i = 0; i < repeats; i++) {
- CopyChars(source + head.length() + i * body.length(), body.start(),
- body.length());
- }
- CopyChars(source + head.length() + repeats * body.length(), tail.start(),
- tail.length());
- return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
- source_length);
-}
-
-
TEST(SerializeToplevelLargeCodeObject) {
FLAG_serialize_toplevel = true;
LocalContext context;
}
-static inline void FillCurrentPage(v8::internal::NewSpace* space) {
- int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
- *space->allocation_top_address());
- if (new_linear_size == 0) return;
- v8::internal::AllocationResult allocation =
- space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(space->heap(), new_linear_size);
-}
-
-
UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
FLAG_target_semi_space_size = 2;
if (FLAG_optimize_for_size) return;
// Turn the allocation into a proper object so isolate teardown won't
// crash.
- v8::internal::FreeListNode* node =
- v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
- node->set_size(new_space->heap(), 80);
+ HeapObject* free_space = NULL;
+ CHECK(allocation.To(&free_space));
+ new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
}
}
isolate->Dispose();