// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "msan.h"
-#include "platform.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/full-codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mark-compact.h"
+#include "src/msan.h"
+#include "src/platform.h"
namespace v8 {
namespace internal {
}
-bool CodeRange::SetUp(const size_t requested) {
+bool CodeRange::SetUp(size_t requested) {
ASSERT(code_range_ == NULL);
+ if (requested == 0) {
+ // When a target requires the code range feature, we put all code objects
+ // in a kMaximalCodeRangeSize range of virtual address space, so that
+ // they can call each other with near calls.
+ if (kRequiresCodeRange) {
+ requested = kMaximalCodeRangeSize;
+ } else {
+ return true;
+ }
+ }
+
+ ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
code_range_ = new VirtualMemory(requested);
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
// We are sure that we have mapped a block of requested addresses.
ASSERT(code_range_->size() == requested);
- LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+ LOG(isolate_,
+ NewEvent("CodeRange", code_range_->address(), requested));
Address base = reinterpret_cast<Address>(code_range_->address());
Address aligned_base =
RoundUp(reinterpret_cast<Address>(code_range_->address()),
}
-void CodeRange::GetNextAllocationBlock(size_t requested) {
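+// Selects the next free block that is large enough for |requested| bytes.
+// Returns false when the code range is full or too fragmented to satisfy
+// the request.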
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
for (current_allocation_block_index_++;
current_allocation_block_index_ < allocation_list_.length();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
+ return true; // Found a large enough allocation block.
}
}
current_allocation_block_index_ < allocation_list_.length();
current_allocation_block_index_++) {
if (requested <= allocation_list_[current_allocation_block_index_].size) {
- return; // Found a large enough allocation block.
+ return true; // Found a large enough allocation block.
}
}
-
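+ // Leave the index at a valid position before reporting failure.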
+ current_allocation_block_index_ = 0;
// Code range is full or too fragmented.
- V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+ return false;
}
ASSERT(commit_size <= requested_size);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
if (requested_size > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough. This function call may
- // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested_size);
+ // Find an allocation block large enough.
+ if (!GetNextAllocationBlock(requested_size)) return NULL;
}
// Commit the requested memory at the start of the current allocation block.
size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
allocation_list_[current_allocation_block_index_].start += *allocated;
allocation_list_[current_allocation_block_index_].size -= *allocated;
if (*allocated == current.size) {
- GetNextAllocationBlock(0); // This block is used up, get the next one.
+ // This block is used up, get the next one.
+ if (!GetNextAllocationBlock(0)) return NULL;
}
return current.start;
}
size_executable_ -= size;
}
// Code which is part of the code-range does not have its own VirtualMemory.
- ASSERT(!isolate_->code_range()->contains(
- static_cast<Address>(reservation->address())));
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+ ASSERT(isolate_->code_range() == NULL ||
+ !isolate_->code_range()->contains(
+ static_cast<Address>(reservation->address())));
+ ASSERT(executable == NOT_EXECUTABLE ||
+ isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid());
reservation->Release();
}
ASSERT(size_executable_ >= size);
size_executable_ -= size;
}
- if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+ if (isolate_->code_range() != NULL &&
+ isolate_->code_range()->contains(static_cast<Address>(base))) {
ASSERT(executable == EXECUTABLE);
isolate_->code_range()->FreeRawMemory(base, size);
} else {
- ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+ ASSERT(executable == NOT_EXECUTABLE ||
+ isolate_->code_range() == NULL ||
+ !isolate_->code_range()->valid());
bool result = VirtualMemory::ReleaseRegion(base, size);
USE(result);
ASSERT(result);
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->parallel_sweeping_ = 0;
+ chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
}
} else {
CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ ASSERT(code_range != NULL && code_range->valid() &&
+ IsFlagSet(IS_EXECUTABLE));
if (!code_range->CommitRawMemory(start, length)) return false;
}
if (!reservation_.Uncommit(start, length)) return false;
} else {
CodeRange* code_range = heap_->isolate()->code_range();
- ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ ASSERT(code_range != NULL && code_range->valid() &&
+ IsFlagSet(IS_EXECUTABLE));
if (!code_range->UncommitRawMemory(start, length)) return false;
}
}
void MemoryChunk::InsertAfter(MemoryChunk* other) {
- next_chunk_ = other->next_chunk_;
- prev_chunk_ = other;
-
- // This memory barrier is needed since concurrent sweeper threads may iterate
- // over the list of pages while a new page is inserted.
- // TODO(hpayer): find a cleaner way to guarantee that the page list can be
- // expanded concurrently
- MemoryBarrier();
+ MemoryChunk* other_next = other->next_chunk();
- // The following two write operations can take effect in arbitrary order
- // since pages are always iterated by the sweeper threads in LIFO order, i.e,
- // the inserted page becomes visible for the sweeper threads after
- // other->next_chunk_ = this;
- other->next_chunk_->prev_chunk_ = this;
- other->next_chunk_ = this;
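+ // Concurrent sweeper threads may still iterate the page list; the inserted
+ // page only becomes reachable for them through other->set_next_chunk(this)
+ // below, at which point its own links are already set up.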
+ set_next_chunk(other_next);
+ set_prev_chunk(other);
+ other_next->set_prev_chunk(this);
+ other->set_next_chunk(this);
}
void MemoryChunk::Unlink() {
- if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
- heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- next_chunk_->prev_chunk_ = prev_chunk_;
- prev_chunk_->next_chunk_ = next_chunk_;
- prev_chunk_ = NULL;
- next_chunk_ = NULL;
+ MemoryChunk* next_element = next_chunk();
+ MemoryChunk* prev_element = prev_chunk();
+ next_element->set_prev_chunk(prev_element);
+ prev_element->set_next_chunk(next_element);
+ set_prev_chunk(NULL);
+ set_next_chunk(NULL);
}
OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
- if (isolate_->code_range()->exists()) {
+ if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
base = isolate_->code_range()->AllocateRawMemory(chunk_size,
commit_size,
&chunk_size);
executable,
owner);
result->set_reserved_memory(&reservation);
- MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
+ MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
return result;
}
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- unswept_free_bytes_(0) {
+ unswept_free_bytes_(0),
+ end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
CodePageAreaSize();
}
-MaybeObject* PagedSpace::FindObject(Address addr) {
+Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
- if (!Contains(addr)) return Failure::Exception();
+ if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
Page* p = Page::FromAddress(addr);
HeapObjectIterator it(p, NULL);
}
UNREACHABLE();
- return Failure::Exception();
+ return Smi::FromInt(0);
}
int size = 0;
switch (identity()) {
case OLD_POINTER_SPACE:
- size = 72 * kPointerSize * KB;
+ size = 96 * kPointerSize * KB;
break;
case OLD_DATA_SPACE:
size = 192 * KB;
case PROPERTY_CELL_SPACE:
size = 8 * kPointerSize * KB;
break;
- case CODE_SPACE:
- if (heap()->isolate()->code_range()->exists()) {
+ case CODE_SPACE: {
+ CodeRange* code_range = heap()->isolate()->code_range();
+ if (code_range != NULL && code_range->valid()) {
// When code range exists, code pages are allocated in a special way
// (from the reserved code range). That part of the code is not yet
// upgraded to handle small pages.
size = AreaSize();
} else {
- size = 480 * KB;
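+ // FullCodeGenerator::kBootCodeSizeMultiplier is a percentage, hence the
+ // division by 100.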
+ size = RoundUp(
+ 480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+ kPointerSize);
}
break;
+ }
default:
UNREACHABLE();
}
}
-void PagedSpace::ReleasePage(Page* page, bool unlink) {
+void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
- // Adjust list of unswept pages if the page is the head of the list.
- if (first_unswept_page_ == page) {
- first_unswept_page_ = page->next_page();
- if (first_unswept_page_ == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- }
- }
-
if (page->WasSwept()) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
DecreaseUnsweptFreeBytes(page);
}
- // TODO(hpayer): This check is just used for debugging purpose and
- // should be removed or turned into an assert after investigating the
- // crash in concurrent sweeping.
- CHECK(!free_list_.ContainsPageFreeListItems(page));
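+ // Keep the global scan-on-scavenge page count in sync when the page is
+ // released.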
+ if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
+ heap()->decrement_scan_on_scavenge_pages();
+ page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+ }
+
+ ASSERT(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
allocation_info_.set_top(NULL);
allocation_info_.set_limit(NULL);
}
- if (unlink) {
- page->Unlink();
- }
+ page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
} else {
VerifyObject(object);
// The object itself should look OK.
- object->Verify();
+ object->ObjectVerify();
// All the interior pointers should be contained in the heap.
int size = object->Size();
}
-MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
- return Failure::RetryAfterGC();
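+ // Signal failure so that the caller can perform a GC and retry.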
+ return AllocationResult::Retry();
}
}
CHECK(!object->IsCode());
// The object itself should look OK.
- object->Verify();
+ object->ObjectVerify();
// All the interior pointers should be contained in the heap.
VerifyPointersVisitor visitor;
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
+ // We have to set the size before we release-store the map, because a
+ // concurrent store buffer scan on scavenge must not observe a map with
+ // an invalid size.
FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->set_size(size_in_bytes);
+ this_as_free_space->nobarrier_set_size(size_in_bytes);
+ synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
} else if (size_in_bytes == kPointerSize) {
set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
// stage.
if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
- Memory::Address_at(address() + kNextOffset) =
- reinterpret_cast<Address>(next);
+ base::NoBarrier_Store(
+ reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+ reinterpret_cast<base::AtomicWord>(next));
} else {
- Memory::Address_at(address() + kPointerSize) =
- reinterpret_cast<Address>(next);
+ base::NoBarrier_Store(
+ reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
+ reinterpret_cast<base::AtomicWord>(next));
}
}
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
- if (category->top_ != NULL) {
- ASSERT(category->end_ != NULL);
+ if (category->top() != NULL) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
LockGuard<Mutex> target_lock_guard(mutex());
LockGuard<Mutex> source_lock_guard(category->mutex());
+ ASSERT(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
} else {
- category->end()->set_next(top_);
+ category->end()->set_next(top());
}
- top_ = category->top();
+ set_top(category->top());
available_ += category->available();
category->Reset();
}
void FreeListCategory::Reset() {
- top_ = NULL;
- end_ = NULL;
- available_ = 0;
+ set_top(NULL);
+ set_end(NULL);
+ set_available(0);
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
- FreeListNode** n = &top_;
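+ // Unlink matching nodes on a local copy of the list head and publish the
+ // new head once at the end via set_top().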
+ FreeListNode* t = top();
+ FreeListNode** n = &t;
while (*n != NULL) {
if (Page::FromAddress((*n)->address()) == p) {
FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
n = (*n)->next_address();
}
}
- if (top_ == NULL) {
- end_ = NULL;
+ set_top(t);
+ if (top() == NULL) {
+ set_end(NULL);
}
available_ -= sum;
return sum;
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode** n = &top_;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) return true;
- n = (*n)->next_address();
+ FreeListNode* node = top();
+ while (node != NULL) {
+ if (Page::FromAddress(node->address()) == p) return true;
+ node = node->next();
}
return false;
}
FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
- FreeListNode* node = top_;
+ FreeListNode* node = top();
if (node == NULL) return NULL;
void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top_);
- top_ = node;
+ node->set_next(top());
+ set_top(node);
if (end_ == NULL) {
end_ = node;
}
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top_;
+ FreeListNode* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
}
int huge_list_available = huge_list_.available();
- for (FreeListNode** cur = huge_list_.GetTopAddress();
+ FreeListNode* top_node = huge_list_.top();
+ for (FreeListNode** cur = &top_node;
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
}
}
+ huge_list_.set_top(top_node);
if (huge_list_.top() == NULL) {
huge_list_.set_end(NULL);
}
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->Size();
+ sum += cur_as_free_space->nobarrier_size();
cur = cur->next();
}
return sum;
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top_;
+ FreeListNode* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
// on the first allocation after the sweep.
EmptyAllocationInfo();
- // Stop lazy sweeping and clear marking bits for unswept pages.
- if (first_unswept_page_ != NULL) {
- Page* p = first_unswept_page_;
- do {
- // Do not use ShouldBeSweptLazily predicate here.
- // New evacuation candidates were selected but they still have
- // to be swept before collection starts.
- if (!p->WasSwept()) {
- Bitmap::Clear(p);
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
- reinterpret_cast<intptr_t>(p));
- }
- }
- p = p->next_page();
- } while (p != anchor());
- }
- first_unswept_page_ = Page::FromAddress(NULL);
+ // This counter will be increased for pages which will be swept by the
+ // sweeper threads.
unswept_free_bytes_ = 0;
// Clear the free list before a full GC---it will be rebuilt afterward.
intptr_t PagedSpace::SizeOfObjects() {
- ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+ (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
}
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsLazySweepingComplete()) return true;
-
- intptr_t freed_bytes = 0;
- Page* p = first_unswept_page_;
- do {
- Page* next_page = p->next_page();
- if (ShouldBeSweptLazily(p)) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
- reinterpret_cast<intptr_t>(p));
- }
- DecreaseUnsweptFreeBytes(p);
- freed_bytes +=
- MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
- this, NULL, p);
- }
- p = next_page;
- } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
- if (p == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- } else {
- first_unswept_page_ = p;
- }
-
- heap()->FreeQueuedChunks();
-
- return IsLazySweepingComplete();
-}
-
-
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
}
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+ int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->AreSweeperThreadsActivated()) {
- if (collector->IsConcurrentSweepingInProgress()) {
- if (collector->RefillFreeLists(this) < size_in_bytes) {
- if (!collector->sequential_sweeping()) {
- collector->WaitUntilSweepingCompleted();
- return true;
- }
- }
- return false;
- }
- return true;
- } else {
- return AdvanceSweeper(size_in_bytes);
+
+ // If sweeper threads are still running, wait for them.
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
+
+ // After waiting for the sweeper threads, there may be new free-list
+ // entries.
+ return free_list_.Allocate(size_in_bytes);
}
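+ // No concurrent sweeping is in progress: there is nothing to wait for, so
+ // the allocation cannot be satisfied here.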
+ return NULL;
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance lazy sweeper a bounded number of times
- // until we find a size_in_bytes contiguous piece of memory
- const int kMaxSweepingTries = 5;
- bool sweeping_complete = false;
-
- for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
- sweeping_complete = EnsureSweeperProgress(size_in_bytes);
+ // If sweeper threads are active, try to re-fill the free-lists.
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
+ if (!heap()->always_allocate()
+ && heap()->OldGenerationAllocationLimitReached()) {
+ // If sweeper threads are active, wait for them at that point and steal
+ // elements from their free-lists.
+ HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ if (object != NULL) return object;
}
// Try to expand the space and allocate in the new next page.
return free_list_.Allocate(size_in_bytes);
}
- // Last ditch, sweep all the remaining pages to try to find space. This may
- // cause a pause.
- if (!IsLazySweepingComplete()) {
- EnsureSweeperProgress(kMaxInt);
-
- // Retry the free list allocation.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
-
- // Finally, fail.
- return NULL;
+ // If sweeper threads are active, wait for them at that point and steal
+ // elements from their free-lists. Allocation may still fail, which would
+ // indicate that there is not enough memory for the given allocation.
+ return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
}
}
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
- Executability executable) {
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
+ Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->always_allocate() &&
heap()->OldGenerationAllocationLimitReached()) {
- return Failure::RetryAfterGC(identity());
+ return AllocationResult::Retry(identity());
}
if (Size() + object_size > max_capacity_) {
- return Failure::RetryAfterGC(identity());
+ return AllocationResult::Retry(identity());
}
LargePage* page = heap()->isolate()->memory_allocator()->
AllocateLargePage(object_size, this, executable);
- if (page == NULL) return Failure::RetryAfterGC(identity());
+ if (page == NULL) return AllocationResult::Retry(identity());
ASSERT(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
// GC support
-MaybeObject* LargeObjectSpace::FindObject(Address a) {
+Object* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
if (page != NULL) {
return page->GetObject();
}
- return Failure::Exception();
+ return Smi::FromInt(0); // Signaling not found.
}
bool owned = (chunk->owner() == this);
- SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+ SLOW_ASSERT(!owned || FindObject(address)->IsHeapObject());
return owned;
}
object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
- object->Verify();
+ object->ObjectVerify();
// Byte arrays and strings don't have interior pointers.
if (object->IsCode()) {