void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
+ // If we are stressing the GC, then always return the linear allocation area
+ // to the free list here, which will cause a crash if the top and limit are
+ // not up to date.
+ if (FLAG_gc_interval != -1) {
+ heap()->old_space()->ReturnLinearAllocationAreaToFreeList();
+ }
if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
Start(Heap::kNoGCFlags);
} else {
- Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+ Step(allocated * kOldSpaceAllocationMarkingFactor, GC_VIA_STACK_GUARD);
}
}
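
For context on the stress path above (a usage note, not part of the patch): FLAG_gc_interval is V8's --gc-interval flag, which forces a collection after every N allocations and defaults to -1, i.e. off. An invocation along these lines would exercise the new early return of the linear allocation area (test.js is a placeholder):

  d8 --gc-interval=100 test.js
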
ForceMarkingAction marking,
ForceCompletionAction completion) {
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
- !FLAG_incremental_marking_steps ||
- (state_ != SWEEPING && state_ != MARKING)) {
+ !CanDoSteps()) {
return 0;
}
INLINE(bool IsMarking()) { return state() >= MARKING; }
- inline bool IsMarkingIncomplete() { return state() == MARKING; }
+ inline bool CanDoSteps() {
+ return FLAG_incremental_marking_steps &&
+ (state() == MARKING || state() == SWEEPING);
+ }
inline bool IsComplete() { return state() == COMPLETE; }
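
A quick equivalence check, reading the two hunks above together (annotation, not part of the patch): in Step() the new guard rejects exactly the same states as the old one, since by De Morgan's law

  // old: !FLAG_incremental_marking_steps ||
  //      (state_ != SWEEPING && state_ != MARKING)
  // new: !CanDoSteps()
  //   == !FLAG_incremental_marking_steps ||
  //      (state() != MARKING && state() != SWEEPING)

At the FreeList::Allocate call site further down, however, replacing IsMarkingIncomplete() (MARKING only) with CanDoSteps() (MARKING or SWEEPING) does slightly broaden when a step may be taken.
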
// But if we are promoting a lot of data, we need to mark faster to keep up
// with the data that is entering the old space through promotion.
static const intptr_t kFastMarking = 3;
+ static const intptr_t kOldSpaceAllocationMarkingFactor =
+ kFastMarking / kInitialMarkingSpeed;
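
A note on the new constant (illustrative, not part of the patch): the division is now evaluated once, at the definition, with integer semantics. Assuming kInitialMarkingSpeed == 1, the factor is 3 and allocated * kOldSpaceAllocationMarkingFactor matches the old inline expression exactly; if the division were ever inexact, the precomputed factor would silently truncate. A guard along these lines, using V8's STATIC_ASSERT macro, would pin that down:

  // Illustrative only: the factor must divide evenly, or step sizes change.
  STATIC_ASSERT(kFastMarking % kInitialMarkingSpeed == 0);
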
// After this many steps we increase the marking/allocating factor.
static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
// This is how much we increase the marking/allocating factor by.
medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
+ unreported_allocation_ = 0;
}
}
+void PagedSpace::SetTopAndLimit(Address top, Address limit) {
+ DCHECK(top == limit ||
+ Page::FromAddress(top) == Page::FromAddress(limit - 1));
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
+}
+
+
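Why the DCHECK above compares pages at limit - 1 (illustration, not part of the patch): limit is exclusive, so an area that ends exactly on a page boundary has its last usable byte at limit - 1, while Page::FromAddress(limit) would name the following page and the check would fail spuriously. The top == limit disjunct additionally admits the empty (NULL, NULL) area set in the next function:

  // Hypothetical addresses: an area filling one whole page
  //   [page_start, page_start + Page::kPageSize)
  // has Page::FromAddress(limit) on the *next* page, but
  //     Page::FromAddress(limit - 1) == Page::FromAddress(top).
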
+void PagedSpace::ReturnLinearAllocationAreaToFreeList() {
+ int old_linear_size = static_cast<int>(limit() - top());
+ Free(top(), old_linear_size);
+ SetTopAndLimit(NULL, NULL);
+}
+
+
// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then NULL is returned, and the caller can perform a GC
// if the requested size is big enough to justify one.
owner_->Free(owner_->top(), old_linear_size);
- owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
- old_linear_size);
-
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
// candidate.
DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
- const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+ // An old-space step will mark more data per byte allocated, because the
+ // marker has to keep up with the data that is entering the old space (see
+ // kFastMarking above). We don't want the pause to be bigger, so we do the
+ // marking after a smaller amount of allocation.
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+ IncrementalMarking::kOldSpaceAllocationMarkingFactor;
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
+ unreported_allocation_ += new_node_size;
+
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
// return the area to the free list instead.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
} else if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ owner_->heap()->incremental_marking()->CanDoSteps()) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
new_node_size - size_in_bytes - linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
- } else if (bytes_left > 0) {
- // Normally we give the rest of the node to the allocator as its new
- // linear allocation area.
- owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ // It is important that we are done updating top and limit before we call
+ // this, because it might add the free space between top and limit to the
+ // free list, and that would be very bad if top and new_node were still
+ // pointing to the same place.
+ owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes +
+ linear_size);
+ unreported_allocation_ = 0;
} else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTopAndLimit(NULL, NULL);
+ if (bytes_left > 0) {
+ // Normally we give the rest of the node to the allocator as its new
+ // linear allocation area.
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
+ } else {
+ // TODO(gc) Try not freeing the linear allocation region when bytes_left
+ // is zero.
+ owner_->SetTopAndLimit(NULL, NULL);
+ }
+ if (unreported_allocation_ > kThreshold) {
+ // This may start the incremental marker, or do a little work if it's
+ // already started. It is important that we are finished updating top
+ // and limit before we call this (see above).
+ owner_->heap()->incremental_marking()->OldSpaceStep(
+ Min(kThreshold, unreported_allocation_));
+ unreported_allocation_ = 0;
+ }
}
return new_node;
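
Pulling the hunks above together, the new accounting works like this (a condensed sketch of the control flow, not the literal code):

  // 1. Every node taken from the free list is counted immediately:
  //      owner_->Allocate(new_node_size);
  //      unreported_allocation_ += new_node_size;
  // 2. top and limit are always brought up to date before OldSpaceStep()
  //    runs, because under --gc-interval stress a step hands the linear
  //    area straight back to the free list (see OldSpaceStep above).
  // 3. While steps are possible during marking, each allocation reports
  //    size_in_bytes + linear_size directly; otherwise the running total
  //    is flushed in chunks capped at kThreshold:
  //      OldSpaceStep(Min(kThreshold, unreported_allocation_));
  //      unreported_allocation_ = 0;
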
reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
}
- heap()->incremental_marking()->OldSpaceStep(object_size);
+ // We would like to tell the incremental marker to do a lot of work, since
+ // we just made a large allocation in old space, but that might cause a huge
+ // pause. Underreporting here may cause the marker to speed up because it
+ // will perceive that it is not keeping up with allocation. Although this
+ // causes some big incremental marking steps, they are not as big as this one
+ // might have been. In testing, a very large pause was divided up into about
+ // 12 parts.
+ const int kThreshold = IncrementalMarking::kAllocatedThreshold *
+ IncrementalMarking::kOldSpaceAllocationMarkingFactor;
+ heap()->incremental_marking()->OldSpaceStep(kThreshold);
return object;
}
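
To make the underreporting concrete, a rough worked example (the 16 MB object is hypothetical, and kAllocatedThreshold == 64 KB is an assumption about this vintage of the code):

  kThreshold           = 64 KB * 3  = 192 KB
  reported / allocated = 192 KB / 16 MB, about 1.2%

The marker's speed heuristic then sees allocation far outpacing marking and accelerates, which is how the single very large pause mentioned above ends up divided into about 12 smaller steps.
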
PagedSpace* owner_;
Heap* heap_;
+ int unreported_allocation_;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
void ResetFreeList() { free_list_.Reset(); }
// Set space allocation info.
- void SetTopAndLimit(Address top, Address limit) {
- DCHECK(top == limit ||
- Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(top);
- allocation_info_.set_limit(limit);
- }
+ void SetTopAndLimit(Address top, Address limit);
+ void ReturnLinearAllocationAreaToFreeList();
// Empty space allocation info, returning unused area to free list.
void EmptyAllocationInfo() {
HeapObject* filler2;
if (double_misalignment) {
start = AlignOldSpace(kDoubleAligned, 0);
+ // If we run out of linear allocation area then we might get NULL here. In
+ // that case we are unlucky and the test is not going to work, but it's not
+ // a test failure; this is a reasonable thing to happen. Just abandon the
+ // test.
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
// The object is aligned, and a filler object is created after.
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
filler1->Size() == kPointerSize);
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleAligned, kPointerSize);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment));
filler1 = HeapObject::FromAddress(start);
// Similarly for kDoubleUnaligned.
start = AlignOldSpace(kDoubleUnaligned, 0);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
// The object is aligned, and a filler object is created after.
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
filler1->Size() == kPointerSize);
// Try the opposite alignment case.
start = AlignOldSpace(kDoubleUnaligned, kPointerSize);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kDoubleUnaligned);
CHECK(IsAddressAligned(obj->address(), kDoubleAlignment, kPointerSize));
filler1 = HeapObject::FromAddress(start);
// Now test SIMD alignment. There are 2 or 4 possible alignments, depending
// on platform.
start = AlignOldSpace(kSimd128Unaligned, 0);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There is a filler object after the object.
CHECK(obj != filler1 && filler1->IsFiller() &&
filler1->Size() == kSimd128Size - kPointerSize);
start = AlignOldSpace(kSimd128Unaligned, kPointerSize);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There is a filler object before the object.
if (double_misalignment) {
// Test the 2 other alignments possible on 32 bit platforms.
start = AlignOldSpace(kSimd128Unaligned, 2 * kPointerSize);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There are filler objects before and after the object.
CHECK(obj != filler2 && filler2->IsFiller() &&
filler2->Size() == kPointerSize);
start = AlignOldSpace(kSimd128Unaligned, 3 * kPointerSize);
+ if (start == NULL) return;
obj = OldSpaceAllocateAligned(kPointerSize, kSimd128Unaligned);
CHECK(IsAddressAligned(obj->address(), kSimd128Alignment, kPointerSize));
// There are filler objects before and after the object.
Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED, false);
o->set_properties(*factory->empty_fixed_array());
- // Ensure that the object allocated where we need it.
+ // Ensure that the object was allocated where we need it. If not, then abandon
+ // the test, since this isn't actually something we can reasonably require.
Page* page = Page::FromAddress(o->address());
- CHECK_EQ(desired_offset, page->Offset(o->address()));
+ if (desired_offset != page->Offset(o->address())) return;
// Now we have an object right at the end of the page.