void HeapProfiler::StartHeapAllocationsRecording() {
StartHeapObjectsTracking();
+ heap()->DisableInlineAllocation();
is_tracking_allocations_ = true;
DropCompiledCode();
snapshots_->UpdateHeapObjectsMap();
void HeapProfiler::StopHeapAllocationsRecording() {
StopHeapObjectsTracking();
+ heap()->EnableInlineAllocation();
is_tracking_allocations_ = false;
DropCompiledCode();
}
amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
+ inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
gc_safe_size_of_old_object_(NULL),
}
+void Heap::EnableInlineAllocation() {
+ ASSERT(inline_allocation_disabled_);
+ inline_allocation_disabled_ = false;
+
+ // Update inline allocation limit for new space.
+ new_space()->UpdateInlineAllocationLimit(0);
+}
+
+
+void Heap::DisableInlineAllocation() {
+ ASSERT(!inline_allocation_disabled_);
+ inline_allocation_disabled_ = true;
+
+ // Update inline allocation limit for new space.
+ new_space()->UpdateInlineAllocationLimit(0);
+
+ // Update inline allocation limit for old spaces.
+ PagedSpaces spaces(this);
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
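+ // Emptying the allocation info returns the unused linear area to the free
+ // list, so subsequent allocations in this space go through the runtime.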
+ space->EmptyAllocationInfo();
+ }
+}
+
+
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
return Min(limit, halfway_to_the_max);
}
+ // Indicates whether inline bump-pointer allocation has been disabled.
+ bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+ // Enable or disable inline bump-pointer allocation.
+ void EnableInlineAllocation();
+ void DisableInlineAllocation();
+
// Implements the corresponding V8 API function.
bool IdleNotification(int hint);
// last GC.
bool old_gen_exhausted_;
+ // Indicates that inline bump-pointer allocation has been globally disabled
+ // for all spaces. This is used to disable allocations in generated code.
+ bool inline_allocation_disabled_;
+
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
Object* native_contexts_list_;
}
}
}
- allocation_info_.set_limit(to_space_.page_high());
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.set_top(to_space_.page_low());
allocation_info_.set_limit(to_space_.page_high());
-
- // Lower limit during incremental marking.
- if (heap()->incremental_marking()->IsMarking() &&
- inline_allocation_limit_step() != 0) {
- Address new_limit =
- allocation_info_.top() + inline_allocation_limit_step();
- allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
- }
+ UpdateInlineAllocationLimit(0);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
}
+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+ if (heap()->inline_allocation_disabled()) {
+ // Lowest limit when linear allocation is disabled.
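+ // The limit covers at most the current request, so every subsequent
+ // allocation takes the slow path again.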
+ Address high = to_space_.page_high();
+ Address new_top = allocation_info_.top() + size_in_bytes;
+ allocation_info_.set_limit(Min(new_top, high));
+ } else if (inline_allocation_limit_step() == 0) {
+ // Normal limit is the end of the current page.
+ allocation_info_.set_limit(to_space_.page_high());
+ } else {
+ // Lower limit during incremental marking.
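+ // The lowered limit makes allocations periodically fall through to
+ // SlowAllocateRaw, where an incremental marking step is performed.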
+ Address high = to_space_.page_high();
+ Address new_top = allocation_info_.top() + size_in_bytes;
+ Address new_limit = new_top + inline_allocation_limit_step_;
+ allocation_info_.set_limit(Min(new_limit, high));
+ }
+ ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
Address old_top = allocation_info_.top();
- Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
if (allocation_info_.limit() < high) {
- // Incremental marking has lowered the limit to get a
- // chance to do a step.
- Address new_limit = Min(
- allocation_info_.limit() + inline_allocation_limit_step_,
- high);
- allocation_info_.set_limit(new_limit);
+ // The limit has been lowered either because linear allocation is disabled
+ // or because incremental marking wants a chance to do a step. Set the new
+ // limit accordingly.
+ Address new_top = old_top + size_in_bytes;
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+ UpdateInlineAllocationLimit(size_in_bytes);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
int new_node_size = 0;
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) {
- owner_->SetTop(NULL, NULL);
+ owner_->SetTopAndLimit(NULL, NULL);
return NULL;
}
// a little of this again immediately - see below.
owner_->Allocate(new_node_size);
- if (bytes_left > kThreshold &&
- owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ if (owner_->heap()->inline_allocation_disabled()) {
+ // Keep the linear allocation area empty if requested to do so, i.e.
+ // return the unused area to the free list instead.
+ owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+ ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
+ } else if (bytes_left > kThreshold &&
+ owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+ FLAG_incremental_marking_steps) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
// we want to do another increment until the linear area is used up.
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + size_in_bytes + linear_size);
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + size_in_bytes + linear_size);
} else if (bytes_left > 0) {
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
- owner_->SetTop(new_node->address() + size_in_bytes,
- new_node->address() + new_node_size);
+ owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+ new_node->address() + new_node_size);
} else {
// TODO(gc) Try not freeing linear allocation region when bytes_left
// are zero.
- owner_->SetTop(NULL, NULL);
+ owner_->SetTopAndLimit(NULL, NULL);
}
return new_node;
void PagedSpace::PrepareForMarkCompact() {
// We don't have a linear allocation area while sweeping. It will be restored
// on the first allocation after the sweep.
- // Mark the old linear allocation area with a free space map so it can be
- // skipped when scanning the heap.
- int old_linear_size = static_cast<int>(limit() - top());
- Free(top(), old_linear_size);
- SetTop(NULL, NULL);
+ EmptyAllocationInfo();
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
}
// Set space allocation info.
- void SetTop(Address top, Address limit) {
+ void SetTopAndLimit(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
allocation_info_.set_top(top);
allocation_info_.set_limit(limit);
}
+ // Empty the space allocation info, returning the unused area to the free list.
+ void EmptyAllocationInfo() {
+ // Mark the old linear allocation area with a free space map so it can be
+ // skipped when scanning the heap.
+ int old_linear_size = static_cast<int>(limit() - top());
+ Free(top(), old_linear_size);
+ SetTopAndLimit(NULL, NULL);
+ }
+
void Allocate(int bytes) {
accounting_stats_.AllocateBytes(bytes);
}
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ void UpdateInlineAllocationLimit(int size_in_bytes);
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
- if (step == 0) {
- allocation_info_.set_limit(to_space_.page_high());
- } else {
- Address new_limit = Min(
- allocation_info_.top() + inline_allocation_limit_step_,
- allocation_info_.limit());
- allocation_info_.set_limit(new_limit);
- }
+ UpdateInlineAllocationLimit(0);
top_on_previous_step_ = allocation_info_.top();
}
AllocationFlags flags) {
ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
- if (!FLAG_inline_new ||
- // TODO(mstarzinger): Implement more efficiently by keeping then
- // bump-pointer allocation area empty instead of recompiling code.
- isolate()->heap_profiler()->is_tracking_allocations()) {
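+ // Allocation tracking in the heap profiler disables inline allocation via
+ // Heap::DisableInlineAllocation, so it does not need to be checked here.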
+ if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- if (!FLAG_inline_new ||
- // TODO(mstarzinger): Implement more efficiently by keeping then
- // bump-pointer allocation area empty instead of recompiling code.
- isolate()->heap_profiler()->is_tracking_allocations()) {
+ if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
movl(result, Immediate(0x7091));
static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
int new_linear_size = static_cast<int>(
*space->allocation_limit_address() - *space->allocation_top_address());
+ if (new_linear_size == 0) return;
v8::internal::MaybeObject* maybe = space->AllocateRaw(new_linear_size);
v8::internal::FreeListNode* node = v8::internal::FreeListNode::cast(maybe);
node->set_size(space->heap(), new_linear_size);
// Helper function that simulates a full old-space in the heap.
static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
- int old_linear_size = static_cast<int>(space->limit() - space->top());
- space->Free(space->top(), old_linear_size);
- space->SetTop(space->limit(), space->limit());
+ space->EmptyAllocationInfo();
space->ResetFreeList();
space->ClearStats();
}
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
ASSERT(marking->IsComplete());
}
+
+
+TEST(DisableInlineAllocation) {
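+ // Checks that optimized code still allocates correctly through the runtime
+ // when inline (bump-pointer) allocation is disabled.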
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ CompileRun("function test() {"
+ " var x = [];"
+ " for (var i = 0; i < 10; i++) {"
+ " x[i] = [ {}, [1,2,3], [1,x,3] ];"
+ " }"
+ "}"
+ "function run() {"
+ " %OptimizeFunctionOnNextCall(test);"
+ " test();"
+ " %DeoptimizeFunction(test);"
+ "}");
+
+ // Warm up with inline allocation enabled.
+ CompileRun("test(); test(); run();");
+
+ // Run test with inline allocation disabled.
+ CcTest::heap()->DisableInlineAllocation();
+ CompileRun("run()");
+
+ // Run test with inline allocation disabled and pretenuring.
+ CcTest::heap()->SetNewSpaceHighPromotionModeActive(true);
+ CompileRun("run()");
+
+ // Run test with inline allocation re-enabled.
+ CcTest::heap()->EnableInlineAllocation();
+ CompileRun("run()");
+}