"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
+ "src/heap/scavenge-job.h",
+ "src/heap/scavenge-job.cc",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
PrintF("; finalized marking");
}
break;
- case DO_SCAVENGE:
- PrintF("scavenge");
- break;
case DO_FULL_GC:
PrintF("full GC");
break;
mark_compact_speed_in_bytes_per_ms);
PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
incremental_marking_speed_in_bytes_per_ms);
- PrintF("scavenge_speed=%" V8_PTR_PREFIX "d ", scavenge_speed_in_bytes_per_ms);
- PrintF("new_space_size=%" V8_PTR_PREFIX "d ", used_new_space_size);
- PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
- PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
- new_space_allocation_throughput_in_bytes_per_ms);
}
}
-bool GCIdleTimeHandler::ShouldDoScavenge(
- size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
- size_t scavenge_speed_in_bytes_per_ms,
- size_t new_space_allocation_throughput_in_bytes_per_ms) {
- if (idle_time_in_ms >= kMinBackgroundIdleTime) {
- // It is better to do full GC for the background tab.
- return false;
- }
-
- // Calculates how much memory are we able to scavenge in
- // kMaxFrameRenderingIdleTime ms. If scavenge_speed_in_bytes_per_ms is 0 we
- // will take care of this later.
- size_t idle_new_space_allocation_limit =
- kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
-
- // If the limit is larger than the new space size, then scavenging used to be
- // really fast. We can take advantage of the whole new space.
- if (idle_new_space_allocation_limit > new_space_size) {
- idle_new_space_allocation_limit = new_space_size;
- }
-
- // We do not know the allocation throughput before the first scavenge.
- // TODO(hpayer): Estimate allocation throughput before the first scavenge.
- if (new_space_allocation_throughput_in_bytes_per_ms > 0) {
- // We have to trigger scavenge before we reach the end of new space.
- size_t adjust_limit = new_space_allocation_throughput_in_bytes_per_ms *
- kTimeUntilNextIdleEvent;
- if (adjust_limit > idle_new_space_allocation_limit) {
- idle_new_space_allocation_limit = 0;
- } else {
- idle_new_space_allocation_limit -= adjust_limit;
- }
- }
-
- // The allocated new space limit to trigger a scavange has to be at least
- // kMinimumNewSpaceSizeToPerformScavenge.
- if (idle_new_space_allocation_limit < kMinimumNewSpaceSizeToPerformScavenge) {
- idle_new_space_allocation_limit = kMinimumNewSpaceSizeToPerformScavenge;
- }
-
- // Set an initial scavenge speed if it is unknown.
- if (scavenge_speed_in_bytes_per_ms == 0) {
- scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
- }
-
- // We apply a max factor to the new space size to make sure that a slowly
- // allocating application still leaves enough of wiggle room to schedule a
- // scavenge.
- size_t max_limit;
- const double kMaxNewSpaceSizeFactorLongIdleTimes = 0.5;
- const double kMaxNewSpaceSizeFactorShortIdleTimes = 0.8;
- if (idle_time_in_ms > kMaxFrameRenderingIdleTime) {
- max_limit = static_cast<size_t>(new_space_size *
- kMaxNewSpaceSizeFactorLongIdleTimes);
- } else {
- max_limit = static_cast<size_t>(new_space_size *
- kMaxNewSpaceSizeFactorShortIdleTimes);
- }
- idle_new_space_allocation_limit =
- Min(idle_new_space_allocation_limit, max_limit);
-
- // We perform a scavenge if we are over the idle new space limit and
- // a scavenge fits into the given idle time bucket.
- if (idle_new_space_allocation_limit <= used_new_space_size) {
- if (used_new_space_size / scavenge_speed_in_bytes_per_ms <=
- idle_time_in_ms) {
- return true;
- }
- }
- return false;
-}
-
-
bool GCIdleTimeHandler::ShouldDoMarkCompact(
size_t idle_time_in_ms, size_t size_of_objects,
size_t mark_compact_speed_in_bytes_per_ms) {
return NothingOrDone(idle_time_in_ms);
}
- if (ShouldDoScavenge(
- static_cast<size_t>(idle_time_in_ms), heap_state.new_space_capacity,
- heap_state.used_new_space_size,
- heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
- return GCIdleTimeAction::Scavenge();
- }
-
if (!FLAG_incremental_marking || heap_state.incremental_marking_stopped) {
return GCIdleTimeAction::Done();
}
DONE,
DO_NOTHING,
DO_INCREMENTAL_STEP,
- DO_SCAVENGE,
DO_FULL_GC,
};
return result;
}
- static GCIdleTimeAction Scavenge() {
- GCIdleTimeAction result;
- result.type = DO_SCAVENGE;
- result.additional_work = false;
- return result;
- }
-
static GCIdleTimeAction FullGC() {
GCIdleTimeAction result;
result.type = DO_FULL_GC;
size_t mark_compact_speed_in_bytes_per_ms;
size_t incremental_marking_speed_in_bytes_per_ms;
size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
- size_t scavenge_speed_in_bytes_per_ms;
- size_t used_new_space_size;
- size_t new_space_capacity;
- size_t new_space_allocation_throughput_in_bytes_per_ms;
};
static const int kMinBackgroundIdleTime = 900;
- // We conservatively assume that in the next kTimeUntilNextIdleEvent ms
- // no idle notification happens.
- static const size_t kTimeUntilNextIdleEvent = 100;
-
// An allocation throughput below kLowAllocationThroughput bytes/ms is
// considered low.
static const size_t kLowAllocationThroughput = 1000;
- // If we haven't recorded any scavenger events yet, we use a conservative
- // lower bound for the scavenger speed.
- static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
-
- // The minimum size of allocated new space objects to trigger a scavenge.
- static const size_t kMinimumNewSpaceSizeToPerformScavenge = MB / 2;
-
// If contexts are disposed at a higher rate a full gc is triggered.
static const double kHighContextDisposalRate;
static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
- static bool ShouldDoScavenge(
- size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
- size_t scavenger_speed_in_bytes_per_ms,
- size_t new_space_allocation_throughput_in_bytes_per_ms);
-
private:
GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap-profiler.h"
gc_idle_time_handler_(nullptr),
memory_reducer_(nullptr),
object_stats_(nullptr),
+ scavenge_job_(nullptr),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
}
+void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
+ scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
+}
+
+
void Heap::OverApproximateWeakClosure(const char* gc_reason) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
heap_state.mark_compact_speed_in_bytes_per_ms =
static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
- heap_state.scavenge_speed_in_bytes_per_ms =
- static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
- heap_state.used_new_space_size = new_space_.Size();
- heap_state.new_space_capacity = new_space_.Capacity();
- heap_state.new_space_allocation_throughput_in_bytes_per_ms =
- tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
return heap_state;
}
CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
break;
}
- case DO_SCAVENGE:
- CollectGarbage(NEW_SPACE, "idle notification: scavenge");
- break;
case DO_NOTHING:
break;
}
}
+void Heap::LowerInlineAllocationLimit(intptr_t step) {
+ new_space()->LowerInlineAllocationLimit(step);
+}
+
+
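+// Note that "reset" keeps a nonzero inline allocation step (the scavenge
+// job's notification interval), so NewSpace::InlineAllocationStep keeps
+// reporting allocated bytes even while incremental marking is not running.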
+void Heap::ResetInlineAllocationLimit() {
+ new_space()->LowerInlineAllocationLimit(
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+}
+
+
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
object_stats_ = new ObjectStats(this);
object_stats_->ClearObjectStats(true);
+ scavenge_job_ = new ScavengeJob();
+
array_buffer_tracker_ = new ArrayBufferTracker(this);
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
mark_compact_collector()->SetUp();
+ ResetInlineAllocationLimit();
+
return true;
}
delete object_stats_;
object_stats_ = nullptr;
+ delete scavenge_job_;
+ scavenge_job_ = nullptr;
+
WaitUntilUnmappingOfFreeChunksCompleted();
delete array_buffer_tracker_;
class MemoryReducer;
class ObjectStats;
+class ScavengeJob;
class Scavenger;
class WeakObjectRetainer;
void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
double mutator_speed);
+ // ===========================================================================
+ // Inline allocation. ========================================================
+ // ===========================================================================
+
+ void LowerInlineAllocationLimit(intptr_t step);
+ void ResetInlineAllocationLimit();
+
// ===========================================================================
// Idle notification. ========================================================
// ===========================================================================
bool RecentIdleNotificationHappened();
+ void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
// ===========================================================================
// Allocation methods. =======================================================
ObjectStats* object_stats_;
+ ScavengeJob* scavenge_job_;
+
// These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
size_t crankshaft_codegen_bytes_generated_;
friend class IncrementalMarking;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
+ friend class NewSpace;
friend class ObjectStatsVisitor;
friend class Page;
friend class Scavenger;
state_ = SWEEPING;
}
- heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+ heap_->LowerInlineAllocationLimit(kAllocatedThreshold);
incremental_marking_job()->Start(heap_);
}
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Stopping.\n");
}
- heap_->new_space()->LowerInlineAllocationLimit(0);
+ heap_->ResetInlineAllocationLimit();
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
Hurry();
state_ = STOPPED;
is_compacting_ = false;
- heap_->new_space()->LowerInlineAllocationLimit(0);
+ heap_->ResetInlineAllocationLimit();
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
PatchIncrementalMarkingRecordWriteStubs(heap_,
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/scavenge-job.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
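+// Non-integral static const data members cannot be initialized inside the
+// class definition, hence the out-of-line definition of this constant.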
+const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
+
+void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
+ Heap* heap = isolate_->heap();
+ double deadline_in_ms =
+ deadline_in_seconds *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+ double start_ms = heap->MonotonicallyIncreasingTimeInMs();
+ double idle_time_in_ms = deadline_in_ms - start_ms;
+ size_t scavenge_speed_in_bytes_per_ms =
+ static_cast<size_t>(heap->tracer()->ScavengeSpeedInBytesPerMillisecond());
+ size_t new_space_size = heap->new_space()->Size();
+ size_t new_space_capacity = heap->new_space()->Capacity();
+
+ job_->NotifyIdleTask();
+
+ if (ReachedIdleAllocationLimit(scavenge_speed_in_bytes_per_ms, new_space_size,
+ new_space_capacity)) {
+ if (EnoughIdleTimeForScavenge(
+ idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
+ heap->CollectGarbage(NEW_SPACE, "idle task: scavenge");
+ } else {
+ // Immediately request another idle task, which may get a longer idle time.
+ job_->RescheduleIdleTask(heap);
+ }
+ }
+}
+
+
+bool ScavengeJob::ReachedIdleAllocationLimit(
+ size_t scavenge_speed_in_bytes_per_ms, size_t new_space_size,
+ size_t new_space_capacity) {
+ if (scavenge_speed_in_bytes_per_ms == 0) {
+ scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+ }
+
+ // Set the allocation limit to the number of bytes we can scavenge in an
+ // average idle task.
+ size_t allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
+
+ // Keep the limit smaller than the new space capacity.
+ allocation_limit =
+ Min(allocation_limit,
+ static_cast<size_t>(new_space_capacity *
+ kMaxAllocationLimitAsFractionOfNewSpace));
+ // Adjust the limit to take into account bytes that will be allocated until
+ // the next check.
+ allocation_limit = allocation_limit < kBytesAllocatedBeforeNextIdleTask
+ ? 0
+ : allocation_limit - kBytesAllocatedBeforeNextIdleTask;
+ // Keep the limit large enough to avoid scavenges in tiny new space.
+ allocation_limit = Max(allocation_limit, kMinAllocationLimit);
+
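+ // Worked example (a sketch, assuming the constants declared in
+ // scavenge-job.h): with an unknown scavenge speed the fallback is
+ // 256 KB/ms, so kAverageIdleTimeMs = 5 ms gives a raw limit of 1280 KB;
+ // subtracting kBytesAllocatedBeforeNextIdleTask (512 KB) leaves 768 KB,
+ // which is already above kMinAllocationLimit (512 KB). A scavenge is thus
+ // requested once new space holds at least 768 KB, matching the
+ // AllocationLimitUnknownScavengeSpeed unit test.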
+ return allocation_limit <= new_space_size;
+}
+
+
+bool ScavengeJob::EnoughIdleTimeForScavenge(
+ double idle_time_in_ms, size_t scavenge_speed_in_bytes_per_ms,
+ size_t new_space_size) {
+ if (scavenge_speed_in_bytes_per_ms == 0) {
+ scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+ }
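+ // E.g., at 500 KB/ms a 1 MB new space needs a little over 2 ms of idle
+ // time, so a 2 ms deadline is rejected and RunInternal reschedules the
+ // idle task instead.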
+ return new_space_size <= idle_time_in_ms * scavenge_speed_in_bytes_per_ms;
+}
+
+
+void ScavengeJob::RescheduleIdleTask(Heap* heap) {
+ // Make sure that we don't reschedule more than one time.
+ // Otherwise, we might spam the scheduler with idle tasks.
+ if (!idle_task_rescheduled_) {
+ ScheduleIdleTask(heap);
+ idle_task_rescheduled_ = true;
+ }
+}
+
+
+void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
+ bytes_allocated_since_the_last_task_ += bytes_allocated;
+ if (bytes_allocated_since_the_last_task_ >=
+ static_cast<int>(kBytesAllocatedBeforeNextIdleTask)) {
+ ScheduleIdleTask(heap);
+ bytes_allocated_since_the_last_task_ = 0;
+ idle_task_rescheduled_ = false;
+ }
+}
+
+
+void ScavengeJob::ScheduleIdleTask(Heap* heap) {
+ if (!idle_task_pending_) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+ if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
+ idle_task_pending_ = true;
+ auto task = new IdleTask(heap->isolate(), this);
+ V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
+ }
+ }
+}
+}  // namespace internal
+}  // namespace v8
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGE_JOB_H_
+#define V8_HEAP_SCAVENGE_JOB_H_
+
+#include "src/cancelable-task.h"
+#include "src/heap/gc-tracer.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+
+// This class posts idle tasks and performs scavenges in those idle tasks.
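+//
+// Rough flow, as wired up by this patch: NewSpace::InlineAllocationStep
+// reports allocated bytes through Heap::ScheduleIdleScavengeIfNeeded, which
+// calls ScheduleIdleTaskIfNeeded. Once kBytesAllocatedBeforeNextIdleTask
+// bytes have accumulated, an IdleTask is posted; the task then scavenges if
+// the allocation limit is reached and the idle deadline suffices, or
+// reschedules itself once to wait for a longer deadline.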
+class ScavengeJob {
+ public:
+ class IdleTask : public CancelableIdleTask {
+ public:
+ explicit IdleTask(Isolate* isolate, ScavengeJob* job)
+ : CancelableIdleTask(isolate), job_(job) {}
+ // CancelableIdleTask overrides.
+ void RunInternal(double deadline_in_seconds) override;
+
+ private:
+ ScavengeJob* job_;
+ };
+
+ ScavengeJob()
+ : idle_task_pending_(false),
+ idle_task_rescheduled_(false),
+ bytes_allocated_since_the_last_task_(0) {}
+
+ // Posts an idle task if the number of bytes allocated since the last
+ // idle task reaches kBytesAllocatedBeforeNextIdleTask.
+ void ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated);
+
+ // Posts an idle task regardless of the allocation counter, but makes sure
+ // that the rescheduled idle task cannot reschedule again.
+ // This prevents unbounded rescheduling.
+ void RescheduleIdleTask(Heap* heap);
+
+ bool IdleTaskPending() { return idle_task_pending_; }
+ void NotifyIdleTask() { idle_task_pending_ = false; }
+ bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
+
+ static bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
+ size_t new_space_size,
+ size_t new_space_capacity);
+
+ static bool EnoughIdleTimeForScavenge(double idle_time_ms,
+ size_t scavenge_speed_in_bytes_per_ms,
+ size_t new_space_size);
+
+ // If we haven't recorded any scavenger events yet, we use a conservative
+ // lower bound for the scavenger speed.
+ static const int kInitialScavengeSpeedInBytesPerMs = 256 * KB;
+ // Estimate of the average idle time that an idle task gets.
+ static const int kAverageIdleTimeMs = 5;
+ // The number of bytes to be allocated in new space before the next idle
+ // task is posted.
+ static const size_t kBytesAllocatedBeforeNextIdleTask = 512 * KB;
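+ // (Heap::ResetInlineAllocationLimit also uses this value as the inline
+ // allocation step, so allocation progress is reported at this granularity.)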
+ // The minimum size of allocated new space objects to trigger a scavenge.
+ static const size_t kMinAllocationLimit = 512 * KB;
+ // The allocation limit cannot exceed this fraction of the new space capacity.
+ static const double kMaxAllocationLimitAsFractionOfNewSpace;
+
+ private:
+ void ScheduleIdleTask(Heap* heap);
+ bool idle_task_pending_;
+ bool idle_task_rescheduled_;
+ int bytes_allocated_since_the_last_task_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif // V8_HEAP_SCAVENGE_JOB_H_
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_limit_step() == 0) {
+ } else if (inline_allocation_limit_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step. Set
- // the new limit accordingly.
+ // or because incremental marking wants to get a chance to do a step,
+ // or because the idle scavenge job wants to get a chance to post a task.
+ // Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
InlineAllocationStep(new_top, new_top);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
void NewSpace::InlineAllocationStep(Address top, Address new_top) {
if (top_on_previous_step_) {
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
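+ // Let the idle scavenge job accumulate allocated bytes so that it can
+ // post an idle task once enough allocation has happened.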
+ heap()->ScheduleIdleScavengeIfNeeded(bytes_allocated);
heap()->incremental_marking()->Step(bytes_allocated,
IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = new_top;
}
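+// Inline allocation steps (from incremental marking and the idle scavenge
+// job) would otherwise fire while these helpers fill up new space, so each
+// helper disables them first.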
+static inline void DisableInlineAllocationSteps(v8::internal::NewSpace* space) {
+ space->LowerInlineAllocationLimit(0);
+}
+
+
// Helper function that simulates a full new-space in the heap.
static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
+ DisableInlineAllocationSteps(space);
v8::internal::AllocationResult allocation = space->AllocateRawUnaligned(
v8::internal::Page::kMaxRegularHeapObjectSize);
if (allocation.IsRetry()) return false;
// Helper function that simulates a full new-space in the heap.
static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
int extra_bytes) {
+ DisableInlineAllocationSteps(space);
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
CHECK(space_remaining >= extra_bytes);
Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
+ DisableInlineAllocationSteps(new_space);
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
NewSpace* new_space = heap->new_space();
+ DisableInlineAllocationSteps(new_space);
// In this test we will try to overwrite the promotion queue which is at the
// end of to-space. To actually make that possible, we need at least two
Min(FixedArray::kMaxSize, Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
+ DisableInlineAllocationSteps(new_space);
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
result.incremental_marking_stopped = false;
result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
- result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
- result.used_new_space_size = 0;
- result.new_space_capacity = kNewSpaceCapacity;
- result.new_space_allocation_throughput_in_bytes_per_ms =
- kNewSpaceAllocationThroughput;
return result;
}
static const size_t kSizeOfObjects = 100 * MB;
static const size_t kMarkCompactSpeed = 200 * KB;
static const size_t kMarkingSpeed = 200 * KB;
- static const size_t kScavengeSpeed = 100 * KB;
- static const size_t kNewSpaceCapacity = 1 * MB;
- static const size_t kNewSpaceAllocationThroughput = 10 * KB;
static const int kMaxNotifications = 100;
private:
}
-TEST_F(GCIdleTimeHandlerTest, DoScavengeEmptyNewSpace) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- int idle_time_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeFullNewSpace) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- int idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeUnknownScavengeSpeed) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = 0;
- int idle_time_ms = 8;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeLowScavengeSpeed) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = 1 * KB;
- int idle_time_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeLowAllocationRate) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.new_space_allocation_throughput_in_bytes_per_ms =
- GCIdleTimeHandler::kLowAllocationThroughput - 1;
- int idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
- int idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoNotScavengeSmallNewSpaceSize) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = (MB / 2) - 1;
- heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
- int idle_time_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
size_t idle_time_ms = GCIdleTimeHandler::kMaxScheduledIdleTime;
EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_ms, 0, 0));
}
-TEST_F(GCIdleTimeHandlerTest, Scavenge) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- int idle_time_ms = 10;
- heap_state.used_new_space_size =
- heap_state.new_space_capacity -
- (kNewSpaceAllocationThroughput * idle_time_ms);
- GCIdleTimeAction action =
- handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_SCAVENGE, action.type);
- heap_state.used_new_space_size = 0;
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
- GCIdleTimeHeapState heap_state = DefaultHeapState();
- int idle_time_ms = 10;
- heap_state.incremental_marking_stopped = true;
- heap_state.used_new_space_size =
- heap_state.new_space_capacity -
- (kNewSpaceAllocationThroughput * idle_time_ms);
- GCIdleTimeAction action =
- handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_SCAVENGE, action.type);
- heap_state.used_new_space_size = 0;
- action = handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DONE, action.type);
-}
-
-
TEST_F(GCIdleTimeHandlerTest, DoNotStartIncrementalMarking) {
GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
--- /dev/null
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/globals.h"
+#include "src/heap/scavenge-job.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+const size_t kScavengeSpeedInBytesPerMs = 500 * KB;
+const size_t kNewSpaceCapacity = 8 * MB;
+
+
+TEST(ScavengeJob, AllocationLimitEmptyNewSpace) {
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, 0, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitFullNewSpace) {
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, kNewSpaceCapacity, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitUnknownScavengeSpeed) {
+ size_t expected_size = ScavengeJob::kInitialScavengeSpeedInBytesPerMs *
+ ScavengeJob::kAverageIdleTimeMs -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size - 1,
+ kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size,
+ kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitLowScavengeSpeed) {
+ size_t scavenge_speed = 1 * KB;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, ScavengeJob::kMinAllocationLimit - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, ScavengeJob::kMinAllocationLimit, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitAverageScavengeSpeed) {
+ size_t expected_size =
+ kScavengeSpeedInBytesPerMs * ScavengeJob::kAverageIdleTimeMs -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, ScavengeJob::kMinAllocationLimit,
+ kNewSpaceCapacity));
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, expected_size - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, expected_size, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
+ size_t scavenge_speed = kNewSpaceCapacity;
+ size_t expected_size =
+ static_cast<size_t>(
+ kNewSpaceCapacity *
+ ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace) -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, expected_size - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, expected_size, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeUnknownScavengeSpeed) {
+ size_t scavenge_speed = ScavengeJob::kInitialScavengeSpeedInBytesPerMs;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(
+ ScavengeJob::EnoughIdleTimeForScavenge(expected_time, 0, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(expected_time - 1, 0,
+ new_space_size));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeLowScavengeSpeed) {
+ size_t scavenge_speed = 1 * KB;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time, scavenge_speed, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time - 1, scavenge_speed, new_space_size));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeHighScavengeSpeed) {
+ size_t scavenge_speed = kNewSpaceCapacity;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time, scavenge_speed, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time - 1, scavenge_speed, new_space_size));
+}
+
+} // namespace internal
+} // namespace v8
'heap/gc-idle-time-handler-unittest.cc',
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
+ 'heap/scavenge-job-unittest.cc',
'run-all-unittests.cc',
'test-utils.h',
'test-utils.cc',
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
+ '../../src/heap/scavenge-job.cc',
+ '../../src/heap/scavenge-job.h',
'../../src/heap/scavenger-inl.h',
'../../src/heap/scavenger.cc',
'../../src/heap/scavenger.h',