'WIN32',
],
'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
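# CharacterSet '1' selects the Unicode character set ('2' would be MBCS).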
},
'target_conditions': [
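# Each toolset probes whether its C++ compiler accepts -m32 by
# preprocessing an empty translation unit; on success m32flag expands to
# "-m32", otherwise to nothing. "echo -n" keeps a trailing newline out of
# the captured flag.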
['_toolset=="host"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
}],
['_toolset=="target"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo -n "-m32" || true)',
},
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
},
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
- 'cflags': [ '-Wno-unused-parameter',
+ 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
],
bool Heap::IdleNotification(int hint) {
+ // Hints greater than this value indicate that
+ // the embedder is requesting a lot of GC work.
const int kMaxHint = 1000;
+ // The minimal hint value that still allows a full GC to be performed.
+ const int kMinHintForFullGC = 100;
intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
// The size factor is in the range [5..250]. The numbers here were chosen
// from experiments. If you change them, make sure to test with
mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
ms_count_at_last_idle_notification_ = ms_count_;
- if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+ int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
+ mark_sweeps_since_idle_round_started_;
+
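+ // No mark-sweeps remain in this round: finish it and tell the embedder
+ // that it can stop sending idle notifications for now.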
+ if (remaining_mark_sweeps <= 0) {
FinishIdleRound();
return true;
}
if (incremental_marking()->IsStopped()) {
- incremental_marking()->Start();
+ // If there are no more than two GCs left in this idle round and we are
+ // allowed to do a full GC, then make those GCs full in order to compact
+ // the code space.
+ // TODO(ulan): Once we enable code compaction for incremental marking,
+ // we can get rid of this special case and always start incremental marking.
+ if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: finalize idle round");
+ } else {
+ incremental_marking()->Start();
+ }
+ }
+ if (!incremental_marking()->IsStopped()) {
+ AdvanceIdleIncrementalMarking(step_size);
}
-
- AdvanceIdleIncrementalMarking(step_size);
return false;
}
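
// Usage sketch, not part of this patch: embedders reach this heuristic
// through the public v8::V8::IdleNotification(int hint) wrapper (the
// wrapper name is an assumption based on the v8.h API of this era) and
// typically drive it in a loop while the process is idle:
//
//   while (!v8::V8::IdleNotification(1000)) {
//     // Keep idling; hints >= kMinHintForFullGC (100) permit the full
//     // GCs that finalize an idle round.
//   }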
space->identity() == OLD_DATA_SPACE ||
space->identity() == CODE_SPACE);
+ static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
-
- const int kMaxMaxEvacuationCandidates = 1000;
- int max_evacuation_candidates = Min(
- kMaxMaxEvacuationCandidates,
- static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+ int max_evacuation_candidates =
+ static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);
if (FLAG_stress_compaction || FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
intptr_t over_reserved = reserved - space->SizeOfObjects();
static const intptr_t kFreenessThreshold = 50;
- if (over_reserved >= 2 * space->AreaSize() &&
- reduce_memory_footprint_) {
- mode = REDUCE_MEMORY_FOOTPRINT;
+ if (over_reserved >= 2 * space->AreaSize()) {
+ // If a reduction of the memory footprint was requested, we are
+ // aggressive about choosing pages to free. We expect that half-empty
+ // pages are easier to compact, so we bump the limit slightly.
+ if (reduce_memory_footprint_) {
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates += 2;
+ }
- // We expect that empty pages are easier to compact so slightly bump the
- // limit.
- max_evacuation_candidates += 2;
+ // If over-usage is very high (more than a third of the space), we try
+ // to free all mostly-empty pages. We expect that almost-empty pages are
+ // even easier to compact, so we bump the limit even further.
+ if (over_reserved > reserved / 3) {
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates *= 2;
+ }
- if (FLAG_trace_fragmentation) {
- PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+ if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
static_cast<double>(over_reserved) / MB,
+ static_cast<double>(reserved) / MB,
static_cast<int>(kFreenessThreshold));
}
}
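// Worked example: a space with 512 pages starts at
// sqrt(512 / 2) + 1 = 17 candidates; a footprint-reduction request bumps
// that to 19, and if more than a third of the space is over-reserved the
// count doubles to 38 (before the clamp below).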
Candidate candidates[kMaxMaxEvacuationCandidates];
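+ // Clamp the count to the size of the fixed on-stack array above.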
+ max_evacuation_candidates =
+ Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
int count = 0;
int fragmentation = 0;
Candidate* least = NULL;
bool lazy_sweeping_active = false;
bool unused_page_present = false;
- intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
- intptr_t space_left =
- Min(heap()->OldGenPromotionLimit(old_space_size),
- heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
-
while (it.has_next()) {
Page* p = it.next();
}
freed_bytes += SweepConservatively(space, p);
pages_swept++;
- if (space_left + freed_bytes > newspace_size) {
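+ // Stop sweeping eagerly once twice the new-space size has been freed;
+ // the remaining pages are left to lazy sweeping.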
+ if (freed_bytes > 2 * newspace_size) {
space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
} else {
#define MAJOR_VERSION 3
#define MINOR_VERSION 11
#define BUILD_NUMBER 10
-#define PATCH_LEVEL 12
+#define PATCH_LEVEL 14
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
using namespace v8::internal;
-static inline void SimulateFullSpace(PagedSpace* space) {
+// Also used in test-heap.cc test cases.
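+// Frees the remaining linear allocation area and sets top == limit, so
+// the next allocation in the space has to expand it with a fresh page.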
+void SimulateFullSpace(PagedSpace* space) {
int old_linear_size = static_cast<int>(space->limit() - space->top());
space->Free(space->top(), old_linear_size);
space->SetTop(space->limit(), space->limit());
CHECK(root->IsJSObject());
CHECK(root->map()->IsMap());
}
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
+
+
+TEST(ReleaseOverReservedPages) {
+ i::FLAG_trace_gc = true;
+ InitializeVM();
+ v8::HandleScope scope;
+ static const int number_of_test_pages = 20;
+
+ // Prepare many pages with low live-bytes count.
+ PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+ CHECK_EQ(1, old_pointer_space->CountTotalPages());
+ for (int i = 0; i < number_of_test_pages; i++) {
+ AlwaysAllocateScope always_allocate;
+ SimulateFullSpace(old_pointer_space);
+ FACTORY->NewFixedArray(1, TENURED);
+ }
+ CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+ // Triggering one GC will cause a lot of garbage to be discovered, but
+ // it will be spread evenly across all allocated pages.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+ CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+ // Triggering subsequent GCs should cause at least half of the pages
+ // to be released to the OS after at most two cycles.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+ CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+ CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
+
+ // Triggering a last-resort GC should cause all pages to be released
+ // to the OS so that other processes can seize the memory.
+ HEAP->CollectAllAvailableGarbage("triggered really hard");
+ CHECK_EQ(1, old_pointer_space->CountTotalPages());
+}