MapSpace* Heap::map_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;
-int Heap::promoted_space_limit_ = 0;
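+// Floors for the old generation limits that are recomputed after each
+// mark-compact collection (see below). MB is assumed to be the usual
+// megabyte constant (1024 * 1024) from the shared globals header.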
+static const int kMinimumPromotionLimit = 2*MB;
+static const int kMinimumAllocationLimit = 8*MB;
+
+int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
+int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
+
int Heap::old_gen_exhausted_ = false;
int Heap::amount_of_external_allocated_memory_ = 0;
}
// Is enough data promoted to justify a global GC?
- if (PromotedSpaceSize() + PromotedExternalMemorySize()
- > promoted_space_limit_) {
+ if (OldGenerationPromotionLimitReached()) {
Counters::gc_compactor_caused_by_promoted_data.Increment();
return MARK_COMPACTOR;
}
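+  // Illustrative numbers: with old_gen_promotion_limit_ at 10 MB, a heap
+  // that has promoted 12 MB (including external allocations) takes the
+  // branch above, upgrading what would have been a scavenge to a full
+  // mark-compact collection.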
if (collector == MARK_COMPACTOR) {
MarkCompact(tracer);
- int promoted_space_size = PromotedSpaceSize();
- promoted_space_limit_ =
- promoted_space_size + Max(2 * MB, (promoted_space_size/100) * 35);
+ int old_gen_size = PromotedSpaceSize();
+ old_gen_promotion_limit_ =
+ old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
+ old_gen_allocation_limit_ =
+ old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
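+  // Worked example (illustrative sizes): with old_gen_size at 30 MB,
+  // both limits become 30 MB + Max(minimum, 10 MB) = 40 MB; with a small
+  // 3 MB old generation the minimums dominate, giving a 5 MB promotion
+  // limit and an 11 MB allocation limit.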
old_gen_exhausted_ = false;
// If we have used the mark-compact collector to collect the new
PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
title, gc_count_);
PrintF("mark-compact GC : %d\n", mc_count_);
- PrintF("promoted_space_limit_ %d\n", promoted_space_limit_);
+ PrintF("old_gen_promotion_limit_ %d\n", old_gen_promotion_limit_);
+ PrintF("old_gen_allocation_limit_ %d\n", old_gen_allocation_limit_);
PrintF("\n");
PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
// Allocate uninitialized fixed array (pretenure == NON_TENURE).
static Object* AllocateRawFixedArray(int length);
+  // True if we have reached the promotion limit in the old generation,
+  // which should force the next (normally caused) GC to be a full one.
+ static bool OldGenerationPromotionLimitReached() {
+ return (PromotedSpaceSize() + PromotedExternalMemorySize())
+ > old_gen_promotion_limit_;
+ }
+
+  // True if we have reached the allocation limit in the old generation,
+  // which should artificially cause a GC right away (by failing the
+  // allocation so that a collection is triggered).
+ static bool OldGenerationAllocationLimitReached() {
+ return (PromotedSpaceSize() + PromotedExternalMemorySize())
+ > old_gen_allocation_limit_;
+ }
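+  // Both predicates measure the same quantity; they differ only in the
+  // limit applied. The promotion limit merely redirects a GC that is
+  // already happening to the mark-compact collector, while the (never
+  // lower) allocation limit makes old generation allocation paths fail
+  // early so that a GC is triggered at all; see the spaces changes later
+  // in this patch.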
+
private:
static int semispace_size_;
static int initial_semispace_size_;
static bool disallow_allocation_failure_;
#endif // DEBUG
- // Promotion limit that trigger a global GC
- static int promoted_space_limit_;
+  // Limit that, once exceeded, makes the next (normally caused) GC a
+  // global one. This is checked once we have already decided to do a GC,
+  // to help determine which collector to invoke.
+ static int old_gen_promotion_limit_;
+
+ // Limit that triggers a global GC as soon as is reasonable. This is
+ // checked before expanding a paged space in the old generation and on
+ // every allocation in large object space.
+ static int old_gen_allocation_limit_;
// The amount of external memory registered through the API kept alive
// by global handles
return HeapObject::cast(result);
}
- // Free list allocation failed and there is no next page. Try to expand
- // the space and allocate in the new next page.
+ // Free list allocation failed and there is no next page. Fail if we have
+ // hit the old generation size limit that should cause a garbage
+ // collection.
+ if (Heap::OldGenerationAllocationLimitReached()) {
+ return NULL;
+ }
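+  // Returning NULL here is assumed to make the inlined caller report a
+  // retry-after-GC failure, so the allocation is reattempted once the
+  // collector has run; compare the large object path below, which
+  // constructs Failure::RetryAfterGC directly.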
+
+ // Try to expand the space and allocate in the new next page.
ASSERT(!current_page->next_page()->is_valid());
if (Expand(current_page)) {
return AllocateInNextPage(current_page, size_in_bytes);
}
}
- // Free list allocation failed and there is no next page. Try to expand
- // the space and allocate in the new next page.
+ // Free list allocation failed and there is no next page. Fail if we have
+ // hit the old generation size limit that should cause a garbage
+ // collection.
+ if (Heap::OldGenerationAllocationLimitReached()) {
+ return NULL;
+ }
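+  // Same early-failure check as above, presumably in the fixed-size
+  // object variant of the paged-space allocator; both paths must respect
+  // the limit, or old generation growth through this allocator could
+  // bypass it.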
+
+ // Try to expand the space and allocate in the new next page.
ASSERT(!current_page->next_page()->is_valid());
if (Expand(current_page)) {
return AllocateInNextPage(current_page, size_in_bytes);
int object_size,
Executability executable) {
ASSERT(0 < object_size && object_size <= requested_size);
+
+ // Check if we want to force a GC before growing the old space further.
+ // If so, fail the allocation.
+ if (Heap::OldGenerationAllocationLimitReached()) {
+ return Failure::RetryAfterGC(requested_size, identity());
+ }
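+  // Unlike the paged-space helpers above, which signal failure with NULL
+  // through a HeapObject* return, this function returns an Object* and so
+  // can encode the retry-after-GC failure directly.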
+
size_t chunk_size;
LargeObjectChunk* chunk =
LargeObjectChunk::New(requested_size, &chunk_size, executable);