old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
+ old_regular_limit_(NULL),
old_reserved_limit_(NULL),
+ old_virtual_memory_(NULL),
+ old_store_buffer_length_(0),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
- during_gc_(false),
+ allow_overflow_(false),
store_buffer_rebuilding_enabled_(false),
callback_(NULL),
may_move_store_buffer_entries_(true),
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
limit_ = start_ + (kStoreBufferSize / kPointerSize);
+ // We set the maximum store buffer size to the maximum size of a semi-space.
+ // The store buffer may reach this limit during a full garbage collection.
+ // Note that half of the semi-space would already suffice, since at least
+ // half of the memory in a semi-space is not object pointers.
+ old_store_buffer_length_ =
+     static_cast<int>(heap_->MaxSemiSpaceSize() / kPointerSize);
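+ // E.g., with an 8 MB max semi-space on a 64-bit build this yields 1M
+ // entries, i.e. 8 MB of reserved virtual memory for the old buffer below.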
+
old_virtual_memory_ =
- new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
+ new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
old_top_ = old_start_ =
reinterpret_cast<Address*>(old_virtual_memory_->address());
// Don't know the alignment requirements of the OS, but it is certainly not
int initial_length =
static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
ASSERT(initial_length > 0);
- ASSERT(initial_length <= kOldStoreBufferLength);
+ ASSERT(initial_length <= kOldRegularStoreBufferLength);
+ ASSERT(initial_length <= old_store_buffer_length_);
+ ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
old_limit_ = old_start_ + initial_length;
- old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+ old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
+ old_reserved_limit_ = old_start_ + old_store_buffer_length_;
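+ // Resulting layout of the reservation:
+ //   old_start_ == old_top_ <= old_limit_ (committed below: one OS page)
+ //   old_limit_ <= old_regular_limit_ <= old_reserved_limit_ (the rest is
+ //   committed on demand as the buffer grows).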
CHECK(old_virtual_memory_->Commit(
reinterpret_cast<void*>(old_start_),
delete old_virtual_memory_;
delete[] hash_set_1_;
delete[] hash_set_2_;
- old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
- start_ = limit_ = NULL;
+ old_start_ = NULL;
+ old_top_ = NULL;
+ old_limit_ = NULL;
+ old_reserved_limit_ = NULL;
+ old_regular_limit_ = NULL;
+ start_ = NULL;
+ limit_ = NULL;
heap_->public_set_store_buffer_top(start_);
}
}
+ template <StoreBuffer::ExemptPopularPagesMode mode>
+void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
+ // Sample 1 entry in 97 and filter out the pages where we estimate that more
+ // than 1 in 8 pointers are to new space.
+ static const int kSampleFinenesses = 5;
+ static const struct Samples {
+ int prime_sample_step;
+ int threshold;
+ } samples[kSampleFinenesses] = {
+ { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+ { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+ { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+ { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+ { 1, 0}
+ };
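+ // For example, with 1 MB pages and 8-byte pointers a page has 128K slots;
+ // the first pass samples every 97th slot (~1351 samples) and exempts a
+ // page once more than 168 sampled pointers (roughly 1 in 8) point to new
+ // space.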
+ for (int i = 0; i < kSampleFinenesses; i++) {
+ ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+ // As a last resort we mark all pages as being exempt from the store buffer.
+ ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+ if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
+ if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_regular_limit_) return;
+ }
+}
+
+
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
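+ // Grow by doubling while below the applicable cap: old_regular_limit_
+ // normally, or old_reserved_limit_ when overflow is allowed during a full
+ // GC.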
while (old_limit_ - old_top_ < space_needed &&
- old_limit_ < old_reserved_limit_) {
+ old_limit_ < (allow_overflow_ ? old_reserved_limit_
+                               : old_regular_limit_)) {
size_t grow = old_limit_ - old_start_; // Double size.
CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
grow * kPointerSize,
if (SpaceAvailable(space_needed)) return;
- // Sample 1 entry in 97 and filter out the pages where we estimate that more
- // than 1 in 8 pointers are to new space.
- static const int kSampleFinenesses = 5;
- static const struct Samples {
- int prime_sample_step;
- int threshold;
- } samples[kSampleFinenesses] = {
- { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
- { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
- { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
- { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
- { 1, 0}
- };
- for (int i = 0; i < kSampleFinenesses; i++) {
- ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
- // As a last resort we mark all pages as being exempt from the store buffer.
- ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
- if (SpaceAvailable(space_needed)) return;
- }
- UNREACHABLE();
+ IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
+ ASSERT(SpaceAvailable(space_needed));
}
}
-void StoreBuffer::GCPrologue() {
+void StoreBuffer::GCPrologue(bool allow_overflow) {
ClearFilteringHashSets();
- during_gc_ = true;
+ allow_overflow_ = allow_overflow;
}
void StoreBuffer::GCEpilogue() {
- during_gc_ = false;
+ if (allow_overflow_ && old_limit_ > old_regular_limit_) {
+ IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
+ ASSERT(old_top_ < old_regular_limit_);
+ // Uncommit the overflow region above the regular limit; Uncommit takes a
+ // size in bytes, while pointer arithmetic here counts entries.
+ old_virtual_memory_->Uncommit(
+     reinterpret_cast<void*>(old_regular_limit_),
+     (old_limit_ - old_regular_limit_) * kPointerSize);
+ old_limit_ = old_regular_limit_;
+ }
+
+ allow_overflow_ = false;
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-typedef void (StoreBuffer::*RegionCallback)(Address start,
- Address end,
- ObjectSlotCallback slot_callback,
- bool clear_maps);
-
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
- static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+ static const int kOldRegularStoreBufferLength = kStoreBufferLength * 16;
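+ // On a 64-bit build these work out to a 128 KB new-space buffer (16K
+ // entries) and a regular old-buffer cap of 256K entries (2 MB committed).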
static const int kHashSetLengthLog2 = 12;
static const int kHashSetLength = 1 << kHashSetLengthLog2;
void Compact();
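+ // GCPrologue/GCEpilogue bracket a collection. Expected pairing (a sketch;
+ // the actual call sites in Heap are outside this diff):
+ //   store_buffer_->GCPrologue(is_full_gc);  // overflow only for full GCs
+ //   ... collect ...
+ //   store_buffer_->GCEpilogue();            // shrinks back if overflowed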
- void GCPrologue();
+ void GCPrologue(bool allow_overflow);
void GCEpilogue();
Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
Address* old_start_;
Address* old_limit_;
Address* old_top_;
+
+ // The regular limit specifies how big the store buffer may become during
+ // mutator execution or while scavenging.
+ Address* old_regular_limit_;
+
+ // The reserved limit is bigger than the regular limit. It should equal the
+ // size of a semi-space, so that evacuating new space after sweeping in a
+ // full garbage collection cannot overflow the buffer and force new pages
+ // into scan-on-scavenge mode.
Address* old_reserved_limit_;
+
base::VirtualMemory* old_virtual_memory_;
+ int old_store_buffer_length_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
- bool during_gc_;
+
+ // If allow_overflow_ is set, the store buffer may grow up to
+ // old_reserved_limit_. GCEpilogue() then shrinks it back to stay within
+ // old_regular_limit_.
+ bool allow_overflow_;
+
// The garbage collector iterates over many pointers to new space that are not
// handled by the store buffer. This flag indicates whether the pointers
// found by the callbacks should be added to the store buffer or not.
void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
+ enum ExemptPopularPagesMode {
+ ENSURE_SPACE,
+ SHRINK_TO_REGULAR_SIZE
+ };
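+ // ENSURE_SPACE stops exempting pages as soon as |space_needed| entries
+ // fit; SHRINK_TO_REGULAR_SIZE stops once the live entries fit below
+ // old_regular_limit_, so GCEpilogue() can uncommit the overflow region.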
+
+ template <ExemptPopularPagesMode mode>
+ void IterativelyExemptPopularPages(intptr_t space_needed);
+
// Set the map field of the object to NULL if contains a map.
inline void ClearDeadObject(HeapObject *object);
ObjectSlotCallback slot_callback,
bool clear_maps);
- // For each region of pointers on a page in use from an old space call
- // visit_pointer_region callback.
- // If either visit_pointer_region or callback can cause an allocation
- // in old space and changes in allocation watermark then
- // can_preallocate_during_iteration should be set to true.
- void IteratePointersOnPage(
- PagedSpace* space,
- Page* page,
- RegionCallback region_callback,
- ObjectSlotCallback slot_callback);
-
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
bool clear_maps);