// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_sets_are_empty_(true) {}

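// Note: the new store buffer lives inside a reservation three times its size
// so that its base can be rounded up to twice the buffer size.  With that
// alignment, running off the end of the buffer can be detected with a single
// bit test on the top pointer (see the kStoreBufferOverflowBit checks below).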
void StoreBuffer::SetUp() {
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
  DCHECK(initial_length > 0);
  DCHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                   (old_limit_ - old_start_) * kPointerSize,
                                   false)) {
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                               kStoreBufferSize,
                               false)) {  // Not executable.
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();

  heap_->isolate()->set_store_buffer_hash_set_1_address(hash_set_1_);
  heap_->isolate()->set_store_buffer_hash_set_2_address(hash_set_2_);
}

void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}

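// Invoked when the new store buffer runs out of space: its entries are
// compacted into the old buffer and the overflow counter is incremented.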
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}

bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}

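// Makes room for at least |space_needed| entries in the old store buffer.
// The strategy escalates: first the committed region is doubled up to the
// reserved limit, then entries on pages already flagged scan-on-scavenge are
// filtered out, and finally the buffer is sampled at progressively finer
// granularity so that popular pages can be exempted (flagged
// scan-on-scavenge) until enough space is free.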
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                     grow * kPointerSize, false)) {
      V8::FatalProcessOutOfMemory("StoreBuffer::EnsureSpace");
    }
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
      {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
      {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
      {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
      {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
      {1, 0}};
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}

// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}

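// Removes from the old store buffer every entry that points into a chunk
// with the given flag set (e.g. SCAN_ON_SCAVENGE pages, which will be
// scanned exhaustively instead of via recorded slots).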
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}

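// Compacts the buffer and drops entries for scan-on-scavenge pages.  Returns
// true if any page is flagged scan_on_scavenge and therefore has to be
// scanned in full during iteration.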
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}

void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}

void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
}

void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy predicate point into
        // the active semispace.
        Object* object = *slot;
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}

void StoreBuffer::Verify() {
  VerifyPointers(heap_->lo_space());
}

void StoreBuffer::GCEpilogue() {
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

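// Visits one recorded old-to-new slot.  The callback may move the target
// object; if the target ends up in to-space, the slot is re-recorded so that
// the next scavenge still sees it.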
void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
                                      ObjectSlotCallback slot_callback) {
  Object** slot = reinterpret_cast<Object**>(slot_address);
  Object* object = *slot;

  // If the object is not in from space, it must be a duplicate store buffer
  // entry and the slot was already updated.
  if (heap_->InFromSpace(object)) {
    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
    DCHECK(heap_object->IsHeapObject());
    slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
    object = *slot;
    // If the object was in from space before and is after executing the
    // callback in to space, the object is still live.
    // Unfortunately, we do not know about the slot. It could be in a
    // just freed free space object.
    if (heap_->InToSpace(object)) {
      EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
    }
  }
}

void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start; slot_address < end;
       slot_address += kPointerSize) {
    ProcessOldToNewSlot(slot_address, slot_callback);
  }
}

void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      ProcessOldToNewSlot(*current, slot_callback);
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}

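// Drops entries whose target is no longer a new-space heap object, or whose
// slot lies in a dead (non-black) object, and clears the scan_on_scavenge
// flag on large objects that did not survive marking.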
void StoreBuffer::ClearInvalidStoreBufferEntries() {
  Compact();
  Address* new_top = old_start_;
  for (Address* current = old_start_; current < old_top_; current++) {
    Address addr = *current;
    Object** slot = reinterpret_cast<Object**>(addr);
    Object* object = *slot;
    if (heap_->InNewSpace(object) && object->IsHeapObject()) {
      // If the target object is not black, the source slot must be part
      // of a non-black (dead) object.
      HeapObject* heap_object = HeapObject::cast(object);
      if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
          heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
        *new_top++ = addr;
      }
    }
  }
  old_top_ = new_top;
  ClearFilteringHashSets();

  // Don't scan on scavenge dead large objects.
  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    if (chunk->scan_on_scavenge() && !Marking::MarkBitFrom(object).Get()) {
      chunk->set_scan_on_scavenge(false);
    }
  }
}

void StoreBuffer::VerifyValidStoreBufferEntries() {
  for (Address* current = old_start_; current < old_top_; current++) {
    Object** slot = reinterpret_cast<Object**>(*current);
    Object* object = *slot;
    CHECK(object->IsHeapObject());
    CHECK(heap_->InNewSpace(object));
    heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
        reinterpret_cast<Address>(slot), HeapObject::cast(object));
  }
}

void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that callback will rebuild the store buffer thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSwept());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next();
                 heap_object != NULL; heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                DCHECK(heap_object->IsMap());
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + Map::kPointerFieldsBeginOffset,
                    heap_object->address() + Map::kPointerFieldsEndOffset,
                    slot_callback);
              }
            }
          } else {
            if (!page->SweepingCompleted()) {
              heap_->mark_compact_collector()->SweepInParallel(page, owner);
              if (!page->SweepingCompleted()) {
                // We were not able to sweep that page, i.e., a concurrent
                // sweeper thread currently owns this page.
                // TODO(hpayer): This may introduce a huge pause here. We
                // just care about finish sweeping of the scan on scavenge page.
                heap_->mark_compact_collector()->EnsureSweepingCompleted();
              }
            }
            CHECK(page->owner() == heap_->old_pointer_space());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next();
                 heap_object != NULL; heap_object = iterator.Next()) {
              // We iterate over objects that contain new space pointers only.
              bool may_contain_raw_values = heap_object->MayContainRawValues();
              if (!may_contain_raw_values) {
                Address obj_address = heap_object->address();
                const int start_offset = HeapObject::kHeaderSize;
                const int end_offset = heap_object->Size();
#if V8_DOUBLE_FIELDS_UNBOXING
                LayoutDescriptorHelper helper(heap_object->map());
                bool has_only_tagged_fields = helper.all_fields_tagged();

                if (!has_only_tagged_fields) {
                  for (int offset = start_offset; offset < end_offset;) {
                    int end_of_region_offset;
                    if (helper.IsTagged(offset, end_offset,
                                        &end_of_region_offset)) {
                      FindPointersToNewSpaceInRegion(
                          obj_address + offset,
                          obj_address + end_of_region_offset, slot_callback);
                    }
                    offset = end_of_region_offset;
                  }
                } else {
#endif
                  Address start_address = obj_address + start_offset;
                  Address end_address = obj_address + end_offset;
                  // Object has only tagged fields.
                  FindPointersToNewSpaceInRegion(start_address, end_address,
                                                 slot_callback);
#if V8_DOUBLE_FIELDS_UNBOXING
                }
#endif
              }
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}

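// Moves the entries accumulated in the new store buffer [start_, top) into
// the old buffer, using the two filtering hash sets for cheap, lossy
// de-duplication along the way.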
void StoreBuffer::Compact() {
  CHECK(hash_set_1_ == heap_->isolate()->store_buffer_hash_set_1_address());
  CHECK(hash_set_2_ == heap_->isolate()->store_buffer_hash_set_2_address());

  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->cell_space()->Contains(*current));
    DCHECK(!heap_->code_space()->Contains(*current));
    DCHECK(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries. This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

} }  // namespace v8::internal