// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include <algorithm>

#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {}

void StoreBuffer::SetUp() {
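  // Reserve three times the buffer size so that the buffer proper can start
  // at an address that is a multiple of twice its size.  With that alignment
  // a single bit test against kStoreBufferOverflowBit (checked by the DCHECKs
  // below) detects that the write pointer has reached limit_.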
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);
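
  // Reserve the larger old buffer.  Only one OS page is committed here;
  // EnsureSpace() commits more (doubling each time) up to old_reserved_limit_.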
  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // Don't know the alignment requirements of the OS, but it is certainly not
  // less than 0xfff.
  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
  DCHECK(initial_length > 0);
  DCHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                   (old_limit_ - old_start_) * kPointerSize,
                                   false)) {
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  USE(vm_limit);
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                               kStoreBufferSize,
                               false)) {  // Not executable.
    V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
  }
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();

  heap_->isolate()->set_store_buffer_hash_set_1_address(hash_set_1_);
  heap_->isolate()->set_store_buffer_hash_set_2_address(hash_set_2_);
}

void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}
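
// Called when the small store buffer fills up: compacting moves the entries
// into the old buffer (deduplicating via the hash sets) and resets the top
// pointer so stores can continue to be recorded.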
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}

void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  DCHECK(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(current)));
      if (heap_->InNewSpace(object)) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}

bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}

void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                     grow * kPointerSize, false)) {
      V8::FatalProcessOutOfMemory("StoreBuffer::EnsureSpace");
    }
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
      {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
      {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
      {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
      {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
      {1, 0}};
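  // Try each sampling fineness in turn, from coarse to fine.  The final
  // {1, 0} entry visits every slot and exempts every page that still has an
  // entry, so the last pass is guaranteed to empty the old buffer.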
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}

// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}
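
// Remove entries whose slot lies on a page with the given flag set.  Pages
// marked scan-on-scavenge are scanned in their entirety during a scavenge,
// so individual slots on them do not need to stay in the store buffer.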
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}

void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}
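
// Compacts the buffer, drops entries on scan-on-scavenge pages and reports
// whether any such pages exist; if they do, the caller must scan those pages
// separately (see IteratePointersToNewSpace).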
bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}

void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}

static Address* in_store_buffer_1_element_cache = NULL;

bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}

void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}

void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy predicate point into
        // the active semispace.
        Object* object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif

void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}

void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}
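
// Scans the region [start, end) word by word.  For each slot that points
// into new space the callback is invoked, and if the slot still points into
// new space afterwards it is re-added to the store buffer.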
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start; slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    if (heap_->InNewSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      DCHECK(heap_object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(heap_object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
      object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InNewSpace(object)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}
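
// Replays the entries that are currently in the old buffer.  old_top_ is
// reset first, so only slots that still point into new space after the
// callback has run are kept (they are re-entered one by one, hence the
// DCHECK that at most one entry is added per iteration).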
void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                               bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        if (heap_->InNewSpace(object)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}

void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}

void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that callback will rebuild the store buffer thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSwept());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                DCHECK(heap_object->IsMap());
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + Map::kPointerFieldsBeginOffset,
                    heap_object->address() + Map::kPointerFieldsEndOffset,
                    slot_callback, clear_maps);
              }
            }
          } else {
            if (!page->SweepingCompleted()) {
              heap_->mark_compact_collector()->SweepInParallel(page, owner);
              if (!page->SweepingCompleted()) {
                // We were not able to sweep that page, i.e., a concurrent
                // sweeper thread currently owns this page.
                // TODO(hpayer): This may introduce a huge pause here. We
                // just care about finish sweeping of the scan on scavenge page.
                heap_->mark_compact_collector()->EnsureSweepingCompleted();
              }
            }
            CHECK(page->owner() == heap_->old_pointer_space());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We iterate over objects that contain new space pointers only.
              bool may_contain_raw_values = heap_object->MayContainRawValues();
              if (!may_contain_raw_values) {
                Address obj_address = heap_object->address();
                const int start_offset = HeapObject::kHeaderSize;
                const int end_offset = heap_object->Size();
#if V8_DOUBLE_FIELDS_UNBOXING
                LayoutDescriptorHelper helper(heap_object->map());
                bool has_only_tagged_fields = helper.all_fields_tagged();

                if (!has_only_tagged_fields) {
                  for (int offset = start_offset; offset < end_offset;) {
                    int end_of_region_offset;
                    if (helper.IsTagged(offset, end_offset,
                                        &end_of_region_offset)) {
                      FindPointersToNewSpaceInRegion(
                          obj_address + offset,
                          obj_address + end_of_region_offset, slot_callback,
                          clear_maps);
                    }
                    offset = end_of_region_offset;
                  }
                } else {
#endif
                  Address start_address = obj_address + start_offset;
                  Address end_address = obj_address + end_offset;
                  // Object has only tagged fields.
                  FindPointersToNewSpaceInRegion(start_address, end_address,
                                                 slot_callback, clear_maps);
#if V8_DOUBLE_FIELDS_UNBOXING
                }
#endif
              }
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}
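
// Moves the contents of the small, contiguous store buffer into the old
// buffer, dropping most duplicates along the way via the two lossy hash sets,
// and resets the top pointer so further stores can be recorded.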
void StoreBuffer::Compact() {
  CHECK(hash_set_1_ == heap_->isolate()->store_buffer_hash_set_1_address());
  CHECK(hash_set_2_ == heap_->isolate()->store_buffer_hash_set_2_address());

  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->cell_space()->Contains(*current));
    DCHECK(!heap_->code_space()->Contains(*current));
    DCHECK(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}
}
}  // namespace v8::internal