// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <algorithm>

#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"

namespace v8 {
namespace internal {
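
// Note on the role of this file (added summary, inferred from the code below):
// the store buffer is the remembered set for old-to-new pointers. The write
// barrier appends slot addresses to a small buffer between start_ and limit_;
// Compact() later folds them into the larger "old" buffer, which the scavenger
// iterates so it only has to visit recorded slots rather than the whole old
// generation.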
StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {}
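

// SetUp reserves three times kStoreBufferSize and only uses a
// kStoreBufferSize-sized slice aligned to 2 * kStoreBufferSize. With that
// alignment the address one past the buffer (limit_) has
// kStoreBufferOverflowBit set while every address inside the buffer does not,
// which the DCHECKs below verify; this presumably lets the overflow check be
// done on the top pointer alone.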
void StoreBuffer::SetUp() {
  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);

  old_virtual_memory_ =
      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // We don't know the alignment requirements of the OS, but it is certainly
  // not less than 0xfff.
  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length =
      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
  DCHECK(initial_length > 0);
  DCHECK(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
                                    (old_limit_ - old_start_) * kPointerSize,
                                    false));

  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  DCHECK(start_ <= vm_limit);
  DCHECK(limit_ <= vm_limit);
  USE(vm_limit);
  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}
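

// Presumably reached when the write barrier has filled the small buffer up to
// limit_; compacting into the old buffer frees it up again.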
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
  isolate->counters()->store_buffer_overflows()->Increment();
}


void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  DCHECK(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
  return old_limit_ - old_top_ >= space_needed;
}
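

// EnsureSpace escalates through progressively more aggressive ways of making
// room in the old buffer: grow the committed region (doubling while reserved
// space remains), compact the small buffer, drop entries for pages already
// marked scan-on-scavenge, and finally sample the buffer at increasing
// fineness and exempt the pages that dominate it.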
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize, false));
    old_limit_ += grow;
  }

  if (SpaceAvailable(space_needed)) return;

  if (old_buffer_is_filtered_) return;
  DCHECK(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  if (SpaceAvailable(space_needed)) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
      {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
      {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
      {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
      {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
      {1, 0}};
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort we mark all pages as being exempt from the store
    // buffer.
    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
    if (SpaceAvailable(space_needed)) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter >= threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}
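

// Drops every entry whose page has the given flag set. Pages marked
// scan-on-scavenge are visited wholesale during a scavenge, so keeping their
// slots in the buffer would be redundant.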
void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  std::sort(old_start_, old_top_);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) {
      page_has_scan_on_scavenge_flag = true;
      break;
    }
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  EnsureSpace(kStoreBufferSize / 2);
}
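

// Debug-only helper used by slow asserts: CellIsInStoreBuffer linearly scans
// both buffers for a slot address, with the one-element cache below to speed
// up repeated queries for the same slot.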
static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_), 0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        Object* object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        heap_->InNewSpace(object);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef VERIFY_HEAP
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}
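

// Walks every pointer-sized slot in [start, end), invokes the callback for
// slots that point into new space, and re-records slots that still point into
// new space afterwards (i.e. the referenced object was not promoted by the
// callback).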
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback,
    bool clear_maps) {
  for (Address slot_address = start; slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    Object* object = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
    if (heap_->InNewSpace(object)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
      DCHECK(heap_object->IsHeapObject());
      // The new space object was not promoted if it still contains a map
      // pointer. Clear the map field now lazily.
      if (clear_maps) ClearDeadObject(heap_object);
      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
      object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InNewSpace(object)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}
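

// Empties the old buffer and replays its entries through the callback. Slots
// that still point into new space after the callback ran are re-entered, so
// the buffer is rebuilt in place while it is being iterated.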
void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                               bool clear_maps) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = reinterpret_cast<Object*>(
          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        // The new space object was not promoted if it still contains a map
        // pointer. Clear the map field now lazily.
        if (clear_maps) ClearDeadObject(heap_object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        object = reinterpret_cast<Object*>(
            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
        if (heap_->InNewSpace(object)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, false);
}


void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
    ObjectSlotCallback slot_callback) {
  IteratePointersToNewSpace(slot_callback, true);
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
                                            bool clear_maps) {
  // We do not sort or remove duplicated entries from the store buffer because
  // we expect that the callback will rebuild the store buffer, thus removing
  // all duplicates and pointers to old space.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback, clear_maps);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer. We must scan them now. As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer. If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer. If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          DCHECK(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          if (owner == heap_->map_space()) {
            DCHECK(page->WasSwept());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We skip free space objects.
              if (!heap_object->IsFiller()) {
                DCHECK(heap_object->IsMap());
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + Map::kPointerFieldsBeginOffset,
                    heap_object->address() + Map::kPointerFieldsEndOffset,
                    slot_callback, clear_maps);
              }
            }
          } else {
            if (!page->SweepingCompleted()) {
              heap_->mark_compact_collector()->SweepInParallel(page, owner);
              if (!page->SweepingCompleted()) {
                // We were not able to sweep that page, i.e., a concurrent
                // sweeper thread currently owns this page.
                // TODO(hpayer): This may introduce a huge pause here. We
                // just care about finishing the sweeping of the scan on
                // scavenge page.
                heap_->mark_compact_collector()->EnsureSweepingCompleted();
              }
            }
            CHECK(page->owner() == heap_->old_pointer_space());
            HeapObjectIterator iterator(page, NULL);
            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                 heap_object = iterator.Next()) {
              // We iterate over objects that contain new space pointers only.
              if (!heap_object->MayContainRawValues()) {
                FindPointersToNewSpaceInRegion(
                    heap_object->address() + HeapObject::kHeaderSize,
                    heap_object->address() + heap_object->Size(), slot_callback,
                    clear_maps);
              }
            }
          }
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}
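

// Moves the contents of the small, write-barrier-fed buffer into the old
// buffer, skipping entries recognized as duplicates by two small hash sets.
// The deduplication is deliberately lossy: on a hash clash the entry is kept
// anyway, so correctness never depends on the hash sets.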
void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  DCHECK(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  DCHECK(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates. In the interest of speed this is a lossy operation. Some
  // duplicates will remain. We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    DCHECK(!heap_->cell_space()->Contains(*current));
    DCHECK(!heap_->code_space()->Contains(*current));
    DCHECK(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    // The upper part of an address is basically random because of ASLR and OS
    // non-determinism, so we use only the bits within a page for hashing to
    // make v8's behavior (more) deterministic.
    uintptr_t hash_addr =
        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
                 (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries. This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    DCHECK(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}

}
}  // namespace v8::internal