#ifdef VERIFY_HEAP
Heap* debug_heap = heap_;
- CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
- CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
- CHECK(!debug_heap->code_space()->was_swept_conservatively());
- CHECK(!debug_heap->cell_space()->was_swept_conservatively());
- CHECK(!debug_heap->property_cell_space()->
- was_swept_conservatively());
- CHECK(!debug_heap->map_space()->was_swept_conservatively());
+ CHECK(debug_heap->old_data_space()->is_iterable());
+ CHECK(debug_heap->old_pointer_space()->is_iterable());
+ CHECK(debug_heap->code_space()->is_iterable());
+ CHECK(debug_heap->cell_space()->is_iterable());
+ CHECK(debug_heap->property_cell_space()->is_iterable());
+ CHECK(debug_heap->map_space()->is_iterable());
#endif
#ifdef VERIFY_HEAP
// The old data space was normally swept conservatively, in which case the
// iterator does not work, so we normally skip the next bit.
- if (!heap->old_data_space()->was_swept_conservatively()) {
+ if (heap->old_data_space()->is_iterable()) {
HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next();
object != NULL; object = data_it.Next())
bool Heap::IsHeapIterable() {
- return (!old_pointer_space()->was_swept_conservatively() &&
- !old_data_space()->was_swept_conservatively() &&
+ return (old_pointer_space()->is_iterable() &&
+ old_data_space()->is_iterable() &&
new_space_top_after_last_gc_ == new_space()->top());
}
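
For context, the usual consumer of this predicate is a "make iterable" helper that forces a precise-sweeping GC when needed. A minimal sketch, written from memory of the surrounding V8 sources; treat MakeHeapIterable and kMakeHeapIterableMask as assumed names:

void Heap::MakeHeapIterable() {
  ASSERT(AllowHeapAllocation::IsAllowed());
  if (!IsHeapIterable()) {
    // Force a full GC that sweeps the old spaces precisely, so that
    // is_iterable() becomes true on each of them.
    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
  }
  ASSERT(IsHeapIterable());
}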
// TODO(hpayer): Bring back VerifyEvacuation for pages swept in parallel or
// concurrently.
if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
- space->was_swept_conservatively()) return;
+ !space->is_iterable()) return;
PageIterator it(space);
while (it.has_next()) {
}
-bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
- return sweeping_pending_;
+bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
+ return (space == NULL || space->is_swept_concurrently()) &&
+ sweeping_pending_;
}
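
A quick illustration of the two call forms the new parameter allows; both patterns appear in the paged-space changes further down, so these call sites are only a condensed sketch:

// Global form: is any space still being swept concurrently?
if (collector->IsConcurrentSweepingInProgress()) {
  collector->WaitUntilSweepingCompleted();
}
// Per-space form: is this particular space being swept concurrently?
if (collector->IsConcurrentSweepingInProgress(space)) {
  collector->RefillFreeList(space);
}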
static void DiscoverGreyObjectsInSpace(Heap* heap,
MarkingDeque* marking_deque,
PagedSpace* space) {
- if (!space->was_swept_conservatively()) {
+ if (space->is_iterable()) {
HeapObjectIterator it(space);
DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
} else {
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
- space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == PARALLEL_CONSERVATIVE ||
- sweeper == CONCURRENT_CONSERVATIVE);
+ space->set_is_iterable(sweeper == PRECISE);
+ space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
bool AreSweeperThreadsActivated();
- bool IsConcurrentSweepingInProgress();
+ // If a paged space is passed in, this method checks whether concurrent
+ // sweeping is in progress on that particular space. Otherwise, it checks
+ // whether concurrent sweeping is in progress on any space right now.
+ bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
void set_sequential_sweeping(bool sequential_sweeping) {
sequential_sweeping_ = sequential_sweeping;
HeapObjectIterator::PageMode mode,
HeapObjectCallback size_f) {
// Check that we actually can iterate this space.
- ASSERT(!space->was_swept_conservatively());
+ ASSERT(space->is_iterable());
space_ = space;
cur_addr_ = cur;
Executability executable)
: Space(heap, id, executable),
free_list_(this),
- was_swept_conservatively_(false),
+ is_iterable_(true),
+ is_swept_concurrently_(false),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
// We can only iterate over the pages if they were swept precisely.
- if (was_swept_conservatively_) return;
+ if (!is_iterable_) return;
bool allocation_pointer_found_in_space =
(allocation_info_.top() == allocation_info_.limit());
intptr_t PagedSpace::SizeOfObjects() {
- ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
- (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->
+ IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
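
As a worked example of what this returns (numbers invented for illustration): with Size() == 1024 KB, unswept_free_bytes_ == 100 KB estimated as still reclaimable by the concurrent sweepers, and a linear allocation area of limit() - top() == 4 KB, SizeOfObjects() reports 1024 - 100 - 4 = 920 KB of live objects.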
MarkCompactCollector* collector = heap()->mark_compact_collector();
// If sweeper threads are still running, wait for them.
- if (collector->IsConcurrentSweepingInProgress()) {
+ if (collector->IsConcurrentSweepingInProgress(this)) {
collector->WaitUntilSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// If sweeper threads are active, try to re-fill the free-lists.
MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->IsConcurrentSweepingInProgress()) {
+ if (collector->IsConcurrentSweepingInProgress(this)) {
collector->RefillFreeList(this);
// Retry the free list allocation.
", available: %" V8_PTR_PREFIX "d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- if (was_swept_conservatively_) return;
+ if (!is_iterable_) return;
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
static void ResetCodeStatistics(Isolate* isolate);
#endif
- bool was_swept_conservatively() { return was_swept_conservatively_; }
- void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
+ bool is_iterable() { return is_iterable_; }
+ void set_is_iterable(bool b) { is_iterable_ = b; }
+
+ bool is_swept_concurrently() { return is_swept_concurrently_; }
+ void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
// Normal allocation information.
AllocationInfo allocation_info_;
- bool was_swept_conservatively_;
+ // This space was swept precisely, hence it is iterable.
+ bool is_iterable_;
+
+ // This space is currently being swept by sweeper threads.
+ bool is_swept_concurrently_;
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimation because concurrent