abort_incremental_marking_(false),
compacting_(false),
was_marked_incrementally_(false),
- collect_maps_(FLAG_collect_maps),
flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_maps_(NULL) { }
+ encountered_weak_maps_(NULL),
+ marker_(this, this) { }
#ifdef DEBUG
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (collect_maps_) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveTransitions();
ClearWeakMaps();
SweepSpaces();
- if (!collect_maps_) ReattachInitialMaps();
-
- heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+ if (!FLAG_collect_maps) ReattachInitialMaps();
Finish();
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
ASSERT(Marking::IsWhite(mark_bit));
+ ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
}
#endif
MarkBit mark_bit = Marking::MarkBitFrom(obj);
mark_bit.Clear();
mark_bit.Next().Clear();
+ Page::FromAddress(obj->address())->ResetLiveBytes();
}
}
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
- // Disable collection of maps if incremental marking is enabled.
- // Map collection algorithm relies on a special map transition tree traversal
- // order which is not implemented for incremental marking.
- collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
-
// Monomorphic ICs are preserved when possible, but need to be flushed
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
- if (collect_maps_) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit) {
// If the GDBJIT interface is active, disable compaction.
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
// Enqueue weak map in linked list of encountered weak maps.
- ASSERT(weak_map->next() == Smi::FromInt(0));
- weak_map->set_next(collector->encountered_weak_maps());
- collector->set_encountered_weak_maps(weak_map);
+ if (weak_map->next() == Smi::FromInt(0)) {
+ weak_map->set_next(collector->encountered_weak_maps());
+ collector->set_encountered_weak_maps(weak_map);
+ }
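// The guard above replaces an ASSERT, presumably because a weak map can be
// reached more than once during marking (e.g. when marking incrementally).
// The list itself is an intrusive prepend threaded through the 'next' field.
// A minimal sketch with hypothetical types, using a distinct sentinel in
// place of Smi::FromInt(0):
struct WeakMapNode {
  WeakMapNode* next;
};
static WeakMapNode kNotEnqueued;  // Address serves as the sentinel value.

void EnqueueWeakMap(WeakMapNode** head, WeakMapNode* weak_map) {
  if (weak_map->next != &kNotEnqueued) return;  // Already enqueued.
  weak_map->next = *head;  // Prepend: O(1) and allocation-free during GC.
  *head = weak_map;
}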
// Skip visiting the backing hash table containing the mappings.
int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
    map->GetHeap(), object,
    JSWeakMap::BodyDescriptor::kStartOffset, JSWeakMap::kTableOffset);
BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
    map->GetHeap(), object,
    JSWeakMap::kTableOffset + kPointerSize, object_size);
// Mark the backing hash table without pushing it on the marking stack.
- ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
- ASSERT(!MarkCompactCollector::IsMarked(table));
- collector->SetMark(table, Marking::MarkBitFrom(table));
+ Object* table_object = weak_map->table();
+ if (!table_object->IsHashTable()) return;
+ ObjectHashTable* table = ObjectHashTable::cast(table_object);
+ Object** table_slot =
+ HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
+ MarkBit table_mark = Marking::MarkBitFrom(table);
+ collector->RecordSlot(table_slot, table_slot, table);
+ if (!table_mark.Get()) collector->SetMark(table, table_mark);
+ // Recording the map slot can be skipped, because maps are not compacted.
collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
ASSERT(MarkCompactCollector::IsMarked(table->map()));
}
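// Marking the backing table without pushing it on the marking deque is what
// keeps the mappings weak: the table itself stays alive, but pointers inside
// it are never traced through it. A simplified sketch of the two marking
// modes, as hypothetical free functions (deque overflow handling omitted):
void MarkStrong(HeapObject* object, MarkingDeque* marking_deque) {
  MarkBit mark = Marking::MarkBitFrom(object);
  if (mark.Get()) return;  // Already marked.
  mark.Set();
  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
  marking_deque->PushBlack(object);  // Fields will be visited later.
}

void MarkAsWeakContainer(HeapObject* object) {
  MarkBit mark = Marking::MarkBitFrom(object);
  if (mark.Get()) return;
  mark.Set();
  MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
  // Deliberately not pushed: the object survives, but its referents do not
  // (unless reached some other way).
}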
Heap* heap = map->GetHeap();
Code* code = reinterpret_cast<Code*>(object);
if (FLAG_cleanup_code_caches_at_gc) {
- Object* raw_info = code->type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackCells* type_feedback_cells =
- TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
- for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
- ASSERT(type_feedback_cells->AstId(i)->IsSmi());
- JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
- cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
- }
- }
+ code->ClearTypeFeedbackCells(heap);
}
code->CodeIterateBody<StaticMarkingVisitor>(heap);
}
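// The deleted loop above has been folded into Code::ClearTypeFeedbackCells.
// Reconstructed from the removed lines, its body is presumably equivalent to
// this sketch (not the verbatim method):
void Code::ClearTypeFeedbackCells(Heap* heap) {
  Object* raw_info = type_feedback_info();
  if (raw_info->IsTypeFeedbackInfo()) {
    TypeFeedbackCells* type_feedback_cells =
        TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
    for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
      ASSERT(type_feedback_cells->AstId(i)->IsSmi());
      JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
      cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
    }
  }
}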
static void VisitSharedFunctionInfoAndFlushCode(Map* map,
HeapObject* object) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
+ if (shared->ic_age() != heap->global_ic_age()) {
+ shared->ResetForNewContext(heap->global_ic_age());
+ }
+
MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
if (!collector->is_code_flushing_enabled()) {
VisitSharedFunctionInfoGeneric(map, object);
if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
- if (shared->ic_age() != heap->global_ic_age()) {
- shared->ResetForNewContext(heap->global_ic_age());
- }
-
if (!known_flush_code_candidate) {
known_flush_code_candidate = IsFlushable(heap, shared);
if (known_flush_code_candidate) {
JSFunction::kCodeEntryOffset + kPointerSize),
HeapObject::RawField(object,
JSFunction::kNonWeakFieldsEndOffset));
-
- // Don't visit the next function list field as it is a weak reference.
- Object** next_function =
- HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
- heap->mark_compact_collector()->RecordSlot(
- next_function, next_function, *next_function);
}
static inline void VisitJSRegExpFields(Map* map,
// Since no objects have yet been moved we can safely access the map of
// the object.
- if (o->IsExternalString() ||
- (o->IsHeapObject() && HeapObject::cast(o)->map()->has_external_resource())) {
- heap_->FinalizeExternalString(HeapObject::cast(*p));
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
}
// Set the entry to the_hole_value (as deleted).
*p = heap_->the_hole_value();
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
- // in a special way to make transition links weak.
- // Only maps for subclasses of JSReceiver can have transitions.
+ // in a special way to make transition links weak. Only maps for subclasses
+ // of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- MarkMapContents(map);
+ if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ marker_.MarkMapContents(map);
} else {
marking_deque_.PushBlack(map);
}
}
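// Judging from the marker_(this, this) initializer and the base_marker() /
// mark_compact_collector() calls below, the new Marker helper presumably has
// roughly this shape (a sketch inferred from its uses, not the verbatim
// declaration):
template <class T>
class Marker {
 public:
  Marker(T* base_marker, MarkCompactCollector* mark_compact_collector)
      : base_marker_(base_marker),
        mark_compact_collector_(mark_compact_collector) {}

  // Map-specific marking that treats transition and descriptor links weakly.
  void MarkMapContents(Map* map);
  void MarkDescriptorArray(DescriptorArray* descriptors);
  void MarkAccessorPairSlot(AccessorPair* accessors, int offset);

 private:
  T* base_marker() { return base_marker_; }
  MarkCompactCollector* mark_compact_collector() {
    return mark_compact_collector_;
  }

  T* base_marker_;                                // Marking primitives.
  MarkCompactCollector* mark_compact_collector_;  // Slot recording.
};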
-void MarkCompactCollector::MarkMapContents(Map* map) {
+// Force instantiation of template instances.
+template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
+template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
+
+
+template <class T>
+void Marker<T>::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
// transitions in ClearNonLiveTransitions.
- FixedArray* prototype_transitions = map->prototype_transitions();
- MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
- if (!mark.Get()) {
- mark.Set();
- MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
- prototype_transitions->Size());
+ Object** proto_trans_slot =
+ HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
+ HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
+ if (prototype_transitions->IsFixedArray()) {
+ mark_compact_collector()->RecordSlot(proto_trans_slot,
+ proto_trans_slot,
+ prototype_transitions);
+ MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+ if (!mark.Get()) {
+ mark.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
+ prototype_transitions->Size());
+ }
}
- Object** raw_descriptor_array_slot =
+ // Make sure that the back pointer stored either in the map itself or inside
+ // its prototype transitions array is marked. Treat pointers in the descriptor
+ // array as weak and also mark that array to prevent visiting it later.
+ base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
+
+ Object** descriptor_array_slot =
HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
- Object* raw_descriptor_array = *raw_descriptor_array_slot;
- if (!raw_descriptor_array->IsSmi()) {
- MarkDescriptorArray(
- reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
+ Object* descriptor_array = *descriptor_array_slot;
+ if (!descriptor_array->IsSmi()) {
+ MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
+ }
+
+ // Mark the Object* fields of the Map. Since the descriptor array has been
+ // marked already, it is fine that one of these fields contains a pointer
+ // to it. But make sure to skip back pointer and prototype transitions.
+ STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
+ Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
+ Object** start_slot = HeapObject::RawField(
+ map, Map::kPointerFieldsBeginOffset);
+ Object** end_slot = HeapObject::RawField(
+ map, Map::kPrototypeTransitionsOrBackPointerOffset);
+ for (Object** slot = start_slot; slot < end_slot; slot++) {
+ Object* obj = *slot;
+ if (!obj->NonFailureIsHeapObject()) continue;
+ mark_compact_collector()->RecordSlot(start_slot, slot, obj);
+ base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
}
-
- // Mark the Object* fields of the Map.
- // Since the descriptor array has been marked already, it is fine
- // that one of these fields contains a pointer to it.
- Object** start_slot = HeapObject::RawField(map,
- Map::kPointerFieldsBeginOffset);
-
- Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
-
- StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
-}
-
-
-void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
- int offset) {
- Object** slot = HeapObject::RawField(accessors, offset);
- HeapObject* accessor = HeapObject::cast(*slot);
- if (accessor->IsMap()) return;
- RecordSlot(slot, slot, accessor);
- MarkObjectAndPush(accessor);
}
-void MarkCompactCollector::MarkDescriptorArray(
- DescriptorArray* descriptors) {
- MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
- if (descriptors_mark.Get()) return;
+template <class T>
+void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != heap()->empty_descriptor_array());
- SetMark(descriptors, descriptors_mark);
+ ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
- FixedArray* contents = reinterpret_cast<FixedArray*>(
+ // The DescriptorArray contains a pointer to its contents array, but the
+ // contents array will be marked black and hence not be visited again.
+ if (!base_marker()->MarkObjectAndPush(descriptors)) return;
+ FixedArray* contents = FixedArray::cast(
descriptors->get(DescriptorArray::kContentArrayIndex));
- ASSERT(contents->IsHeapObject());
- ASSERT(!IsMarked(contents));
- ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
- MarkBit contents_mark = Marking::MarkBitFrom(contents);
- SetMark(contents, contents_mark);
- // Contents contains (value, details) pairs. If the details say that the type
- // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
- // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
- // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
- // CONSTANT_TRANSITION is the value an Object* (a Map*).
+ ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
+ base_marker()->MarkObjectWithoutPush(contents);
+
+ // Contents contains (value, details) pairs. If the descriptor contains a
+ // transition (value is a Map), we don't mark the value as live. It might
+ // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
for (int i = 0; i < contents->length(); i += 2) {
- // If the pair (value, details) at index i, i+1 is not
- // a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
if (!(*slot)->IsHeapObject()) continue;
HeapObject* value = HeapObject::cast(*slot);
- RecordSlot(slot, slot, *slot);
+ mark_compact_collector()->RecordSlot(slot, slot, *slot);
switch (details.type()) {
case NORMAL:
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
- MarkObjectAndPush(value);
+ base_marker()->MarkObjectAndPush(value);
break;
case CALLBACKS:
if (!value->IsAccessorPair()) {
- MarkObjectAndPush(value);
- } else if (!MarkObjectWithoutPush(value)) {
- MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
- MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
+ base_marker()->MarkObjectAndPush(value);
+ } else if (base_marker()->MarkObjectWithoutPush(value)) {
+ AccessorPair* accessors = AccessorPair::cast(value);
+ MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
+ MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
}
break;
case ELEMENTS_TRANSITION:
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
- if (value->IsFixedArray()) MarkObjectWithoutPush(value);
+ if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
break;
}
}
- // The DescriptorArray descriptors contains a pointer to its contents array,
- // but the contents array is already marked.
- marking_deque_.PushBlack(descriptors);
}
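// Hypothetical accessors spelling out the (value, details) pair layout that
// the i += 2 loop above relies on:
static Object* DescriptorValue(FixedArray* contents, int descriptor) {
  return contents->get(descriptor * 2);
}
static PropertyDetails DescriptorDetails(FixedArray* contents,
                                         int descriptor) {
  return PropertyDetails(Smi::cast(contents->get(descriptor * 2 + 1)));
}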
-void MarkCompactCollector::CreateBackPointers() {
- HeapObjectIterator iterator(heap()->map_space());
- for (HeapObject* next_object = iterator.Next();
- next_object != NULL; next_object = iterator.Next()) {
- if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
- Map* map = Map::cast(next_object);
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- map->CreateBackPointers();
- } else {
- ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
- }
- }
- }
+template <class T>
+void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
+ Object** slot = HeapObject::RawField(accessors, offset);
+ HeapObject* accessor = HeapObject::cast(*slot);
+ if (accessor->IsMap()) return;
+ mark_compact_collector()->RecordSlot(slot, slot, accessor);
+ base_marker()->MarkObjectAndPush(accessor);
}
static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+ ASSERT(!marking_deque->IsFull());
ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
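// The bit patterns asserted above encode the tri-color abstraction in a pair
// of adjacent mark bits. A sketch of the decoding (hypothetical helper; the
// pattern "01" does not occur):
enum ObjectColor { WHITE, GREY, BLACK };

static ObjectColor ColorOf(MarkBit mark_bit) {
  if (!mark_bit.Get()) return WHITE;            // "00"
  return mark_bit.Next().Get() ? GREY : BLACK;  // "11" : "10"
}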
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. At the same time,
- // set all the prototype fields of maps back to their original value,
- // dropping the back pointers temporarily stored in the prototype field.
- // Setting the prototype field requires following the linked list of
- // back pointers, reversing them all at once. This allows us to find
- // those maps with map transitions that need to be nulled, and only
- // scan the descriptor arrays of those maps, not all maps.
- // All of these actions are carried out only on maps of JSObjects
- // and related subtypes.
+ // a marked map to an unmarked map to null transitions. This action
+ // is carried out only on maps of JSObjects and related subtypes.
for (HeapObject* obj = map_iterator.Next();
obj != NULL; obj = map_iterator.Next()) {
Map* map = reinterpret_cast<Map*>(obj);
void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
MarkBit map_mark) {
- // Follow the chain of back pointers to find the prototype.
- Object* real_prototype = map;
- while (real_prototype->IsMap()) {
- real_prototype = Map::cast(real_prototype)->prototype();
- ASSERT(real_prototype->IsHeapObject());
- }
+ Object* potential_parent = map->GetBackPointer();
+ if (!potential_parent->IsMap()) return;
+ Map* parent = Map::cast(potential_parent);
- // Follow back pointers, setting them to prototype, clearing map transitions
- // when necessary.
- Map* current = map;
+ // Follow the back pointer to check whether we are dealing with a map
+ // transition from a live map to a dead path; if so, clear the parent's
+ // transitions to dead maps.
bool current_is_alive = map_mark.Get();
- bool on_dead_path = !current_is_alive;
- while (current->IsMap()) {
- Object* next = current->prototype();
- // There should never be a dead map above a live map.
- ASSERT(on_dead_path || current_is_alive);
-
- // A live map above a dead map indicates a dead transition. This test will
- // always be false on the first iteration.
- if (on_dead_path && current_is_alive) {
- on_dead_path = false;
- current->ClearNonLiveTransitions(heap(), real_prototype);
- }
-
- Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
- *slot = real_prototype;
- if (current_is_alive) RecordSlot(slot, slot, real_prototype);
-
- current = reinterpret_cast<Map*>(next);
- current_is_alive = Marking::MarkBitFrom(current).Get();
+ bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
+ if (!current_is_alive && parent_is_alive) {
+ parent->ClearNonLiveTransitions(heap());
}
}
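// A concrete case, with hypothetical maps A and B where B was created by a
// transition from A:
//   B dead, A live => iterating B finds live parent A, and A's transitions
//                     to dead targets are cleared.
//   B live, A dead => does not occur; a dead map is never the transition
//                     parent of a live one (cf. the ASSERT removed above).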
ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
+ Object** anchor = reinterpret_cast<Object**>(table->address());
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
- Object* value = table->get(table->EntryToValueIndex(i));
- StaticMarkingVisitor::VisitPointer(heap(), &value);
- table->set_unchecked(heap(),
- table->EntryToValueIndex(i),
- value,
- UPDATE_WRITE_BARRIER);
+ Object** key_slot =
+ HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
+ ObjectHashTable::EntryToIndex(i)));
+ RecordSlot(anchor, key_slot, *key_slot);
+ Object** value_slot =
+ HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
+ ObjectHashTable::EntryToValueIndex(i)));
+ StaticMarkingVisitor::MarkObjectByPointer(this, anchor, value_slot);
}
}
weak_map_obj = weak_map->next();
}
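// The loop above applies the weak-map retention rule: an entry's value is
// marked (and both slots recorded for compaction) only while its key is
// marked; entries with unmarked keys are left alone here and removed later
// by ClearWeakMaps.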
-static HeapObject* UpdateReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+ Object** p) {
MapWord map_word = HeapObject::cast(*p)->map_word();
if (map_word.IsForwardingAddress()) {
- return HeapObject::cast(map_word.ToForwardingAddress());
+ return String::cast(map_word.ToForwardingAddress());
}
- return HeapObject::cast(*p);
+ return String::cast(*p);
}
// under it.
ProcessInvalidatedCode(&updating_visitor);
+ heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
#ifdef DEBUG
if (FLAG_verify_heap) {
VerifyEvacuation(heap_);
bool lazy_sweeping_active = false;
bool unused_page_present = false;
- intptr_t old_space_size = heap()->PromotedSpaceSize();
+ intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
intptr_t space_left =
Min(heap()->OldGenPromotionLimit(old_space_size),
heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
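// Worked example with hypothetical numbers: if live old-space objects total
// 100 MB, the promotion limit is 140 MB, and the allocation limit is 160 MB,
// then space_left = Min(140, 160) - 100 = 40 MB; presumably this headroom is
// what decides below how much sweeping can remain lazy.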