From 88a9350f14ddb33afd3c93d559d729e5c17bfcc7 Mon Sep 17 00:00:00 2001
From: "mstarzinger@chromium.org" <mstarzinger@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Date: Tue, 15 May 2012 12:01:23 +0000
Subject: [PATCH] Revert r11556 and r11558 to allow roll.

R=yangguo@chromium.org

Review URL: https://chromiumcodereview.appspot.com/10383182

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11564 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/incremental-marking-inl.h |  24 ++-----
 src/incremental-marking.cc    |  41 ++---------
 src/incremental-marking.h     |  15 +---
 src/mark-compact-inl.h        |  28 +++-----
 src/mark-compact.cc           | 154 ++++++++++++++++++++----------------------
 src/mark-compact.h            |  57 ++++------------
 test/cctest/test-heap.cc      |  57 ----------------
 7 files changed, 113 insertions(+), 263 deletions(-)

diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 2dae6f2..5ce003f 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -118,29 +118,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
 
 
 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
+  WhiteToGrey(obj, mark_bit);
   marking_deque_.PushGrey(obj);
 }
 
 
-bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-  if (!mark_bit.Get()) {
-    WhiteToGreyAndPush(obj, mark_bit);
-    return true;
-  }
-  return false;
-}
-
-
-bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-  if (!mark_bit.Get()) {
-    mark_bit.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    return true;
-  }
-  return false;
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
 }
 
 
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 94afffa..5b58c9d 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -42,7 +42,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       state_(STOPPED),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(false),
-      marker_(this, heap->mark_compact_collector()),
       steps_count_(0),
       steps_took_(0),
       longest_step_(0.0),
@@ -664,22 +663,6 @@ void IncrementalMarking::Hurry() {
     } else if (map == global_context_map) {
       // Global contexts have weak fields.
       VisitGlobalContext(Context::cast(obj), &marking_visitor);
-    } else if (map->instance_type() == MAP_TYPE) {
-      Map* map = Map::cast(obj);
-      heap_->ClearCacheOnMap(map);
-
-      // When map collection is enabled we have to mark through map's
-      // transitions and back pointers in a special way to make these links
-      // weak. Only maps for subclasses of JSReceiver can have transitions.
-      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-      if (FLAG_collect_maps &&
-          map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-        marker_.MarkMapContents(map);
-      } else {
-        marking_visitor.VisitPointers(
-            HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-            HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-      }
     } else {
       obj->Iterate(&marking_visitor);
     }
@@ -824,6 +807,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
       Map* map = obj->map();
       if (map == filler_map) continue;
 
+      if (obj->IsMap()) {
+        Map* map = Map::cast(obj);
+        heap_->ClearCacheOnMap(map);
+      }
+
+
       int size = obj->SizeFromMap(map);
       bytes_to_process -= size;
       MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@@ -841,22 +830,6 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
         MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
 
         VisitGlobalContext(ctx, &marking_visitor);
-      } else if (map->instance_type() == MAP_TYPE) {
-        Map* map = Map::cast(obj);
-        heap_->ClearCacheOnMap(map);
-
-        // When map collection is enabled we have to mark through map's
-        // transitions and back pointers in a special way to make these links
-        // weak. Only maps for subclasses of JSReceiver can have transitions.
-        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-        if (FLAG_collect_maps &&
-            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-          marker_.MarkMapContents(map);
-        } else {
-          marking_visitor.VisitPointers(
-              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-        }
       } else if (map->instance_type() == JS_FUNCTION_TYPE) {
         marking_visitor.VisitPointers(
             HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 39e8dae..8cbe6c1 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -154,6 +154,8 @@ class IncrementalMarking {
 
   inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
 
+  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
   // Does white->black or keeps gray or black color. Returns true if converting
   // white to black.
   inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@@ -167,16 +169,6 @@ class IncrementalMarking {
     return true;
   }
 
-  // Marks the object grey and pushes it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for incremental marking only.
-  INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
-  // Marks the object black without pushing it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for incremental marking only.
-  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
-
   inline int steps_count() {
     return steps_count_;
   }
@@ -268,7 +260,6 @@ class IncrementalMarking {
   VirtualMemory* marking_deque_memory_;
   bool marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
-  Marker<IncrementalMarking> marker_;
 
   int steps_count_;
   double steps_took_;
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index 2f7e31f..43f6b89 100644
--- a/src/mark-compact-inl.h
+++ b/src/mark-compact-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -52,15 +52,6 @@ void MarkCompactCollector::SetFlags(int flags) {
 }
 
 
-bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
-  if (MarkObjectWithoutPush(obj)) {
-    marking_deque_.PushBlack(obj);
-    return true;
-  }
-  return false;
-}
-
-
 void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
   ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
   if (!mark_bit.Get()) {
@@ -71,13 +62,16 @@ void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
 }
 
 
-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-  if (!mark_bit.Get()) {
-    SetMark(obj, mark_bit);
-    return true;
-  }
-  return false;
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
+  MarkBit mark = Marking::MarkBitFrom(object);
+  bool old_mark = mark.Get();
+  if (!old_mark) SetMark(object, mark);
+  return old_mark;
+}
+
+
+void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
+  if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
 }
 
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index c455564..0aa1192 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -64,13 +64,13 @@ MarkCompactCollector::MarkCompactCollector() :  // NOLINT
       abort_incremental_marking_(false),
       compacting_(false),
       was_marked_incrementally_(false),
+      collect_maps_(FLAG_collect_maps),
       flush_monomorphic_ics_(false),
       tracer_(NULL),
       migration_slots_buffer_(NULL),
       heap_(NULL),
       code_flusher_(NULL),
-      encountered_weak_maps_(NULL),
-      marker_(this, this) { }
+      encountered_weak_maps_(NULL) { }
 
 
 #ifdef DEBUG
@@ -282,7 +282,7 @@ void MarkCompactCollector::CollectGarbage() {
   MarkLiveObjects();
   ASSERT(heap_->incremental_marking()->IsStopped());
 
-  if (FLAG_collect_maps) ClearNonLiveTransitions();
+  if (collect_maps_) ClearNonLiveTransitions();
 
   ClearWeakMaps();
 
@@ -294,7 +294,7 @@ void MarkCompactCollector::CollectGarbage() {
 
   SweepSpaces();
 
-  if (!FLAG_collect_maps) ReattachInitialMaps();
+  if (!collect_maps_) ReattachInitialMaps();
 
   Finish();
 
@@ -658,6 +658,11 @@ void MarkCompactCollector::AbortCompaction() {
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
 
+  // Disable collection of maps if incremental marking is enabled.
+  // Map collection algorithm relies on a special map transition tree traversal
+  // order which is not implemented for incremental marking.
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
   // Monomorphic ICs are preserved when possible, but need to be flushed
   // when they might be keeping a Context alive, or when the heap is about
   // to be serialized.
@@ -1793,11 +1798,11 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
     heap_->ClearCacheOnMap(map);
 
     // When map collection is enabled we have to mark through map's transitions
-    // in a special way to make transition links weak. Only maps for subclasses
-    // of JSReceiver can have transitions.
+    // in a special way to make transition links weak.
+    // Only maps for subclasses of JSReceiver can have transitions.
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
-      marker_.MarkMapContents(map);
+    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+      MarkMapContents(map);
     } else {
       marking_deque_.PushBlack(map);
     }
@@ -1807,86 +1812,85 @@ void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
 }
 
 
-// Force instantiation of template instances.
-template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
-template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
-
-
-template <class T>
-void Marker<T>::MarkMapContents(Map* map) {
+void MarkCompactCollector::MarkMapContents(Map* map) {
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
-  // transitions in ClearNonLiveTransitions.
-  Object** proto_trans_slot =
-      HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
-  HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
-  if (prototype_transitions->IsFixedArray()) {
-    mark_compact_collector()->RecordSlot(proto_trans_slot,
-                                         proto_trans_slot,
-                                         prototype_transitions);
+  // transitions in ClearNonLiveTransitions. But make sure that back pointers
+  // stored inside prototype transitions arrays are marked.
+  Object* raw_proto_transitions = map->unchecked_prototype_transitions();
+  if (raw_proto_transitions->IsFixedArray()) {
+    FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions);
     MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
     if (!mark.Get()) {
       mark.Set();
       MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
                                             prototype_transitions->Size());
+      MarkObjectAndPush(HeapObject::cast(
+          prototype_transitions->get(Map::kProtoTransitionBackPointerOffset)));
     }
   }
 
-  // Make sure that the back pointer stored either in the map itself or inside
-  // its prototype transitions array is marked. Treat pointers in the descriptor
-  // array as weak and also mark that array to prevent visiting it later.
-  base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
-
-  Object** descriptor_array_slot =
+  Object** raw_descriptor_array_slot =
       HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
-  Object* descriptor_array = *descriptor_array_slot;
-  if (!descriptor_array->IsSmi()) {
-    MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
-  }
-
-  // Mark the Object* fields of the Map. Since the descriptor array has been
-  // marked already, it is fine that one of these fields contains a pointer
-  // to it. But make sure to skip back pointer and prototype transitions.
-  STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
-                Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
-  Object** start_slot = HeapObject::RawField(
-      map, Map::kPointerFieldsBeginOffset);
-  Object** end_slot = HeapObject::RawField(
-      map, Map::kPrototypeTransitionsOrBackPointerOffset);
-  for (Object** slot = start_slot; slot < end_slot; slot++) {
-    Object* obj = *slot;
-    if (!obj->NonFailureIsHeapObject()) continue;
-    mark_compact_collector()->RecordSlot(start_slot, slot, obj);
-    base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
+  Object* raw_descriptor_array = *raw_descriptor_array_slot;
+  if (!raw_descriptor_array->IsSmi()) {
+    MarkDescriptorArray(
+        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
   }
+
+  // Mark the Object* fields of the Map.
+  // Since the descriptor array has been marked already, it is fine
+  // that one of these fields contains a pointer to it.
+  Object** start_slot = HeapObject::RawField(map,
+                                             Map::kPointerFieldsBeginOffset);
+
+  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
+
+  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
 }
 
 
-template <class T>
-void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
+void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
+                                                int offset) {
+  Object** slot = HeapObject::RawField(accessors, offset);
+  HeapObject* accessor = HeapObject::cast(*slot);
+  if (accessor->IsMap()) return;
+  RecordSlot(slot, slot, accessor);
+  MarkObjectAndPush(accessor);
+}
+
+
+void MarkCompactCollector::MarkDescriptorArray(
+    DescriptorArray* descriptors) {
+  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+  if (descriptors_mark.Get()) return;
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
+  ASSERT(descriptors != heap()->empty_descriptor_array());
+  SetMark(descriptors, descriptors_mark);
 
-  // The DescriptorArray contains a pointer to its contents array, but the
-  // contents array will be marked black and hence not be visited again.
-  if (!base_marker()->MarkObjectAndPush(descriptors)) return;
-  FixedArray* contents = FixedArray::cast(
+  FixedArray* contents = reinterpret_cast<FixedArray*>(
       descriptors->get(DescriptorArray::kContentArrayIndex));
+  ASSERT(contents->IsHeapObject());
+  ASSERT(!IsMarked(contents));
+  ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
-  base_marker()->MarkObjectWithoutPush(contents);
-
-  // Contents contains (value, details) pairs. If the descriptor contains a
-  // transition (value is a Map), we don't mark the value as live. It might
-  // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
+  MarkBit contents_mark = Marking::MarkBitFrom(contents);
+  SetMark(contents, contents_mark);
+  // Contents contains (value, details) pairs. If the details say that the type
+  // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
+  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
+  // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
+  // CONSTANT_TRANSITION is the value an Object* (a Map*).
   for (int i = 0; i < contents->length(); i += 2) {
+    // If the pair (value, details) at index i, i+1 is not
+    // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
 
     Object** slot = contents->data_start() + i;
     if (!(*slot)->IsHeapObject()) continue;
     HeapObject* value = HeapObject::cast(*slot);
 
-    mark_compact_collector()->RecordSlot(slot, slot, *slot);
+    RecordSlot(slot, slot, *slot);
 
     switch (details.type()) {
@@ -1894,22 +1898,21 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
       case NORMAL:
       case FIELD:
       case CONSTANT_FUNCTION:
      case HANDLER:
       case INTERCEPTOR:
-        base_marker()->MarkObjectAndPush(value);
+        MarkObjectAndPush(value);
         break;
       case CALLBACKS:
         if (!value->IsAccessorPair()) {
-          base_marker()->MarkObjectAndPush(value);
-        } else if (base_marker()->MarkObjectWithoutPush(value)) {
-          AccessorPair* accessors = AccessorPair::cast(value);
-          MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
-          MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
+          MarkObjectAndPush(value);
+        } else if (!MarkObjectWithoutPush(value)) {
+          MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
+          MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
         }
         break;
       case ELEMENTS_TRANSITION:
         // For maps with multiple elements transitions, the transition maps are
         // stored in a FixedArray. Keep the fixed array alive but not the maps
         // that it refers to.
-        if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
+        if (value->IsFixedArray()) MarkObjectWithoutPush(value);
         break;
       case MAP_TRANSITION:
       case CONSTANT_TRANSITION:
@@ -1917,16 +1920,9 @@ void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
         break;
     }
   }
-}
-
-
-template <class T>
-void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
-  Object** slot = HeapObject::RawField(accessors, offset);
-  HeapObject* accessor = HeapObject::cast(*slot);
-  if (accessor->IsMap()) return;
-  mark_compact_collector()->RecordSlot(slot, slot, accessor);
-  base_marker()->MarkObjectAndPush(accessor);
+  // The DescriptorArray descriptors contains a pointer to its contents array,
+  // but the contents array is already marked.
+  marking_deque_.PushBlack(descriptors);
 }
 
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index dbc2869..6420a21 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -42,7 +42,6 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
 // Forward declarations.
 class CodeFlusher;
 class GCTracer;
-class MarkCompactCollector;
 class MarkingVisitor;
 class RootMarkingVisitor;
 
@@ -167,6 +166,7 @@ class Marking {
 
 // ----------------------------------------------------------------------------
 // Marking deque for tracing live objects.
+
 class MarkingDeque {
  public:
   MarkingDeque()
@@ -383,34 +383,6 @@ class SlotsBuffer {
 };
 
 
-// -------------------------------------------------------------------------
-// Marker shared between incremental and non-incremental marking
-template <class BaseMarker> class Marker {
- public:
-  Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
-      : base_marker_(base_marker),
-        mark_compact_collector_(mark_compact_collector) {}
-
-  // Mark pointers in a Map and its DescriptorArray together, possibly
-  // treating transitions or back pointers weak.
-  void MarkMapContents(Map* map);
-  void MarkDescriptorArray(DescriptorArray* descriptors);
-  void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
-
- private:
-  BaseMarker* base_marker() {
-    return base_marker_;
-  }
-
-  MarkCompactCollector* mark_compact_collector() {
-    return mark_compact_collector_;
-  }
-
-  BaseMarker* base_marker_;
-  MarkCompactCollector* mark_compact_collector_;
-};
-
-
 // Defined in isolate.h.
 class ThreadLocalTop;
 
@@ -612,6 +584,8 @@ class MarkCompactCollector {
 
   bool was_marked_incrementally_;
 
+  bool collect_maps_;
+
   bool flush_monomorphic_ics_;
 
   // A pointer to the current stack-allocated GC tracer object during a full
@@ -634,13 +608,12 @@ class MarkCompactCollector {
   //
   //   After: Live objects are marked and non-live objects are unmarked.
 
+
   friend class RootMarkingVisitor;
   friend class MarkingVisitor;
   friend class StaticMarkingVisitor;
   friend class CodeMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
-  friend class Marker<IncrementalMarking>;
-  friend class Marker<MarkCompactCollector>;
 
   // Mark non-optimize code for functions inlined into the given optimized
   // code. This will prevent it from being flushed.
@@ -658,25 +631,22 @@ class MarkCompactCollector {
   void AfterMarking();
 
   // Marks the object black and pushes it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for non-incremental marking only.
-  INLINE(bool MarkObjectAndPush(HeapObject* obj));
-
-  // Marks the object black and pushes it on the marking stack.
-  // This is for non-incremental marking only.
+  // This is for non-incremental marking.
   INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
 
-  // Marks the object black without pushing it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  // This is for non-incremental marking only.
-  INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
+  INLINE(bool MarkObjectWithoutPush(HeapObject* object));
+  INLINE(void MarkObjectAndPush(HeapObject* value));
 
-  // Marks the object black assuming that it is not yet marked.
-  // This is for non-incremental marking only.
+  // Marks the object black. This is for non-incremental marking.
   INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
 
   void ProcessNewlyMarkedObject(HeapObject* obj);
 
+  // Mark a Map and its DescriptorArray together, skipping transitions.
+  void MarkMapContents(Map* map);
+  void MarkAccessorPairSlot(HeapObject* accessors, int offset);
+  void MarkDescriptorArray(DescriptorArray* descriptors);
+
   // Mark the heap roots and all objects reachable from them.
   void MarkRoots(RootMarkingVisitor* visitor);
 
@@ -779,7 +749,6 @@ class MarkCompactCollector {
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
-  Marker<MarkCompactCollector> marker_;
 
   List<Page*> evacuation_candidates_;
   List<Code*> invalidated_code_;
 
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index d4a40bf..72079dc 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1735,60 +1735,3 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
 
   CHECK(HEAP->InNewSpace(*o));
 }
-
-
-static int CountMapTransitions(Map* map) {
-  int result = 0;
-  DescriptorArray* descs = map->instance_descriptors();
-  for (int i = 0; i < descs->number_of_descriptors(); i++) {
-    if (descs->IsTransitionOnly(i)) {
-      result++;
-    }
-  }
-  return result;
-}
-
-
-// Test that map transitions are cleared and maps are collected with
-// incremental marking as well.
-TEST(Regress1465) {
-  i::FLAG_allow_natives_syntax = true;
-  i::FLAG_trace_incremental_marking = true;
-  InitializeVM();
-  v8::HandleScope scope;
-
-  #define TRANSITION_COUNT 256
-  for (int i = 0; i < TRANSITION_COUNT; i++) {
-    EmbeddedVector<char, 64> buffer;
-    OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
-    CompileRun(buffer.start());
-  }
-  CompileRun("var root = new Object;");
-  Handle<JSObject> root =
-      v8::Utils::OpenHandle(
-          *v8::Handle<v8::Object>::Cast(
-              v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
-
-  // Count number of live transitions before marking.
-  int transitions_before = CountMapTransitions(root->map());
-  CompileRun("%DebugPrint(root);");
-  CHECK_EQ(TRANSITION_COUNT, transitions_before);
-
-  // Go through all incremental marking steps in one swoop.
-  IncrementalMarking* marking = HEAP->incremental_marking();
-  CHECK(marking->IsStopped());
-  marking->Start();
-  CHECK(marking->IsMarking());
-  while (!marking->IsComplete()) {
-    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-  }
-  CHECK(marking->IsComplete());
-
-  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
-  CHECK(marking->IsStopped());
-
-  // Count number of live transitions after marking. Note that one transition
-  // is left, because 'o' still holds an instance of one transition target.
-  int transitions_after = CountMapTransitions(root->map());
-  CompileRun("%DebugPrint(root);");
-  CHECK_EQ(1, transitions_after);
-}
-- 
2.7.4
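
Appendix (placed after the mail signature, so `git am` ignores it): the
MarkObjectWithoutPush/MarkObjectAndPush pair this revert reinstates follows the
standard tri-color marking discipline. An object is marked black first and
pushed on the marking deque only if it was white before, so every object is
traced at most once; "grey" is simply black-and-still-on-the-deque. The minimal
standalone C++ sketch below illustrates that contract under assumed,
hypothetical names (Obj, Marker, Color) -- it is not V8 code. Note the polarity
the revert restores: MarkObjectWithoutPush returns the OLD mark state, so
callers push when it returns false (the 2012 code being removed returned the
opposite).

#include <deque>
#include <iostream>
#include <vector>

// Hypothetical sketch, not V8 code. Two colors suffice: "grey" is modeled as
// black-and-on-the-deque, just like SetMark followed by PushBlack above.
enum class Color { kWhite, kBlack };

struct Obj {
  Color color = Color::kWhite;
  std::vector<Obj*> children;  // Outgoing pointers.
};

class Marker {
 public:
  // Sets the mark bit only. Returns the OLD mark state, mirroring the
  // reinstated MarkObjectWithoutPush contract (true == was already marked).
  bool MarkObjectWithoutPush(Obj* obj) {
    bool old_mark = (obj->color == Color::kBlack);
    if (!old_mark) obj->color = Color::kBlack;
    return old_mark;
  }

  // Marks the object and queues it so its children get visited too.
  void MarkObjectAndPush(Obj* obj) {
    if (!MarkObjectWithoutPush(obj)) deque_.push_back(obj);
  }

  // Drains the deque: the moral equivalent of the collector's marking loop.
  void ProcessMarkingDeque() {
    while (!deque_.empty()) {
      Obj* obj = deque_.front();
      deque_.pop_front();
      for (Obj* child : obj->children) MarkObjectAndPush(child);
    }
  }

 private:
  std::deque<Obj*> deque_;
};

int main() {
  Obj root, reachable, garbage;
  root.children.push_back(&reachable);
  Marker marker;
  marker.MarkObjectAndPush(&root);
  marker.ProcessMarkingDeque();
  std::cout << (reachable.color == Color::kBlack)   // 1: live, marked.
            << (garbage.color == Color::kBlack)     // 0: stays white.
            << std::endl;
  return 0;
}

The same sketch also explains the weak-link trick in MarkDescriptorArray above:
calling only MarkObjectWithoutPush on a container (as the patch does for the
descriptor contents array) blackens the container without tracing the values
inside it, so transition targets stored there are not kept alive by that edge.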