map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ map->set_dependent_codes(DependentCodes::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
+ meta_map()->set_dependent_codes(DependentCodes::cast(empty_fixed_array()));
meta_map()->init_back_pointer(undefined_value());
meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
+ fixed_array_map()->set_dependent_codes(
+ DependentCodes::cast(empty_fixed_array()));
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
+ oddball_map()->set_dependent_codes(DependentCodes::cast(empty_fixed_array()));
oddball_map()->init_back_pointer(undefined_value());
oddball_map()->set_instance_descriptors(empty_descriptor_array());
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
code->set_prologue_offset(kPrologueOffsetNotSet);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ code->set_marked_for_deoptimization(false);
+ }
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
+ RegisterDependentCodeForEmbeddedMaps(code);
CodeGenerator::PrintCode(code, info());
return code;
}
}
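+// Collects the maps that can transition and are embedded in the generated
+// code, and registers the code as dependent on each of them so that the code
+// is deoptimized when any of those maps dies.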
+void LChunk::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
+ }
+ }
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(code);
+ }
+}
+
} } // namespace v8::internal
pointer_maps_(8, graph->zone()),
inlined_closures_(1, graph->zone()) { }
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
+
int spill_slot_count_;
private:
}
}
}
+
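+ // Maps embedded in optimized code are weak references; visit the embedded
+ // object only when it is not such a map.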
+ void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ if (rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
+ !rinfo->target_object()->IsMap() ||
+ !Map::cast(rinfo->target_object())->CanTransition()) {
+ VisitPointer(rinfo->target_object_address());
+ }
+ }
};
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveReferences();
ClearWeakMaps();
#endif
}
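+// Selects optimized functions whose code object was marked for
+// deoptimization because a map embedded in it died in this collection.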
+class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
+ public:
+ virtual bool TakeFunction(JSFunction* function) {
+ return function->code()->marked_for_deoptimization();
+ }
+};
+
void MarkCompactCollector::Finish() {
#ifdef DEBUG
// GC, because it relies on the new address of certain old space
// objects (empty string, illegal builtin).
heap()->isolate()->stub_cache()->Clear();
+
+ DeoptimizeMarkedCodeFilter filter;
+ Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
}
}
-void MarkCompactCollector::ClearNonLiveTransitions() {
+void MarkCompactCollector::ClearNonLiveReferences() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
if (map->IsFreeSpace()) continue;
ASSERT(map->IsMap());
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+ if (!map->CanTransition()) continue;
if (map_mark.Get() &&
map->attached_to_shared_function_info()) {
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
+
+ if (map_mark.Get()) {
+ ClearNonLiveDependentCodes(map);
+ } else {
+ ClearAndDeoptimizeDependentCodes(map);
+ }
}
}
}
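+// The given map is dead: mark all live dependent code objects for
+// deoptimization and detach the dependent codes array from the map.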
+void MarkCompactCollector::ClearAndDeoptimizeDependentCodes(Map* map) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCodes* codes = map->dependent_codes();
+ int number_of_codes = codes->number_of_codes();
+ if (number_of_codes == 0) return;
+ for (int i = 0; i < number_of_codes; i++) {
+ Code* code = codes->code_at(i);
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ }
+ codes->clear_code_at(i);
+ }
+ map->set_dependent_codes(DependentCodes::cast(heap()->empty_fixed_array()));
+}
+
+
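+// The given map is live: compact its dependent codes array in place, keeping
+// only code objects that are still live and not marked for deoptimization.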
+void MarkCompactCollector::ClearNonLiveDependentCodes(Map* map) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCodes* codes = map->dependent_codes();
+ int number_of_codes = codes->number_of_codes();
+ if (number_of_codes == 0) return;
+ int new_number_of_codes = 0;
+ for (int i = 0; i < number_of_codes; i++) {
+ Code* code = codes->code_at(i);
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ if (new_number_of_codes != i) {
+ codes->set_code_at(new_number_of_codes, code);
+ }
+ Object** slot = codes->code_slot_at(new_number_of_codes);
+ RecordSlot(slot, slot, code);
+ new_number_of_codes++;
+ }
+ }
+ for (int i = new_number_of_codes; i < number_of_codes; i++) {
+ codes->clear_code_at(i);
+ }
+ codes->set_number_of_codes(new_number_of_codes);
+}
+
+
void MarkCompactCollector::ProcessWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
- void ClearNonLiveTransitions();
+ void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+ void ClearAndDeoptimizeDependentCodes(Map* map);
+ void ClearNonLiveDependentCodes(Map* map);
+
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
// back after collection. This is either done during
}
+bool Object::IsDependentCodes() {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a dependent codes array.
+ return true;
+}
+
+
bool Object::IsTypeFeedbackCells() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(DependentCodes)
CAST_ACCESSOR(TypeFeedbackCells)
CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(JSFunctionResultCache)
}
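+// Adds the given code object to this map's dependent codes array. The array
+// may be replaced with a larger copy if it is full.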
+void Map::AddDependentCode(Handle<Code> code) {
+ Handle<DependentCodes> codes =
+ DependentCodes::Append(Handle<DependentCodes>(dependent_codes()), code);
+ if (*codes != dependent_codes()) {
+ set_dependent_codes(*codes);
+ }
+}
+
+
+int DependentCodes::number_of_codes() {
+ if (length() == 0) return 0;
+ return Smi::cast(get(kNumberOfCodesIndex))->value();
+}
+
+
+void DependentCodes::set_number_of_codes(int value) {
+ set(kNumberOfCodesIndex, Smi::FromInt(value));
+}
+
+
+Code* DependentCodes::code_at(int i) {
+ return Code::cast(get(kCodesIndex + i));
+}
+
+
+void DependentCodes::set_code_at(int i, Code* value) {
+ set(kCodesIndex + i, value);
+}
+
+
+Object** DependentCodes::code_slot_at(int i) {
+ return HeapObject::RawField(
+ this, FixedArray::OffsetOfElementAt(kCodesIndex + i));
+}
+
+
+void DependentCodes::clear_code_at(int i) {
+ set_undefined(kCodesIndex + i);
+}
+
+
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
}
+bool Code::marked_for_deoptimization() {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ return MarkedForDeoptimizationField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_marked_for_deoptimization(bool flag) {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = MarkedForDeoptimizationField::update(previous, flag);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, dependent_codes, DependentCodes, kDependentCodesOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- StaticVisitor::MarkObject(heap, object);
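+ // Maps embedded in optimized code are treated as weak references: do not
+ // mark the map or record the slot in that case. If such a map dies, the
+ // dependent code is marked for deoptimization during the collection.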
+ if (!FLAG_collect_maps || rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
+ !object->IsMap() || !Map::cast(object)->CanTransition()) {
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ StaticVisitor::MarkObject(heap, object);
+ }
}
map_object->ClearCodeCache(heap);
}
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (FLAG_collect_maps && map_object->CanTransition()) {
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(heap,
ASSERT(transitions->IsMap() || transitions->IsUndefined());
}
+ // Mark the dependent codes array but do not push it onto the marking
+ // stack. This makes the references from the array weak; dead code objects
+ // are cleared when we iterate over maps in ClearNonLiveReferences.
+ Object** slot = HeapObject::RawField(map, Map::kDependentCodesOffset);
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
+ // templated CodeIterateBody (below). They should be kept in sync.
IteratePointer(v, kRelocationInfoOffset);
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- // There are two places where we iterate code bodies: here and the
- // non-templated CodeIterateBody (above). They should be kept in sync.
+ // There are two places where we iterate code bodies: here and the non-
+ // templated CodeIterateBody (above). They should be kept in sync.
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
}
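+// Appends a code object to the dependent codes array, growing the underlying
+// fixed array if it is full. Returns the array that now contains the code,
+// which may be a newly allocated copy.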
+Handle<DependentCodes> DependentCodes::Append(Handle<DependentCodes> codes,
+ Handle<Code> value) {
+ int append_index = codes->number_of_codes();
+ if (append_index > 0 && codes->code_at(append_index - 1) == *value) {
+ // Do not append the code if it is already in the array.
+ // It is sufficient to check only the last element because we process
+ // the embedded maps of an optimized code object in one batch.
+ return codes;
+ }
+ if (codes->length() < kCodesIndex + append_index + 1) {
+ Factory* factory = codes->GetIsolate()->factory();
+ int capacity = kCodesIndex + append_index + 1;
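+ // Over-allocate by 25% once the required capacity exceeds a small minimum
+ // to amortize the cost of future appends.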
+ if (capacity > 5) capacity = capacity * 5 / 4;
+ Handle<DependentCodes> new_codes = Handle<DependentCodes>::cast(
+ factory->CopySizeFixedArray(codes, capacity));
+ // The number of codes can change after GC.
+ append_index = codes->number_of_codes();
+ for (int i = 0; i < append_index; i++) {
+ codes->clear_code_at(i);
+ }
+ codes = new_codes;
+ }
+ codes->set_code_at(append_index, *value);
+ codes->set_number_of_codes(append_index + 1);
+ return codes;
+}
+
+
MaybeObject* JSReceiver::SetPrototype(Object* value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
+ V(DependentCodes) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
+
+ // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION, tells whether
+ // the code is going to be deoptimized because of dead embedded maps.
+ inline bool marked_for_deoptimization();
+ inline void set_marked_for_deoptimization(bool flag);
+
bool allowed_in_shared_map_code_cache();
// Get the safepoint entry for the given pc.
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
+ static const int kMarkedForDeoptimizationFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount + 1;
+ static const int kMarkedForDeoptimizationBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+ STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
+ kMarkedForDeoptimizationBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
+ class MarkedForDeoptimizationField: public BitField<bool,
+ kMarkedForDeoptimizationFirstBit,
+ kMarkedForDeoptimizationBitCount> {}; // NOLINT
// KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStubMajorKeyFirstBit = 0;
};
+// This class describes the layout of dependent codes array of a map. The
+// first element contains the number of codes as a Smi. The subsequent
+// elements contain code objects. The suffix of the array can be filled with the
+// undefined value if the number of codes is less than the length of the array.
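+// For example, an array with two code objects c1 and c2 looks like
+// [2, c1, c2], or [2, c1, c2, undefined, ...] if it has spare capacity.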
+class DependentCodes: public FixedArray {
+ public:
+ inline int number_of_codes();
+ inline void set_number_of_codes(int value);
+ inline Code* code_at(int i);
+ inline void set_code_at(int i, Code* value);
+ inline Object** code_slot_at(int i);
+ inline void clear_code_at(int i);
+ static Handle<DependentCodes> Append(Handle<DependentCodes> codes,
+ Handle<Code> value);
+ static inline DependentCodes* cast(Object* object);
+ private:
+ static const int kNumberOfCodesIndex = 0;
+ static const int kCodesIndex = 1;
+};
+
+
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
+ // [dependent codes]: list of optimized code objects that embed this map.
+ DECL_ACCESSORS(dependent_codes, DependentCodes)
+
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with prototype transitions and the
// back pointer will be moved into the prototype transitions array if
void ZapPrototypeTransitions();
void ZapTransitions();
+ bool CanTransition() {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
+ }
+
+ inline void AddDependentCode(Handle<Code> code);
+
// Dispatched behavior.
DECLARE_PRINTER(Map)
DECLARE_VERIFIER(Map)
static const int kDescriptorsOffset =
kTransitionsOrBackPointerOffset + kPointerSize;
static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
- static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+ static const int kDependentCodesOffset = kCodeCacheOffset + kPointerSize;
+ static const int kBitField3Offset = kDependentCodesOffset + kPointerSize;
static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
math-floor-of-div-nosudiv: PASS, SKIP if ($arch != arm && $arch != android_arm)
##############################################################################
+# Long-running test that reproduces a memory leak and should be run manually.
+regress/regress-2073: SKIP
+
+##############################################################################
[ $arch == arm || $arch == android_arm ]
# Slow tests which times out in debug mode.
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Running this test with --trace_gc will show heap size growth due to
+// leaking objects via embedded maps in optimized code.
+
+var counter = 0;
+
+function nextid() {
+ counter += 1;
+ return counter;
+}
+
+function Scope() {
+ this.id = nextid();
+ this.parent = null;
+ this.left = null;
+ this.right = null;
+ this.head = null;
+ this.tail = null;
+ this.counter = 0;
+}
+
+Scope.prototype = {
+ new: function() {
+ var Child,
+ child;
+ Child = function() {};
+ Child.prototype = this;
+ child = new Child();
+ child.id = nextid();
+ child.parent = this;
+ child.left = this.tail;
+ child.right = null;
+ child.head = null;
+ child.tail = null;
+ child.counter = 0;
+ if (this.head) {
+ this.tail.right = child;
+ this.tail = child;
+ } else {
+ this.head = this.tail = child;
+ }
+ return child;
+ },
+
+ destroy: function() {
+ if ($root == this) return;
+ var parent = this.parent;
+ if (parent.head == this) parent.head = this.right;
+ if (parent.tail == this) parent.tail = this.left;
+ if (this.left) this.left.right = this.right;
+ if (this.right) this.right.left = this.left;
+ }
+};
+
+function inc(scope) {
+ scope.counter = scope.counter + 1;
+}
+
+var $root = new Scope();
+
+var n = 100000;
+var m = 10;
+
+function doit() {
+ var a = $root.new();
+ var b = a.new();
+ inc(b);
+ if (i > m) $root.head.destroy();
+}
+
+for (var i = 0; i < n; i++) {
+ doit();
+}