RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- info()->CommitDependentMaps(code);
+ info()->CommitDependencies(code);
}
no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
? new List<OffsetRange>(2) : NULL;
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- dependent_maps_[i] = NULL;
+ dependencies_[i] = NULL;
}
if (mode == STUB) {
mode_ = STUB;
- // Check that no dependent maps have been added or added dependent maps have
- // been rolled back or committed.
+ // Check that no dependencies have been added or added dependencies have
+ // been rolled back or committed.
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ASSERT_EQ(NULL, dependent_maps_[i]);
+ ASSERT_EQ(NULL, dependencies_[i]);
}
#endif // DEBUG
}
-void CompilationInfo::CommitDependentMaps(Handle<Code> code) {
+void CompilationInfo::CommitDependencies(Handle<Code> code) {
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
- if (group_maps == NULL) continue;
+ ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+ if (group_objects == NULL) continue;
ASSERT(!object_wrapper_.is_null());
- for (int j = 0; j < group_maps->length(); j++) {
- group_maps->at(j)->dependent_code()->UpdateToFinishedCode(
- static_cast<DependentCode::DependencyGroup>(i), this, *code);
+ for (int j = 0; j < group_objects->length(); j++) {
+ DependentCode::DependencyGroup group =
+ static_cast<DependentCode::DependencyGroup>(i);
+ DependentCode* dependent_code =
+ DependentCode::ForObject(group_objects->at(j), group);
+ dependent_code->UpdateToFinishedCode(group, this, *code);
}
- dependent_maps_[i] = NULL; // Zone-allocated, no need to delete.
+ dependencies_[i] = NULL; // Zone-allocated, no need to delete.
}
}
-void CompilationInfo::RollbackDependentMaps() {
+void CompilationInfo::RollbackDependencies() {
- // Unregister from all dependent maps if not yet committed.
+ // Unregister from all dependencies if not yet committed.
for (int i = 0; i < DependentCode::kGroupCount; i++) {
- ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
- if (group_maps == NULL) continue;
- for (int j = 0; j < group_maps->length(); j++) {
- group_maps->at(j)->dependent_code()->RemoveCompilationInfo(
- static_cast<DependentCode::DependencyGroup>(i), this);
+ ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
+ if (group_objects == NULL) continue;
+ for (int j = 0; j < group_objects->length(); j++) {
+ DependentCode::DependencyGroup group =
+ static_cast<DependentCode::DependencyGroup>(i);
+ DependentCode* dependent_code =
+ DependentCode::ForObject(group_objects->at(j), group);
+ dependent_code->RemoveCompilationInfo(group, this);
}
- dependent_maps_[i] = NULL; // Zone-allocated, no need to delete.
+ dependencies_[i] = NULL; // Zone-allocated, no need to delete.
}
}
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
- if (info->HasAbortedDueToDependentMap()) {
+ if (info->HasAbortedDueToDependencyChange()) {
info->set_bailout_reason("bailed out due to dependent map");
status = optimizing_compiler->AbortOptimization();
} else if (status != OptimizingCompiler::SUCCEEDED) {
deferred_handles_ = deferred_handles;
}
- ZoneList<Handle<Map> >* dependent_maps(DependentCode::DependencyGroup group) {
- if (dependent_maps_[group] == NULL) {
- dependent_maps_[group] = new(zone_) ZoneList<Handle<Map> >(2, zone_);
+ ZoneList<Handle<HeapObject> >* dependencies(
+ DependentCode::DependencyGroup group) {
+ if (dependencies_[group] == NULL) {
+ dependencies_[group] = new(zone_) ZoneList<Handle<HeapObject> >(2, zone_);
}
- return dependent_maps_[group];
+ return dependencies_[group];
}
- void CommitDependentMaps(Handle<Code> code);
+ void CommitDependencies(Handle<Code> code);
- void RollbackDependentMaps();
+ void RollbackDependencies();
void SaveHandles() {
SaveHandle(&closure_);
return object_wrapper_;
}
- void AbortDueToDependentMap() {
- mode_ = DEPENDENT_MAP_ABORT;
+ void AbortDueToDependencyChange() {
+ mode_ = DEPENDENCY_CHANGE_ABORT;
}
- bool HasAbortedDueToDependentMap() {
- return mode_ == DEPENDENT_MAP_ABORT;
+ bool HasAbortedDueToDependencyChange() {
+ return mode_ == DEPENDENCY_CHANGE_ABORT;
}
protected:
OPTIMIZE,
NONOPT,
STUB,
- DEPENDENT_MAP_ABORT
+ DEPENDENCY_CHANGE_ABORT
};
void Initialize(Isolate* isolate, Mode mode, Zone* zone, Zone* phase_zone);
DeferredHandles* deferred_handles_;
- ZoneList<Handle<Map> >* dependent_maps_[DependentCode::kGroupCount];
+ ZoneList<Handle<HeapObject> >* dependencies_[DependentCode::kGroupCount];
template<typename T>
void SaveHandle(Handle<T> *object) {
- // zone scope and get rid of dependent maps even when the destructor is
- // called when cast as a CompilationInfo.
+ // zone scope and get rid of dependencies even when the destructor is
+ // called when cast as a CompilationInfo.
virtual ~CompilationInfoWithZone() {
- RollbackDependentMaps();
+ RollbackDependencies();
}
private:
}
HeapObject::cast(result)->set_map_no_write_barrier(
global_property_cell_map());
- PropertyCell::cast(result)->set_value(value);
- PropertyCell::cast(result)->set_type(Type::None());
+ PropertyCell* cell = PropertyCell::cast(result);
+ cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+ cell->set_value(value);
+ cell->set_type(Type::None());
return result;
}
if (!info()->IsStub()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
- info()->CommitDependentMaps(code);
+ info()->CommitDependencies(code);
}
while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
ASSERT(cell->IsPropertyCell());
if (IsMarked(cell)) {
- int offset = PropertyCell::kValueOffset;
- MarkCompactMarkingVisitor::VisitPointer(
- heap(),
- reinterpret_cast<Object**>(cell->address() + offset));
- offset = PropertyCell::kTypeOffset;
- MarkCompactMarkingVisitor::VisitPointer(
- heap(),
- reinterpret_cast<Object**>(cell->address() + offset));
+ MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
}
}
}
ClearNonLiveMapTransitions(map, map_mark);
if (map_mark.Get()) {
- ClearNonLiveDependentCode(map);
+ ClearNonLiveDependentCode(map->dependent_code());
} else {
ClearAndDeoptimizeDependentCode(map);
}
}
+
+ // Iterate over property cell space, removing dependent code that is not
+ // otherwise kept alive by strong references.
+ HeapObjectIterator cell_iterator(heap_->property_cell_space());
+ for (HeapObject* cell = cell_iterator.Next();
+ cell != NULL;
+ cell = cell_iterator.Next()) {
+ if (IsMarked(cell)) {
+ ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
+ }
+ }
}
}
-void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
DisallowHeapAllocation no_allocation;
- DependentCode* entries = map->dependent_code();
DependentCode::GroupStartIndexes starts(entries);
int number_of_entries = starts.number_of_entries();
if (number_of_entries == 0) return;
cell != NULL;
cell = cell_iterator.Next()) {
if (cell->IsCell()) {
- Address value_address = reinterpret_cast<Address>(cell) +
- (Cell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+ Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
}
}
cell != NULL;
cell = js_global_property_cell_iterator.Next()) {
if (cell->IsPropertyCell()) {
- Address value_address =
- reinterpret_cast<Address>(cell) +
- (PropertyCell::kValueOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- Address type_address =
- reinterpret_cast<Address>(cell) +
- (PropertyCell::kTypeOffset - kHeapObjectTag);
- updating_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
+ PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
}
}
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
void ClearAndDeoptimizeDependentCode(Map* map);
- void ClearNonLiveDependentCode(Map* map);
+ void ClearNonLiveDependentCode(DependentCode* dependent_code);
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- info()->CommitDependentMaps(code);
+ info()->CommitDependencies(code);
}
WRITE_FIELD(this, kValueOffset, val);
}
+ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
Object* PropertyCell::type_raw() {
return READ_FIELD(this, kTypeOffset);
Cell::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitPropertyCell,
- &FixedBodyVisitor<StaticVisitor,
- PropertyCell::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitPropertyCell, &VisitPropertyCell);
table_.template RegisterSpecializations<DataObjectVisitor,
kVisitDataObject,
template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
+ Map* map, HeapObject* object) {
+ Heap* heap = map->GetHeap();
+
+ Object** slot =
+ HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
+ if (FLAG_collect_maps) {
+ // Mark property cell dependent codes array but do not push it onto marking
+ // stack, this will make references from it weak. We will clean dead
+ // codes when we iterate over property cells in ClearNonLiveReferences.
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+ } else {
+ StaticVisitor::VisitPointer(heap, slot);
+ }
+
+ StaticVisitor::VisitPointers(heap,
+ HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
+ HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
+}
+
+
+template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCode(
Map* map, HeapObject* object) {
Heap* heap = map->GetHeap();
table_.GetVisitor(map)(map, obj);
}
+ INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
Object* store_value = value;
if (IsGlobalObject()) {
Heap* heap = name->GetHeap();
- MaybeObject* maybe_store_value =
- heap->AllocatePropertyCell(value);
+ MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value);
if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value;
}
Object* dict;
Handle<DependentCode> codes =
DependentCode::Insert(dep, group, info->object_wrapper());
if (*codes != dependent_code()) set_dependent_code(*codes);
- info->dependent_maps(group)->Add(Handle<Map>(this), info->zone());
+ info->dependencies(group)->Add(Handle<HeapObject>(this), info->zone());
}
}
+// Returns the DependentCode list that tracks dependencies of |group| for
+// |object|: kPropertyCellChangedGroup dependencies hang off the
+// PropertyCell, every other group hangs off the Map.
+// Dereferencing |object| here is safe even for deferred handles
+// (AllowDeferredHandleDereference).
+DependentCode* DependentCode::ForObject(Handle<HeapObject> object,
+ DependencyGroup group) {
+ AllowDeferredHandleDereference dependencies_are_safe;
+ if (group == DependentCode::kPropertyCellChangedGroup) {
+ return Handle<PropertyCell>::cast(object)->dependent_code();
+ }
+ return Handle<Map>::cast(object)->dependent_code();
+}
+
+
Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
DependencyGroup group,
Handle<Object> object) {
code->set_marked_for_deoptimization(true);
} else {
CompilationInfo* info = compilation_info_at(i);
- info->AbortDueToDependentMap();
+ info->AbortDueToDependencyChange();
}
}
// Compact the array by moving all subsequent groups to fill in the new holes.
}
+// Records a dependency of the in-progress compilation |info| on this cell:
+// the compilation's object wrapper is inserted into this cell's dependent
+// code under kPropertyCellChangedGroup, and the cell is appended to |info|'s
+// dependency list so the dependency can later be committed (via
+// CommitDependencies) or rolled back (via RollbackDependencies).
+void PropertyCell::AddDependentCompilationInfo(CompilationInfo* info) {
+ Handle<DependentCode> dep(dependent_code());
+ Handle<DependentCode> codes =
+ DependentCode::Insert(dep, DependentCode::kPropertyCellChangedGroup,
+ info->object_wrapper());
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+ info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add(
+ Handle<HeapObject>(this), info->zone());
+}
+
+
+// Records a dependency of the finished |code| on this cell by inserting it
+// into the kPropertyCellChangedGroup of this cell's dependent code.
+// set_dependent_code is only called when Insert actually produced a new
+// array, avoiding a redundant write (and write barrier).
+void PropertyCell::AddDependentCode(Handle<Code> code) {
+ Handle<DependentCode> codes = DependentCode::Insert(
+ Handle<DependentCode>(dependent_code()),
+ DependentCode::kPropertyCellChangedGroup, code);
+ if (*codes != dependent_code()) set_dependent_code(*codes);
+}
+
+
} } // namespace v8::internal
// Group of code that depends on elements not being added to objects with
// this map.
kElementsCantBeAddedGroup,
- kGroupCount = kElementsCantBeAddedGroup + 1
+ // Group of code that depends on global property values in property cells
+ // not being changed.
+ kPropertyCellChangedGroup,
+ kGroupCount = kPropertyCellChangedGroup + 1
};
// Array for holding the index of the first code object of each group.
inline void copy(int from, int to);
static inline DependentCode* cast(Object* object);
+ static DependentCode* ForObject(Handle<HeapObject> object,
+ DependencyGroup group);
+
private:
// Make a room at the end of the given group by moving out the first
// code objects of the subsequent groups.
inline bool CanOmitPrototypeChecks();
void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
- CompilationInfo* info);
+ CompilationInfo* info);
void AddDependentCode(DependentCode::DependencyGroup group,
Handle<Code> code);
class PropertyCell: public Cell {
public:
+ // [type]: type of the global property.
Type* type();
void set_type(Type* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ // [dependent_code]: dependent code that depends on the type of the global
+ // property.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
// Casting.
static inline PropertyCell* cast(Object* obj);
// Layout description.
static const int kTypeOffset = kValueOffset + kPointerSize;
- static const int kSize = kTypeOffset + kPointerSize;
+ static const int kDependentCodeOffset = kTypeOffset + kPointerSize;
+ static const int kSize = kDependentCodeOffset + kPointerSize;
+
+ static const int kPointerFieldsBeginOffset = kValueOffset;
+ static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+ typedef FixedBodyDescriptor<kValueOffset,
+ kSize,
+ kSize> BodyDescriptor;
+
+ void AddDependentCompilationInfo(CompilationInfo* info);
- typedef FixedBodyDescriptor<
- kValueOffset,
- kTypeOffset + kPointerSize,
- PropertyCell::kSize> BodyDescriptor;
+ void AddDependentCode(Handle<Code> code);
private:
DECL_ACCESSORS(type_raw, Object)
RegisterDependentCodeForEmbeddedMaps(code);
}
PopulateDeoptimizationData(code);
- info()->CommitDependentMaps(code);
+ info()->CommitDependencies(code);
}