// Selects operands for HLoadNamedField. A double-typed field needs one extra
// temp register to stage the raw field value (smi or heap number) before it
// is converted/loaded into the double result register.
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
  LOperand* obj = UseRegisterAtStart(instr->object());
  // Only double fields need the temp; see LCodeGen::DoLoadNamedField.
  LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
  // Double field representations only occur when double tracking is enabled.
  ASSERT(temp == NULL || FLAG_track_double_fields);
  return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
}
: UseRegisterAtStart(instr->object());
}
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
+ LOperand* val =
+ needs_write_barrier ||
+ (FLAG_track_fields && instr->field_representation().IsSmi())
+ ? UseTempRegister(instr->value()) : UseRegister(instr->value());
// We need a temporary register for write barrier of the map field.
LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LLoadNamedField(LOperand* object) {
+ explicit LLoadNamedField(LOperand* object, LOperand* temp) {
inputs_[0] = object;
+ temps_[0] = temp;
}
LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
// Emits the field load. For double-tracked fields the tagged value is first
// loaded into a temp register, then converted (smi) or unboxed (heap number)
// into the VFP result register.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
  if (!FLAG_track_double_fields) {
    ASSERT(!instr->hydrogen()->representation().IsDouble());
  }
  // Double fields stage the tagged value in the temp register; all other
  // fields load straight into the general-purpose result register.
  Register temp = instr->hydrogen()->representation().IsDouble()
      ? ToRegister(instr->temp()) : ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ ldr(temp, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    __ ldr(temp, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ ldr(temp, FieldMemOperand(temp, instr->hydrogen()->offset()));
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    Label load_from_heap_number, done;
    DwVfpRegister result = ToDoubleRegister(instr->result());
    SwVfpRegister flt_scratch = double_scratch0().low();
    // Smi case: untag and convert the integer to a double.
    __ JumpIfNotSmi(temp, &load_from_heap_number);
    __ SmiUntag(temp);
    __ vmov(flt_scratch, temp);
    __ vcvt_f64_s32(result, flt_scratch);
    __ b(&done);
    // Heap-number case: load the unboxed double directly.
    // BUG FIX: bind the label here — the original never bound
    // load_from_heap_number, so the JumpIfNotSmi above targeted an unbound
    // label (the ia32 twin binds it; see that implementation).
    __ bind(&load_from_heap_number);
    __ sub(ip, temp, Operand(kHeapObjectTag));
    __ vldr(result, ip, HeapNumber::kValueOffset);
    __ bind(&done);
  }
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
Register value = ToRegister(instr->value());
+ ASSERT(!object.is(value));
Register scratch = scratch0();
int offset = instr->offset();
- ASSERT(!object.is(value));
-
- if (!instr->transition().is_null()) {
- __ mov(scratch, Operand(instr->transition()));
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ SmiTag(value, value, SetCC);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
+
+ __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ DoCheckMapCommon(scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
+ __ mov(scratch, Operand(transition));
__ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
}
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated) {
+ if (map->CanBeDeprecated()) {
+ mov(scratch, Operand(map));
+ ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+ tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
+ b(ne, if_deprecated);
+ }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Condition cc,
Label* condition_met);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register r0).
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
+ miss_label, DONT_DO_SMI_CHECK);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Pass the now unused name_reg as a scratch register.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
- // Skip updating write barrier if storing a smi.
- __ JumpIfSmi(value_reg, &exit);
-
- // Update the write barrier for the array address.
- // Ok to clobber receiver_reg and name_reg, since we return.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register r0).
Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
__ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(map_reg, ip);
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ mov(ip, Operand(receiver_maps->at(current)));
+ __ cmp(map_reg, ip);
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
// ECMA-262, section 15.10.7.1.
FieldDescriptor field(heap->source_string(),
JSRegExp::kSourceFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
FieldDescriptor field(heap->global_string(),
JSRegExp::kGlobalFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
FieldDescriptor field(heap->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
FieldDescriptor field(heap->multiline_string(),
JSRegExp::kMultilineFieldIndex,
- final);
+ final,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
{
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
FieldDescriptor field(heap->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
- writable);
+ writable,
+ Representation::Tagged());
initial_map->AppendDescriptor(&field, witness);
}
map->set_instance_descriptors(*descriptors);
{ // length
- FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM);
+ FieldDescriptor d(
+ *factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d, witness);
}
{ // callee
{
FieldDescriptor index_field(heap()->index_string(),
JSRegExpResult::kIndexIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&index_field, witness);
}
{
FieldDescriptor input_field(heap()->input_string(),
JSRegExpResult::kInputIndex,
- NONE);
+ NONE,
+ Representation::Tagged());
initial_map->AppendDescriptor(&input_field, witness);
}
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
+ Representation::Tagged(),
details.descriptor_index());
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
for (int i = 0; i < size; i += kPointerSize) {
HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ AddInstruction(new(zone) HLoadNamedField(
+ boilerplate, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
}
checker.ElseDeopt();
AddInstruction(new(zone) HStoreNamedField(js_array,
factory->elements_field_string(),
new_elements, true,
+ Representation::Tagged(),
JSArray::kElementsOffset));
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
- map, true, JSArray::kMapOffset));
+ map, true,
+ Representation::Tagged(),
+ JSArray::kMapOffset));
return js_array;
}
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
+DEFINE_bool(track_fields, false, "track fields with only smi values")
+DEFINE_bool(track_double_fields, false, "track fields with double values")
+DEFINE_implication(track_double_fields, track_fields)
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
ASSERT(name->IsInternalizedString());
- FieldDescriptor field(name, i, NONE, i + 1);
+ // TODO(verwaest): Since we cannot update the boilerplate's map yet,
+ // initialize to the worst case.
+ FieldDescriptor field(name, i, NONE, Representation::Tagged(), i + 1);
descriptors->Set(i, &field, witness);
}
descriptors->Sort();
ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
+ Representation::None(),
details.descriptor_index());
Object* value = descs->GetCallbacksObject(i);
MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
#undef DEFINE_COMPILE
-const char* Representation::Mnemonic() const {
- switch (kind_) {
- case kNone: return "v";
- case kTagged: return "t";
- case kDouble: return "d";
- case kInteger32: return "i";
- case kExternal: return "x";
- default:
- UNREACHABLE();
- return NULL;
- }
-}
-
-
int HValue::LoopWeight() const {
const int w = FLAG_loop_weight;
static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
};
-class Representation {
- public:
- enum Kind {
- kNone,
- kInteger32,
- kDouble,
- kTagged,
- kExternal,
- kNumRepresentations
- };
-
- Representation() : kind_(kNone) { }
-
- static Representation None() { return Representation(kNone); }
- static Representation Tagged() { return Representation(kTagged); }
- static Representation Integer32() { return Representation(kInteger32); }
- static Representation Double() { return Representation(kDouble); }
- static Representation External() { return Representation(kExternal); }
-
- static Representation FromKind(Kind kind) { return Representation(kind); }
-
- bool Equals(const Representation& other) {
- return kind_ == other.kind_;
- }
-
- bool is_more_general_than(const Representation& other) {
- ASSERT(kind_ != kExternal);
- ASSERT(other.kind_ != kExternal);
- return kind_ > other.kind_;
- }
-
- Kind kind() const { return static_cast<Kind>(kind_); }
- bool IsNone() const { return kind_ == kNone; }
- bool IsTagged() const { return kind_ == kTagged; }
- bool IsInteger32() const { return kind_ == kInteger32; }
- bool IsDouble() const { return kind_ == kDouble; }
- bool IsExternal() const { return kind_ == kExternal; }
- bool IsSpecialization() const {
- return kind_ == kInteger32 || kind_ == kDouble;
- }
- const char* Mnemonic() const;
-
- private:
- explicit Representation(Kind k) : kind_(k) { }
-
- // Make sure kind fits in int8.
- STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
-
- int8_t kind_;
-};
-
-
class UniqueValueId {
public:
UniqueValueId() : raw_address_(NULL) { }
class HLoadNamedField: public HTemplateInstruction<2> {
public:
- HLoadNamedField(HValue* object, bool is_in_object, int offset,
- HValue* typecheck = NULL)
+ HLoadNamedField(HValue* object, bool is_in_object,
+ Representation field_representation,
+ int offset, HValue* typecheck = NULL)
: is_in_object_(is_in_object),
+ field_representation_(field_representation),
offset_(offset) {
ASSERT(object != NULL);
SetOperandAt(0, object);
SetOperandAt(1, typecheck != NULL ? typecheck : object);
- set_representation(Representation::Tagged());
+ if (FLAG_track_fields && field_representation.IsSmi()) {
+ set_type(HType::Smi());
+ set_representation(Representation::Tagged());
+ } else if (FLAG_track_double_fields && field_representation.IsDouble()) {
+ set_representation(field_representation);
+ } else {
+ set_representation(Representation::Tagged());
+ }
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
if (is_in_object) {
static HLoadNamedField* NewArrayLength(Zone* zone, HValue* object,
HValue* typecheck,
HType type = HType::Tagged()) {
+ Representation representation =
+ type.IsSmi() ? Representation::Smi() : Representation::Tagged();
HLoadNamedField* result = new(zone) HLoadNamedField(
- object, true, JSArray::kLengthOffset, typecheck);
+ object, true, representation, JSArray::kLengthOffset, typecheck);
result->set_type(type);
result->SetGVNFlag(kDependsOnArrayLengths);
result->ClearGVNFlag(kDependsOnInobjectFields);
bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
bool is_in_object() const { return is_in_object_; }
+ Representation field_representation() const { return representation_; }
int offset() const { return offset_; }
virtual Representation RequiredInputRepresentation(int index) {
virtual bool IsDeletable() const { return true; }
bool is_in_object_;
+ Representation field_representation_;
int offset_;
};
Handle<String> name,
HValue* val,
bool in_object,
+ Representation field_representation,
int offset)
: name_(name),
is_in_object_(in_object),
+ field_representation_(field_representation),
offset_(offset),
transition_unique_id_(),
new_space_dominator_(NULL) {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
virtual Representation RequiredInputRepresentation(int index) {
+ if (FLAG_track_fields && index == 1 && field_representation_.IsSmi()) {
+ return Representation::Integer32();
+ }
return Representation::Tagged();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
HValue* new_space_dominator() const { return new_space_dominator_; }
bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value()) &&
+ return (!FLAG_track_fields || !field_representation_.IsSmi()) &&
+ StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
transition_unique_id_ = UniqueValueId(transition_);
}
+ Representation field_representation() const {
+ return field_representation_;
+ }
+
private:
Handle<String> name_;
bool is_in_object_;
+ Representation field_representation_;
int offset_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
new_length->ClearFlag(HValue::kCanOverflow);
Factory* factory = isolate()->factory();
+ Representation representation = IsFastElementsKind(kind)
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->length_field_string(),
new_length, true,
+ representation,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
BuildStoreMap(elements, map);
Handle<String> fixed_array_length_field_name = factory->length_field_string();
+ Representation representation = IsFastElementsKind(kind)
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
- capacity, true, FixedArray::kLengthOffset);
+ capacity, true, representation,
+ FixedArray::kLengthOffset);
AddInstruction(store_length);
}
isolate()->factory()->properties_field_symbol(),
empty_fixed_array,
true,
+ Representation::Tagged(),
JSArray::kPropertiesOffset));
HInstruction* length_store = AddInstruction(
isolate()->factory()->length_field_string(),
length_field,
true,
+ Representation::Tagged(),
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
isolate()->factory()->elements_field_string(),
elements,
true,
+ Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
Handle<String> map_field_name = factory->map_field_string();
HInstruction* store_map =
new(zone) HStoreNamedField(object, map_field_name, map,
- true, JSObject::kMapOffset);
+ true, Representation::Tagged(),
+ JSObject::kMapOffset);
store_map->ClearGVNFlag(kChangesInobjectFields);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
object,
factory->elements_field_string(),
- new_elements, true,
+ new_elements, true, Representation::Tagged(),
JSArray::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
- HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ HInstruction* value = AddInstruction(new(zone) HLoadNamedField(
+ boilerplate, true, Representation::Tagged(), i));
if (i != JSArray::kMapOffset) {
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
} else {
BuildStoreMap(object, value);
}
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
AddInstruction(new(zone) HStoreNamedField(object,
factory->elements_field_string(),
- object_elements,
- true, JSObject::kElementsOffset));
+ object_elements, true,
+ Representation::Tagged(),
+ JSObject::kElementsOffset));
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HInstruction* value =
- AddInstruction(new(zone) HLoadNamedField(boilerplate_elements,
- true, i));
+ AddInstruction(new(zone) HLoadNamedField(
+ boilerplate_elements, true, Representation::Tagged(), i));
AddInstruction(new(zone) HStoreNamedField(object_elements,
factory->empty_string(),
- value,
- true, i));
+ value, true,
+ Representation::Tagged(), i));
}
// Copy the elements array contents.
isolate()->factory()->payload_string(),
payload,
true,
+ Representation::Tagged(),
AllocationSiteInfo::kPayloadOffset));
return alloc_site;
}
HInstruction* global_object = AddInstruction(new(zone())
HGlobalObject(context));
HInstruction* native_context = AddInstruction(new(zone())
- HLoadNamedField(global_object, true, GlobalObject::kNativeContextOffset));
+ HLoadNamedField(global_object, true, Representation::Tagged(),
+ GlobalObject::kNativeContextOffset));
int offset = Context::kHeaderSize +
kPointerSize * Context::JS_ARRAY_MAPS_INDEX;
HInstruction* map_array = AddInstruction(new(zone())
- HLoadNamedField(native_context, true, offset));
+ HLoadNamedField(native_context, true, Representation::Tagged(), offset));
offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize;
- return AddInstruction(new(zone()) HLoadNamedField(map_array, true, offset));
+ return AddInstruction(new(zone()) HLoadNamedField(
+ map_array, true, Representation::Tagged(), offset));
}
// Computes the field index for a load/store: negative indices address
// in-object fields (relative to the end of the fixed part of the object),
// non-negative indices address the properties backing store. For a
// transition, the target field is the map's most recently added descriptor.
static int ComputeLoadStoreFieldIndex(Handle<Map> type,
                                      LookupResult* lookup) {
  ASSERT(lookup->IsField() || lookup->IsTransitionToField(*type));
  if (lookup->IsField()) {
    return lookup->GetLocalFieldIndexFromMap(*type);
  } else {
    Map* transition = lookup->GetTransitionMapFromMap(*type);
    int descriptor = transition->LastAdded();
    int index = transition->instance_descriptors()->GetFieldIndex(descriptor);
    return index - type->inobject_properties();
  }
}
+
+
+static Representation ComputeLoadStoreRepresentation(Handle<Map> type,
+ LookupResult* lookup) {
+ if (lookup->IsField()) {
+ return lookup->representation();
+ } else {
+ Map* transition = lookup->GetTransitionMapFromMap(*type);
+ int descriptor = transition->LastAdded();
+ PropertyDetails details =
+ transition->instance_descriptors()->GetDetails(descriptor);
+ return details.representation();
}
}
zone()));
}
- int index = ComputeLoadStoreFieldIndex(map, name, lookup);
+ int index = ComputeLoadStoreFieldIndex(map, lookup);
bool is_in_object = index < 0;
+ Representation representation = ComputeLoadStoreRepresentation(map, lookup);
int offset = index * kPointerSize;
if (index < 0) {
// Negative property indices are in-object properties, indexed
} else {
offset += FixedArray::kHeaderSize;
}
- HStoreNamedField* instr =
- new(zone()) HStoreNamedField(object, name, value, is_in_object, offset);
+ HStoreNamedField* instr = new(zone()) HStoreNamedField(
+ object, name, value, is_in_object, representation, offset);
if (lookup->IsTransitionToField(*map)) {
Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
instr->set_transition(transition);
for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
map = types->at(i);
if (ComputeLoadStoreField(map, name, &lookup, false)) {
- int index = ComputeLoadStoreFieldIndex(map, name, &lookup);
+ int index = ComputeLoadStoreFieldIndex(map, &lookup);
bool is_in_object = index < 0;
int offset = index * kPointerSize;
if (index < 0) {
HValue* object,
Handle<Map> map,
LookupResult* lookup) {
+ Representation representation = lookup->representation();
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
int offset = (index * kPointerSize) + map->instance_size();
- return new(zone()) HLoadNamedField(object, true, offset);
+ return new(zone()) HLoadNamedField(object, true, representation, offset);
} else {
// Non-negative property indices are in the properties array.
int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
- return new(zone()) HLoadNamedField(object, false, offset);
+ return new(zone()) HLoadNamedField(object, false, representation, offset);
}
}
isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ // TODO(verwaest): choose correct storage.
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
- true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ true, Representation::Tagged(),
+ boilerplate_object->GetInObjectPropertyOffset(i)));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
+ // TODO(verwaest): choose correct storage.
HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
value, Representation::Tagged()));
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
- true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ true, Representation::Tagged(),
+ boilerplate_object->GetInObjectPropertyOffset(i)));
}
}
object_header,
factory->elements_field_string(),
elements,
- true, JSObject::kElementsOffset));
+ true, Representation::Tagged(), JSObject::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
Handle<Object> properties_field =
properties_field, Representation::None()));
AddInstruction(new(zone) HStoreNamedField(object_header,
factory->empty_string(),
- properties,
- true, JSObject::kPropertiesOffset));
+ properties, true,
+ Representation::Tagged(),
+ JSObject::kPropertiesOffset));
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = AddInstruction(new(zone) HConstant(
length_field, Representation::None()));
+ ASSERT(boilerplate_array->length()->IsSmi());
+ Representation representation =
+ IsFastElementsKind(boilerplate_array->GetElementsKind())
+ ? Representation::Smi() : Representation::Tagged();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object_header,
factory->length_field_string(),
length,
- true, JSArray::kLengthOffset));
+ true, representation, JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
name,
value,
true, // in-object store.
+ Representation::Tagged(),
JSValue::kValueOffset));
if_js_value->Goto(join);
join->SetJoinId(call->id());
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
// Emits ia32 code for LLoadNamedField: loads a named field either from the
// object itself (in-object) or from its out-of-line properties array. When the
// field is tracked as a double, the tagged value is first loaded into a temp
// register and then unboxed: a Smi is untagged and converted, a HeapNumber's
// payload is loaded directly.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
+  if (!FLAG_track_double_fields) {
+    // Double field representations only exist when double tracking is on.
+    ASSERT(!instr->hydrogen()->representation().IsDouble());
+  }
+  // Double loads route the tagged value through the temp register (the result
+  // register is an XMM/X87 double); tagged loads write the result directly.
+  Register temp = instr->hydrogen()->representation().IsDouble()
+      ? ToRegister(instr->temp()) : ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
-    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
+    __ mov(temp, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
-    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
+    // Out-of-object: dereference the properties array first.
+    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ mov(temp, FieldOperand(temp, instr->hydrogen()->offset()));
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    Label load_from_heap_number, done;
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatureScope scope(masm(), SSE2);
+      XMMRegister result = ToDoubleRegister(instr->result());
+      // Smi path: untag and convert the integer to a double.
+      __ JumpIfNotSmi(temp, &load_from_heap_number);
+      __ SmiUntag(temp);
+      __ cvtsi2sd(result, Operand(temp));
+      __ jmp(&done);
+      // HeapNumber path: load the unboxed double payload.
+      __ bind(&load_from_heap_number);
+      __ movdbl(result, FieldOperand(temp, HeapNumber::kValueOffset));
+    } else {
+      // No SSE2: produce the result on the X87 FPU stack instead.
+      __ JumpIfNotSmi(temp, &load_from_heap_number);
+      __ SmiUntag(temp);
+      // fild_s reads from memory, so bounce the integer through the stack.
+      __ push(temp);
+      __ fild_s(Operand(esp, 0));
+      __ pop(temp);
+      __ jmp(&done);
+      __ bind(&load_from_heap_number);
+      PushX87DoubleOperand(FieldOperand(temp, HeapNumber::kValueOffset));
+      CurrentInstructionReturnsX87Result();
+    }
+    __ bind(&done);
  }
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
+
int offset = instr->offset();
- if (!instr->transition().is_null()) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsInteger32(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ SmiTag(value);
+ if (!instr->hydrogen()->value()->range()->IsInSmiRange()) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Register value = ToRegister(instr->value());
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
+ DoCheckMapCommon(value, map, REQUIRE_EXACT_MAP, instr);
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
+ __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
Register temp_map = ToRegister(instr->temp_map());
- __ mov(temp_map, instr->transition());
+ __ mov(temp_map, transition);
__ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
// Update the write barrier for the map field.
__ RecordWriteField(object,
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
// Lithium instruction selection for HLoadNamedField. Previously restricted to
// tagged results; now also supports double-represented fields, which need a
// temp register to hold the tagged value before unboxing.
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  ASSERT(instr->representation().IsTagged());
  LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+  // Only double loads need the temp; tagged loads pass NULL.
+  LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
+  ASSERT(temp == NULL || FLAG_track_double_fields);
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
}
}
bool can_be_constant = instr->value()->IsConstant() &&
- !HConstant::cast(instr->value())->InNewSpace();
+ !HConstant::cast(instr->value())->InNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
} else {
val = UseRegister(instr->value());
}
// We need a temporary register for write barrier of the map field.
LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ LStoreNamedField* result =
+ new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
public:
- explicit LLoadNamedField(LOperand* object) {
+ explicit LLoadNamedField(LOperand* object, LOperand* temp) {
inputs_[0] = object;
+ temps_[0] = temp;
}
LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
+ Representation representation() const {
+ return hydrogen()->field_representation();
+ }
};
}
+// Jumps to |if_deprecated| when |map|'s Deprecated bit (stored in bit_field3)
+// is set. Emits no code at all for maps that can never become deprecated,
+// which keeps unaffected stubs unchanged.
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+                                        Register scratch,
+                                        Label* if_deprecated) {
+  if (map->CanBeDeprecated()) {
+    mov(scratch, map);
+    // bit_field3 is stored as a Smi, hence the Smi-encoded mask below.
+    mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+    and_(scratch, Immediate(Smi::FromInt(Map::Deprecated::kMask)));
+    j(not_zero, if_deprecated);
+  }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), eax);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register eax).
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ mov(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(receiver_reg,
- offset,
- name_reg,
- scratch1,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ mov(FieldOperand(scratch1, offset), eax);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, value_reg);
- __ RecordWriteField(scratch1,
- offset,
- name_reg,
- receiver_reg,
- kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
+ }
}
// Return the value (register eax).
Register map_reg = scratch1();
__ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- __ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current));
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ __ cmp(map_reg, map);
+ __ j(equal, handlers->at(current));
+ }
}
+ ASSERT(number_of_handled_maps != 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
+ // If the code is NORMAL, it handles dictionary mode objects. Such stubs do
+ // not check maps, but do positive/negative lookups.
+ if (target->type() != Code::NORMAL) {
+ Map* map = target->FindFirstMap();
+ if (map != NULL && map->is_deprecated()) {
+ return true;
+ }
+ }
+
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
Code::ExtraICState extra_ic_state,
Handle<Object> object,
Handle<String> name) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
if (object->IsUndefined() || object->IsNull()) {
Handle<String>::cast(key));
}
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
if (object->IsUndefined() || object->IsNull()) {
return TypeError("non_object_property_call", object, key);
}
return Runtime::GetElementOrCharAtOrFail(isolate(), object, index);
}
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+ }
+
// Named lookup in the object.
LookupResult lookup(isolate());
LookupForRead(object, name, &lookup);
MapHandleList receiver_maps;
CodeHandleList handlers;
+ int number_of_valid_maps;
{
AssertNoAllocation no_gc;
target()->FindAllMaps(&receiver_maps);
int number_of_maps = receiver_maps.length();
- if (number_of_maps >= 4) return false;
+ number_of_valid_maps = number_of_maps;
+ for (int i = 0; i < number_of_maps; i++) {
+ if (receiver_maps.at(i)->is_deprecated()) {
+ number_of_valid_maps--;
+ }
+ }
+
+ if (number_of_valid_maps >= 4) return false;
// Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC.
// In that case, allow the IC to go back monomorphic.
handlers.Add(code);
Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
- &receiver_maps, &handlers, name);
+ &receiver_maps, &handlers, number_of_valid_maps + 1, name);
set_target(*ic);
return true;
}
if (target()->type() != Code::NORMAL) {
if (target()->is_load_stub()) {
CopyICToMegamorphicCache(name);
+ } else if (target()->is_store_stub()) {
+ // Ensure that the IC stays monomorphic when replacing a monomorphic
+ // IC for a deprecated map.
+ // TODO(verwaest): Remove this code once polymorphic store ICs are
+ // implemented. Updating the polymorphic IC will keep it monomorphic
+ // by filtering deprecated maps.
+ MapHandleList maps;
+ Code* handler = target();
+ handler->FindAllMaps(&maps);
+ for (int i = 0; i < Min(1, maps.length()); i++) {
+ if (maps.at(i)->is_deprecated()) {
+ UpdateMonomorphicIC(receiver, code, name);
+ return;
+ }
+ }
+ if (maps.length() > 0) {
+ if (receiver->map() == *maps.at(0)) {
+ UpdateMonomorphicIC(receiver, code, name);
+ return;
+ }
+ UpdateMegamorphicCache(*maps.at(0), *name, handler);
+ }
} else {
Code* handler = target();
Map* map = handler->FindFirstMap();
}
} else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+
if (receiver->elements()->map() ==
isolate()->heap()->non_strict_arguments_elements_map()) {
stub = non_strict_arguments_stub();
static bool LookupForWrite(Handle<JSObject> receiver,
Handle<String> name,
+ Handle<Object> value,
LookupResult* lookup) {
Handle<JSObject> holder = receiver;
receiver->Lookup(*name, lookup);
receiver->LocalLookupRealNamedProperty(*name, lookup);
return lookup->IsFound() &&
!lookup->IsReadOnly() &&
+ lookup->CanHoldValue(value) &&
lookup->IsCacheable();
}
- return true;
+ return lookup->CanHoldValue(value);
}
if (lookup->IsPropertyCallbacks()) return true;
// chain check. This avoids a double lookup, but requires us to pass in the
// receiver when trying to fetch extra information from the transition.
receiver->map()->LookupTransition(*holder, *name, lookup);
- return lookup->IsTransition() &&
- !lookup->GetTransitionDetails(receiver->map()).IsReadOnly();
+ if (!lookup->IsTransition()) return false;
+ PropertyDetails target_details =
+ lookup->GetTransitionDetails(receiver->map());
+ if (target_details.IsReadOnly()) return false;
+ return value->FitsRepresentation(target_details.representation());
}
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
+
// Check if the given name is an array index.
uint32_t index;
if (name->AsArrayIndex(&index)) {
}
LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, &lookup)) {
+ if (LookupForWrite(receiver, name, value, &lookup)) {
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}
if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->map()->is_deprecated()) {
+ JSObject::MigrateInstance(receiver);
+ }
bool key_is_smi_like = key->IsSmi() ||
(FLAG_compiled_keyed_stores && !key->ToSmi()->IsFailure());
if (receiver->elements()->map() ==
JSObject::TransitionToMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
- json_object->FastPropertyAtPut(i, *properties[i]);
+ Handle<Object> value = properties[i];
+ Representation representation =
+ map->instance_descriptors()->GetDetails(i).representation();
+ if (representation.IsDouble() && value->IsSmi()) {
+ // TODO(verwaest): Allocate heap number.
+ }
+ json_object->FastPropertyAtPut(i, *value);
}
transitioning = false;
}
if (value.is_null()) return ReportUnexpectedCharacter();
properties.Add(value, zone());
- if (transitioning) continue;
+ if (transitioning) {
+ int field = properties.length() - 1;
+ Representation expected_representation =
+ map->instance_descriptors()->GetDetails(field).representation();
+ if (!value->FitsRepresentation(expected_representation)) {
+ map = Map::GeneralizeRepresentation(
+ map, field, value->OptimalRepresentation());
+ }
+ continue;
+ }
} else {
key = ParseJsonInternalizedString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
JSObject::TransitionToMap(json_object, map);
int length = properties.length();
for (int i = 0; i < length; i++) {
- json_object->FastPropertyAtPut(i, *properties[i]);
+ Handle<Object> value = properties[i];
+ Representation representation =
+ map->instance_descriptors()->GetDetails(i).representation();
+ if (representation.IsDouble() && value->IsSmi()) {
+ // TODO(verwaest): Allocate heap number.
+ }
+ json_object->FastPropertyAtPut(i, *value);
}
}
}
// Packs the raw property-details bits into a Smi.
Smi* PropertyDetails::AsSmi() {
-  return Smi::FromInt(value_);
+  // Ensure the upper 2 bits have the same value by sign extending it. This is
+  // necessary to be able to use the 31st bit of the property details.
+  int value = value_ << 1;
+  return Smi::FromInt(value >> 1);
}
}
-MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
- ASSERT(this->map()->NumberOfOwnDescriptors() + 1 ==
- map->NumberOfOwnDescriptors());
- if (this->map()->unused_property_fields() == 0) {
- int new_size = properties()->length() + map->unused_property_fields() + 1;
- FixedArray* new_properties;
- MaybeObject* maybe_properties = properties()->CopySize(new_size);
- if (!maybe_properties->To(&new_properties)) return maybe_properties;
- set_properties(new_properties);
- }
- set_map(map);
- return this;
-}
-
-
MaybeObject* JSObject::TransitionToMap(Map* map) {
ASSERT(this->map()->inobject_properties() == map->inobject_properties());
ElementsKind expected_kind = this->map()->elements_kind();
}
+// Migrates an instance whose map is deprecated to the up-to-date version of
+// that map, by running field generalization starting at field 0.
+MaybeObject* JSObject::MigrateInstance() {
+  // Converting any field to the most specific type will cause the
+  // GeneralizeFieldRepresentation algorithm to create the most general existing
+  // transition that matches the object. This achieves what is needed.
+  return GeneralizeFieldRepresentation(0, Representation::Smi());
+}
+
+
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
AssertNoAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
}
+// Replaces the stored PropertyDetails of |descriptor_index| with a copy whose
+// representation is |representation|.
+void DescriptorArray::SetRepresentation(int descriptor_index,
+                                        Representation representation) {
+  ASSERT(!representation.IsNone());
+  PropertyDetails details = GetDetails(descriptor_index);
+  set(ToDetailsIndex(descriptor_index),
+      details.CopyWithRepresentation(representation).AsSmi());
+}
+
+
+// Sets every descriptor in this array to the given representation.
+void DescriptorArray::InitializeRepresentations(Representation representation) {
+  int length = number_of_descriptors();
+  for (int i = 0; i < length; i++) {
+    SetRepresentation(i, representation);
+  }
+}
+
+
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return HeapObject::RawField(
number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() > 0);
+ ASSERT(!desc->GetDetails().representation().IsNone());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
desc->GetKey());
ASSERT(desc->GetDetails().descriptor_index() <=
number_of_descriptors());
ASSERT(desc->GetDetails().descriptor_index() > 0);
+ ASSERT(!desc->GetDetails().representation().IsNone());
set(ToKeyIndex(descriptor_number), desc->GetKey());
set(ToValueIndex(descriptor_number), desc->GetValue());
}
+// Marks this map as deprecated; instances should be migrated to a current
+// map before further fast-path use.
+void Map::deprecate() {
+  set_bit_field3(Deprecated::update(bit_field3(), true));
+}
+
+
+// Whether this map has been deprecated. Always false when field tracking is
+// disabled, since maps are never deprecated in that mode.
+bool Map::is_deprecated() {
+  if (!FLAG_track_fields) return false;
+  return Deprecated::decode(bit_field3());
+}
+
+
+// A map can become deprecated iff one of its own descriptors carries a
+// trackable (Smi or double) representation that a later store could force to
+// be generalized.
+bool Map::CanBeDeprecated() {
+  int descriptor = LastAdded();
+  for (int i = 0; i <= descriptor; i++) {
+    PropertyDetails details = instance_descriptors()->GetDetails(i);
+    if (FLAG_track_fields && details.representation().IsSmi()) {
+      return true;
+    }
+    if (FLAG_track_double_fields && details.representation().IsDouble()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
void Map::NotifyLeafMapLayoutChange() {
dependent_code()->DeoptimizeDependentCodeGroup(
GetIsolate(),
ASSERT(enumeration_index > 0);
}
- details = PropertyDetails(
- details.attributes(), details.type(), enumeration_index);
+ details = PropertyDetails(details.attributes(), details.type(),
+ Representation::None(), enumeration_index);
if (IsGlobalObject()) {
JSGlobalPropertyCell* cell =
if (map()->unused_property_fields() == 0) {
int new_unused = new_map->unused_property_fields();
FixedArray* values;
- { MaybeObject* maybe_values =
- properties()->CopySize(properties()->length() + new_unused + 1);
- if (!maybe_values->To(&values)) return maybe_values;
- }
+ MaybeObject* maybe_values =
+ properties()->CopySize(properties()->length() + new_unused + 1);
+ if (!maybe_values->To(&values)) return maybe_values;
+
set_properties(values);
}
set_map(new_map);
int index = map()->NextFreePropertyIndex();
// Allocate new instance descriptors with (name, index) added
- FieldDescriptor new_field(name, index, attributes, 0);
+ FieldDescriptor new_field(
+ name, index, attributes, value->OptimalRepresentation(), 0);
ASSERT(index < map()->inobject_properties() ||
(index - map()->inobject_properties()) < properties()->length() ||
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dict->NextEnumerationIndex();
- PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None(), index);
dict->SetNextEnumerationIndex(index + 1);
dict->SetEntry(entry, name, store_value, details);
return value;
}
JSGlobalPropertyCell::cast(store_value)->set_value(value);
}
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None());
Object* result;
{ MaybeObject* maybe_result = dict->Add(name, store_value, details);
if (!maybe_result->ToObject(&result)) return maybe_result;
new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
}
- PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
+ PropertyDetails new_details(
+ attributes, NORMAL, Representation::None(), new_enumeration_index);
return SetNormalizedProperty(name, value, new_details);
}
// TODO(verwaest): From here on we lose existing map transitions, causing
// invalid back pointers. This will change once we can store multiple
// transitions with the same key.
-
bool owned_descriptors = old_map->owns_descriptors();
if (owned_descriptors ||
old_target->instance_descriptors() == old_map->instance_descriptors()) {
old_map->set_owns_descriptors(false);
}
+ old_target->DeprecateTransitionTree();
+
old_map->SetTransition(transition_index, new_map);
new_map->SetBackPointer(old_map);
return result;
}
int index = map()->NextFreePropertyIndex();
- FieldDescriptor new_field(name, index, attributes, 0);
+ FieldDescriptor new_field(
+ name, index, attributes, new_value->OptimalRepresentation(), 0);
// Make a new map for the object.
Map* new_map;
}
+// One-letter mnemonic for a representation kind, used in debug printing.
+const char* Representation::Mnemonic() const {
+  switch (kind_) {
+    case kNone: return "v";
+    case kTagged: return "t";
+    case kSmi: return "s";
+    case kDouble: return "d";
+    case kInteger32: return "i";
+    case kExternal: return "x";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+// Who initiated a right-trim. This determines whether zapping is forced and
+// which live-bytes counter is adjusted (see RightTrimFixedArray).
+enum RightTrimMode { FROM_GC, FROM_MUTATOR };
+
+
+// Overwrites the trimmed-off tail of a fixed array (starting at |new_end|)
+// with Smi zero so no stale values remain in the freed space.
+static void ZapEndOfFixedArray(Address new_end, int to_trim) {
+  // If we are doing a big trim in old space then we zap the space.
+  Object** zap = reinterpret_cast<Object**>(new_end);
+  zap++;  // Header of filler must be at least one word so skip that.
+  for (int i = 1; i < to_trim; i++) {
+    *zap++ = Smi::FromInt(0);
+  }
+}
+
+
+// Shrinks |elms| in place by |to_trim| elements: zaps the tail (unless a GC
+// trim with zapping disabled), installs a filler object over the freed space,
+// updates the array length, and keeps incremental-marking live-byte counts
+// consistent.
+template<RightTrimMode trim_mode>
+static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
+  // COW arrays are shared and must never be trimmed in place.
+  ASSERT(elms->map() != HEAP->fixed_cow_array_map());
+  // For now this trick is only applied to fixed arrays in new and paged space.
+  ASSERT(!HEAP->lo_space()->Contains(elms));
+
+  const int len = elms->length();
+
+  ASSERT(to_trim < len);
+
+  Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
+
+  if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
+    ZapEndOfFixedArray(new_end, to_trim);
+  }
+
+  int size_delta = to_trim * kPointerSize;
+
+  // Technically in new space this write might be omitted (except for
+  // debug mode which iterates through the heap), but to play safer
+  // we still do it.
+  heap->CreateFillerObjectAt(new_end, size_delta);
+
+  elms->set_length(len - to_trim);
+
+  // Maintain marking consistency for IncrementalMarking.
+  if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
+    if (trim_mode == FROM_GC) {
+      MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
+    } else {
+      MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
+    }
+  }
+}
+
+
+// Returns true when instances of this (source) map must have their fields
+// physically rewritten to match a target map with the given field counts, as
+// opposed to just swapping in the new map pointer.
+bool Map::InstancesNeedRewriting(int target_number_of_fields,
+                                 int target_inobject,
+                                 int target_unused) {
+  // If fields were added (or removed), rewrite the instance.
+  int number_of_fields = NumberOfFields();
+  ASSERT(target_number_of_fields >= number_of_fields);
+  if (target_number_of_fields != number_of_fields) return true;
+  // If no fields were added, and no inobject properties were removed, setting
+  // the map is sufficient.
+  if (target_inobject == inobject_properties()) return false;
+  // In-object slack tracking may have reduced the object size of the new map.
+  // In that case, succeed if all existing fields were inobject, and they still
+  // fit within the new inobject size.
+  ASSERT(target_inobject < inobject_properties());
+  if (target_number_of_fields <= target_inobject) {
+    ASSERT(target_number_of_fields + target_unused == target_inobject);
+    return false;
+  }
+  // Otherwise, properties will need to be moved to the backing store.
+  return true;
+}
+
+
+// To migrate an instance to a map:
+// - First check whether the instance needs to be rewritten. If not, simply
+// change the map.
+// - Otherwise, allocate a fixed array large enough to hold all fields, in
+// addition to unused space.
+// - Copy all existing properties in, in the following order: backing store
+// properties, unused fields, inobject properties.
+// - If all allocation succeeded, commit the state atomically:
+// * Copy inobject properties from the backing store back into the object.
+// * Trim the difference in instance size of the object. This also cleanly
+// frees inobject properties that moved to the backing store.
+// * If there are properties left in the backing store, trim of the space used
+// to temporarily store the inobject properties.
+// * If there are properties left in the backing store, install the backing
+// store.
+MaybeObject* JSObject::MigrateToMap(Map* new_map) {
+  Heap* heap = GetHeap();
+  Map* old_map = map();
+  int number_of_fields = new_map->NumberOfFields();
+  int inobject = new_map->inobject_properties();
+  int unused = new_map->unused_property_fields();
+
+  // Nothing to do if no functions were converted to fields.
+  if (!old_map->InstancesNeedRewriting(number_of_fields, inobject, unused)) {
+    set_map(new_map);
+    return this;
+  }
+
+  // Allocate one scratch array that can hold every field plus the unused
+  // slack; this is the only allocation, done before any state is mutated.
+  int total_size = number_of_fields + unused;
+  int external = total_size - inobject;
+  FixedArray* array;
+  MaybeObject* maybe_array = heap->AllocateFixedArray(total_size);
+  if (!maybe_array->To(&array)) return maybe_array;
+
+  DescriptorArray* old_descriptors = old_map->instance_descriptors();
+  DescriptorArray* new_descriptors = new_map->instance_descriptors();
+  int descriptors = new_map->NumberOfOwnDescriptors();
+
+  // Gather all field values into |array|. The index arithmetic places
+  // backing-store fields at the front and inobject fields at the tail.
+  for (int i = 0; i < descriptors; i++) {
+    PropertyDetails details = new_descriptors->GetDetails(i);
+    if (details.type() != FIELD) continue;
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    ASSERT(old_details.type() == CONSTANT_FUNCTION ||
+           old_details.type() == FIELD);
+    // A CONSTANT_FUNCTION in the old map becomes a field value in the new one.
+    Object* value = old_details.type() == CONSTANT_FUNCTION
+        ? old_descriptors->GetValue(i)
+        : FastPropertyAt(old_descriptors->GetFieldIndex(i));
+    int target_index = new_descriptors->GetFieldIndex(i) - inobject;
+    if (target_index < 0) target_index += total_size;
+    array->set(target_index, value);
+  }
+
+  // From here on we cannot fail anymore.
+
+  // Copy (real) inobject properties. If necessary, stop at number_of_fields to
+  // avoid overwriting |one_pointer_filler_map|.
+  int limit = Min(inobject, number_of_fields);
+  for (int i = 0; i < limit; i++) {
+    FastPropertyAtPut(i, array->get(external + i));
+  }
+
+  // Create filler object past the new instance size.
+  int new_instance_size = new_map->instance_size();
+  int instance_size_delta = old_map->instance_size() - new_instance_size;
+  ASSERT(instance_size_delta >= 0);
+  Address address = this->address() + new_instance_size;
+  heap->CreateFillerObjectAt(address, instance_size_delta);
+
+  // If there are properties in the new backing store, trim it to the correct
+  // size and install the backing store into the object.
+  if (external > 0) {
+    // Trim off the tail that temporarily held the inobject values.
+    RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject);
+    set_properties(array);
+  }
+
+  set_map(new_map);
+
+  return this;
+}
+
+
+// Generalizes the representation of field |modify_index| on this object's map
+// and migrates the instance to the resulting (possibly unchanged) map.
+MaybeObject* JSObject::GeneralizeFieldRepresentation(
+    int modify_index,
+    Representation new_representation) {
+  Map* new_map;
+  MaybeObject* maybe_new_map =
+      map()->GeneralizeRepresentation(modify_index, new_representation);
+  if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+  // Either generalization produced a different map, or the root of this
+  // map's transition tree is already deprecated.
+  ASSERT(map() != new_map || new_map->FindRootMap()->is_deprecated());
+
+  return MigrateToMap(new_map);
+}
+
+
+// Counts this map's own descriptors of type FIELD, i.e. how many property
+// slots instances of this map actually use.
+int Map::NumberOfFields() {
+  DescriptorArray* descriptors = instance_descriptors();
+  int result = 0;
+  for (int i = 0; i < NumberOfOwnDescriptors(); i++) {
+    if (descriptors->GetDetails(i).type() == FIELD) result++;
+  }
+  return result;
+}
+
+
+// Bail-out path for GeneralizeRepresentation: copies this map and resets
+// every field representation in the copy to Tagged, abandoning
+// representation tracking for this object shape.
+MaybeObject* Map::CopyGeneralizeAllRepresentations() {
+  Map* new_map;
+  MaybeObject* maybe_map = this->Copy();
+  if (!maybe_map->To(&new_map)) return maybe_map;
+
+  new_map->instance_descriptors()->InitializeRepresentations(
+      Representation::Tagged());
+  return new_map;
+}
+
+
+// Recursively marks this map and every transition target below it as
+// deprecated, deoptimizing code that depends on each map's transitions or
+// prototype checks. Does nothing unless FLAG_track_fields is enabled.
+// NOTE(review): recursion depth equals the transition-tree depth; assumed to
+// stay within stack limits -- confirm for pathological transition chains.
+void Map::DeprecateTransitionTree() {
+  if (!FLAG_track_fields) return;
+  if (is_deprecated()) return;  // This subtree was already deprecated.
+  if (HasTransitionArray()) {
+    TransitionArray* transitions = this->transitions();
+    for (int i = 0; i < transitions->number_of_transitions(); i++) {
+      transitions->GetTarget(i)->DeprecateTransitionTree();
+    }
+  }
+  deprecate();
+  dependent_code()->DeoptimizeDependentCodeGroup(
+      GetIsolate(), DependentCode::kTransitionGroup);
+  dependent_code()->DeoptimizeDependentCodeGroup(
+      GetIsolate(), DependentCode::kPrototypeCheckGroup);
+}
+
+
+// Invalidates a transition target at |key|, and installs |new_descriptors|
+// over the current instance_descriptors to ensure proper sharing of
+// descriptor arrays.
+void Map::DeprecateTarget(Name* key, DescriptorArray* new_descriptors) {
+  // Deprecate the whole subtree rooted at the existing transition for |key|,
+  // if there is one.
+  if (HasTransitionArray()) {
+    TransitionArray* transitions = this->transitions();
+    int transition = transitions->Search(key);
+    if (transition != TransitionArray::kNotFound) {
+      transitions->GetTarget(transition)->DeprecateTransitionTree();
+    }
+  }
+
+  // Don't overwrite the empty descriptor array.
+  if (NumberOfOwnDescriptors() == 0) return;
+
+  // Walk the back-pointer chain, replacing the shared descriptor array on
+  // every map that still points at it, so sharing is preserved.
+  DescriptorArray* to_replace = instance_descriptors();
+  Map* current = this;
+  while (current->instance_descriptors() == to_replace) {
+    current->SetEnumLength(Map::kInvalidEnumCache);  // Enum cache is stale.
+    current->set_instance_descriptors(new_descriptors);
+    Object* next = current->GetBackPointer();
+    if (next->IsUndefined()) break;  // Reached the root of the tree.
+    current = Map::cast(next);
+  }
+
+  set_owns_descriptors(false);
+}
+
+
+// Follows back pointers up to the root of this map's transition tree (the
+// map whose back pointer is undefined).
+Map* Map::FindRootMap() {
+  Map* result = this;
+  while (true) {
+    Object* back = result->GetBackPointer();
+    if (back->IsUndefined()) return result;
+    result = Map::cast(back);
+  }
+}
+
+
+// Starting from this transition-tree root, follows transitions keyed by
+// |descriptors|' keys in [verbatim, length) and returns the deepest map
+// reached, stopping early when a key has no transition. The result is the
+// newest version of this map shape recorded in the transition tree.
+Map* Map::FindUpdatedMap(int verbatim,
+                         int length,
+                         DescriptorArray* descriptors) {
+  // This can only be called on roots of transition trees.
+  ASSERT(GetBackPointer()->IsUndefined());
+
+  Map* current = this;
+
+  for (int i = verbatim; i < length; i++) {
+    if (!current->HasTransitionArray()) break;
+    Name* name = descriptors->GetKey(i);
+    TransitionArray* transitions = current->transitions();
+    int transition = transitions->Search(name);
+    if (transition == TransitionArray::kNotFound) break;
+    current = transitions->GetTarget(transition);
+  }
+
+  return current;
+}
+
+
+// Like FindUpdatedMap, but additionally stops at the first transition target
+// whose descriptor at position i disagrees with |descriptors| in value, type,
+// attributes or representation. Returns the deepest map that fully matches
+// |descriptors| so far.
+Map* Map::FindLastMatchMap(int verbatim,
+                           int length,
+                           DescriptorArray* descriptors) {
+  // This can only be called on roots of transition trees.
+  ASSERT(GetBackPointer()->IsUndefined());
+
+  Map* current = this;
+
+  for (int i = verbatim; i < length; i++) {
+    if (!current->HasTransitionArray()) break;
+    Name* name = descriptors->GetKey(i);
+    TransitionArray* transitions = current->transitions();
+    int transition = transitions->Search(name);
+    if (transition == TransitionArray::kNotFound) break;
+
+    Map* next = transitions->GetTarget(transition);
+    DescriptorArray* next_descriptors = next->instance_descriptors();
+
+    // Stop as soon as the target's descriptor differs in any component.
+    if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break;
+
+    PropertyDetails details = descriptors->GetDetails(i);
+    PropertyDetails next_details = next_descriptors->GetDetails(i);
+    if (details.type() != next_details.type()) break;
+    if (details.attributes() != next_details.attributes()) break;
+    if (!details.representation().Equals(next_details.representation())) break;
+    ASSERT(!details.IsDeleted());
+    ASSERT(!next_details.IsDeleted());
+
+    current = next;
+  }
+  return current;
+}
+
+
+// Generalize the representation of the descriptor at |modify_index|.
+// This method rewrites the transition tree to reflect the new change. To
+// avoid high degrees of polymorphism, and to stabilize quickly, on every
+// rewrite the new type is deduced by merging the current type with any
+// potential new (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap.
+// - Find |updated|, the newest matching version of this map using
+//   FindUpdatedMap. This uses the keys in the own map's descriptor array to
+//   walk the transition tree.
+// - Merge/generalize the descriptor array of the current map and |updated|.
+// - Generalize the |modify_index| descriptor using |new_representation|.
+// - Walk the tree again starting from the root towards |updated|. Stop at
+//   |split_map|, the first map whose descriptor array does not match the
+//   merged descriptor array.
+// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
+// - Otherwise, invalidate the outdated transition target from |updated|, and
+//   replace its transition tree with a new branch for the updated descriptors.
+MaybeObject* Map::GeneralizeRepresentation(int modify_index,
+                                           Representation new_representation) {
+  Map* old_map = this;
+  DescriptorArray* old_descriptors = old_map->instance_descriptors();
+  Representation old_representation =
+      old_descriptors->GetDetails(modify_index).representation();
+
+  // A None representation marks a field that was never assigned; it can be
+  // upgraded in place without rewriting the transition tree.
+  if (old_representation.IsNone()) {
+    UNREACHABLE();
+    old_descriptors->SetRepresentation(modify_index, new_representation);
+    return this;
+  }
+
+  int descriptors = old_map->NumberOfOwnDescriptors();
+  Map* root_map = old_map->FindRootMap();
+
+  // If the root differs in a way transitions cannot express, give up and
+  // generalize every representation to Tagged.
+  if (!old_map->EquivalentToForTransition(root_map)) {
+    return CopyGeneralizeAllRepresentations();
+  }
+
+  int verbatim = root_map->NumberOfOwnDescriptors();
+
+  Map* updated = root_map->FindUpdatedMap(
+      verbatim, descriptors, old_descriptors);
+  // Check the state of the root map.
+  DescriptorArray* updated_descriptors = updated->instance_descriptors();
+
+  DescriptorArray* new_descriptors;
+  MaybeObject* maybe_descriptors = updated_descriptors->Merge(
+      verbatim,
+      updated->NumberOfOwnDescriptors(),
+      descriptors,
+      old_descriptors);
+  if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+
+  // Generalize against whatever representation the merge produced.
+  old_representation =
+      new_descriptors->GetDetails(modify_index).representation();
+  new_representation = new_representation.generalize(old_representation);
+  new_descriptors->SetRepresentation(modify_index, new_representation);
+
+  Map* split_map = root_map->FindLastMatchMap(
+      verbatim, descriptors, new_descriptors);
+
+  int split_descriptors = split_map->NumberOfOwnDescriptors();
+  // Check whether |split_map| matches what we were looking for. If so, return
+  // it.
+  if (descriptors == split_descriptors) return split_map;
+
+  int descriptor = split_descriptors;
+  split_map->DeprecateTarget(
+      old_descriptors->GetKey(descriptor), new_descriptors);
+
+  Map* new_map = split_map;
+  // Add missing transitions.
+  for (; descriptor < descriptors; descriptor++) {
+    MaybeObject* maybe_map = new_map->CopyInstallDescriptors(
+        descriptor, new_descriptors);
+    if (!maybe_map->To(&new_map)) {
+      // Create a handle for the last created map to ensure it stays alive
+      // during GC. Its descriptor array is too large, but it will be
+      // overwritten during retry anyway.
+      Handle<Map>(new_map);
+      // Propagate the allocation failure so the caller can collect garbage
+      // and retry; previously the failure was swallowed and the loop kept
+      // running with a stale |new_map|.
+      return maybe_map;
+    }
+  }
+
+  new_map->set_owns_descriptors(true);
+  return new_map;
+}
+
MaybeObject* JSObject::SetPropertyWithInterceptor(
Name* name,
}
-enum RightTrimMode { FROM_GC, FROM_MUTATOR };
-
-
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
- // If we are doing a big trim in old space then we zap the space.
- Object** zap = reinterpret_cast<Object**>(new_end);
- zap++; // Header of filler must be at least one word so skip that.
- for (int i = 1; i < to_trim; i++) {
- *zap++ = Smi::FromInt(0);
- }
-}
-
-
-template<RightTrimMode trim_mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
- ASSERT(elms->map() != HEAP->fixed_cow_array_map());
- // For now this trick is only applied to fixed arrays in new and paged space.
- ASSERT(!HEAP->lo_space()->Contains(elms));
-
- const int len = elms->length();
-
- ASSERT(to_trim < len);
-
- Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
- if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) {
- ZapEndOfFixedArray(new_end, to_trim);
- }
-
- int size_delta = to_trim * kPointerSize;
-
- // Technically in new space this write might be omitted (except for
- // debug mode which iterates through the heap), but to play safer
- // we still do it.
- heap->CreateFillerObjectAt(new_end, size_delta);
-
- elms->set_length(len - to_trim);
-
- // Maintain marking consistency for IncrementalMarking.
- if (Marking::IsBlack(Marking::MarkBitFrom(elms))) {
- if (trim_mode == FROM_GC) {
- MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
- }
- }
-}
-
-
void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
if (slack <= descriptors->NumberOfSlackDescriptors()) return;
}
-void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
-                                       Handle<Map> map) {
+// Handle-based wrapper (renamed from AddFastPropertyUsingMap): transitions
+// |object| to |map|, retrying via CALL_HEAP_FUNCTION_VOID on allocation
+// failure.
+void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) {
   CALL_HEAP_FUNCTION_VOID(
       object->GetIsolate(),
-      object->AddFastPropertyUsingMap(*map));
+      object->TransitionToMap(*map));
 }
-void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) {
+// Handle-based wrapper for JSObject::MigrateInstance: migrates |object| to
+// the updated version of its map, retrying on allocation failure.
+void JSObject::MigrateInstance(Handle<JSObject> object) {
   CALL_HEAP_FUNCTION_VOID(
       object->GetIsolate(),
-      object->TransitionToMap(*map));
+      object->MigrateInstance());
+}
+
+
+// Handle-based wrapper for the raw Map::GeneralizeRepresentation: retries
+// via CALL_HEAP_FUNCTION on allocation failure and returns the new map as a
+// handle.
+Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map,
+                                          int modify_index,
+                                          Representation new_representation) {
+  CALL_HEAP_FUNCTION(
+      map->GetIsolate(),
+      map->GeneralizeRepresentation(modify_index, new_representation),
+      Map);
}
case NORMAL:
result = lookup->holder()->SetNormalizedProperty(lookup, *value);
break;
- case FIELD:
+ case FIELD: {
+ Representation representation = lookup->representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->GeneralizeFieldRepresentation(
+ lookup->GetDescriptorIndex(), value->OptimalRepresentation());
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
result = lookup->holder()->FastPropertyAtPut(
lookup->GetFieldIndex().field_index(), *value);
break;
+ }
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value == lookup->GetConstantFunction()) return *value;
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
+ if (!value->FitsRepresentation(details.representation())) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation());
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure =
+ lookup->holder()->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ }
int field_index = descriptors->GetFieldIndex(descriptor);
result = lookup->holder()->AddFastPropertyUsingMap(
transition_map, *name, *value, field_index);
MaybeObject* result = *value;
switch (lookup.type()) {
case NORMAL: {
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None());
result = self->SetNormalizedProperty(*name, *value, details);
break;
}
- case FIELD:
+ case FIELD: {
+ Representation representation = lookup.representation();
+ if (!value->FitsRepresentation(representation)) {
+ MaybeObject* maybe_failure = self->GeneralizeFieldRepresentation(
+ lookup.GetDescriptorIndex(), value->OptimalRepresentation());
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
result = self->FastPropertyAtPut(
lookup.GetFieldIndex().field_index(), *value);
break;
+ }
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value != lookup.GetConstantFunction()) {
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
+ if (!value->FitsRepresentation(details.representation())) {
+ MaybeObject* maybe_map = transition_map->GeneralizeRepresentation(
+ descriptor, value->OptimalRepresentation());
+ if (!maybe_map->To(&transition_map)) return maybe_map;
+ Object* back = transition_map->GetBackPointer();
+ if (back->IsMap()) {
+ MaybeObject* maybe_failure = self->MigrateToMap(Map::cast(back));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
+ }
int field_index = descriptors->GetFieldIndex(descriptor);
result = self->AddFastPropertyUsingMap(
transition_map, *name, *value, field_index);
case CONSTANT_FUNCTION: {
PropertyDetails d = PropertyDetails(details.attributes(),
NORMAL,
+ Representation::None(),
details.descriptor_index());
Object* value = descs->GetConstantFunction(i);
MaybeObject* maybe_dictionary =
case FIELD: {
PropertyDetails d = PropertyDetails(details.attributes(),
NORMAL,
+ Representation::None(),
details.descriptor_index());
Object* value = FastPropertyAt(descs->GetFieldIndex(i));
MaybeObject* maybe_dictionary =
}
case CALLBACKS: {
Object* value = descs->GetCallbacksObject(i);
- details = details.set_pointer(0);
+ PropertyDetails d = PropertyDetails(details.attributes(),
+ CALLBACKS,
+ Representation::None(),
+ details.descriptor_index());
MaybeObject* maybe_dictionary =
- dictionary->Add(descs->GetKey(i), value, details);
+ dictionary->Add(descs->GetKey(i), value, d);
if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
break;
}
ASSERT(old_map->has_fast_smi_or_object_elements());
value = FixedArray::cast(array)->get(i);
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ NONE, NORMAL, Representation::None());
if (!value->IsTheHole()) {
Object* result;
MaybeObject* maybe_result =
}
-int Map::PropertyIndexFor(Name* name) {
- DescriptorArray* descs = instance_descriptors();
- int limit = NumberOfOwnDescriptors();
- for (int i = 0; i < limit; i++) {
- if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i);
- }
- return -1;
-}
-
-
int Map::NextFreePropertyIndex() {
int max_index = -1;
int number_of_own_descriptors = NumberOfOwnDescriptors();
if (details.type() == CALLBACKS && result->IsAccessorPair()) {
ASSERT(!details.IsDontDelete());
if (details.attributes() != attributes) {
- dictionary->DetailsAtPut(entry,
- PropertyDetails(attributes, CALLBACKS, index));
+ dictionary->DetailsAtPut(
+ entry,
+ PropertyDetails(
+ attributes, CALLBACKS, Representation::None(), index));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
MaybeObject* JSObject::SetElementCallback(uint32_t index,
Object* structure,
PropertyAttributes attributes) {
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ PropertyDetails details = PropertyDetails(
+ attributes, CALLBACKS, Representation::None());
// Normalize elements to make this operation simple.
SeededNumberDictionary* dictionary;
}
// Update the dictionary with the new CALLBACKS property.
- PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+ PropertyDetails details = PropertyDetails(
+ attributes, CALLBACKS, Representation::None());
maybe_ok = SetNormalizedProperty(name, structure, details);
if (maybe_ok->IsFailure()) return maybe_ok;
(descriptor_index == descriptors->number_of_descriptors() - 1)
? SIMPLE_TRANSITION
: FULL_TRANSITION;
+ ASSERT(name == descriptors->GetKey(descriptor_index));
MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag);
if (!maybe_transitions->To(&transitions)) return maybe_transitions;
}
+// Copies this map, installing |descriptors| on the copy and making it own the
+// first |new_descriptor| + 1 of them. Adds a simple transition for the new
+// descriptor's key when possible. Used by GeneralizeRepresentation to rebuild
+// a transition branch with merged descriptors.
+MaybeObject* Map::CopyInstallDescriptors(int new_descriptor,
+                                         DescriptorArray* descriptors) {
+  ASSERT(descriptors->IsSortedNoDuplicates());
+
+  Map* result;
+  MaybeObject* maybe_result = CopyDropDescriptors();
+  if (!maybe_result->To(&result)) return maybe_result;
+
+  result->InitializeDescriptors(descriptors);
+  result->SetNumberOfOwnDescriptors(new_descriptor + 1);
+
+  // A FIELD descriptor consumes one property slot; wrap around by
+  // kFieldsAdded when the backing store would have to grow.
+  int unused_property_fields = this->unused_property_fields();
+  if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
+    unused_property_fields = this->unused_property_fields() - 1;
+    if (unused_property_fields < 0) {
+      unused_property_fields += JSObject::kFieldsAdded;
+    }
+  }
+
+  result->set_unused_property_fields(unused_property_fields);
+  result->set_owns_descriptors(false);
+
+  // Link the new map into the transition tree when this map can still accept
+  // transitions; otherwise the new map stays detached (no back pointer).
+  if (CanHaveMoreTransitions()) {
+    Name* name = descriptors->GetKey(new_descriptor);
+    TransitionArray* transitions;
+    MaybeObject* maybe_transitions =
+        AddTransition(name, result, SIMPLE_TRANSITION);
+    if (!maybe_transitions->To(&transitions)) return maybe_transitions;
+
+    set_transitions(transitions);
+    result->SetBackPointer(this);
+  }
+
+  return result;
+}
+
+
MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) {
if (flag == INSERT_TRANSITION) {
ASSERT(!HasElementsTransition() ||
descriptors->CopyUpTo(number_of_own_descriptors);
if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors;
+ new_descriptors->InitializeRepresentations(Representation::Tagged());
+
return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0);
}
}
+// Generalize the |other| descriptor array by merging it with the (at least
+// partly) updated |this| descriptor array.
+// The method merges two descriptor arrays in three parts. Both descriptor
+// arrays are identical up to |verbatim|. They also overlap in keys up to
+// |valid|. Between |verbatim| and |valid|, the resulting descriptor type as
+// well as the representation are generalized from both |this| and |other|.
+// Beyond |valid|, the descriptors are copied verbatim from |other| up to
+// |new_size|. In case of incompatible types, the type and representation of
+// |other| are used.
+MaybeObject* DescriptorArray::Merge(int verbatim,
+                                    int valid,
+                                    int new_size,
+                                    DescriptorArray* other) {
+  ASSERT(verbatim <= valid);
+  ASSERT(valid <= new_size);
+
+  DescriptorArray* result;
+  // Allocate a new descriptor array large enough to hold the required
+  // descriptors, with minimally the exact same size as this descriptor array.
+  MaybeObject* maybe_descriptors = DescriptorArray::Allocate(
+      new_size, Max(new_size, number_of_descriptors()) - new_size);
+  if (!maybe_descriptors->To(&result)) return maybe_descriptors;
+  ASSERT(result->length() > length() ||
+         result->NumberOfSlackDescriptors() > 0 ||
+         result->number_of_descriptors() == other->number_of_descriptors());
+  ASSERT(result->number_of_descriptors() == new_size);
+
+  DescriptorArray::WhitenessWitness witness(result);
+
+  int descriptor;
+
+  // 0 -> |verbatim|
+  // |current_offset| tracks the next free field index so that merged FIELD
+  // descriptors receive consecutive offsets in the result.
+  int current_offset = 0;
+  for (descriptor = 0; descriptor < verbatim; descriptor++) {
+    if (GetDetails(descriptor).type() == FIELD) current_offset++;
+    result->CopyFrom(descriptor, this, descriptor, witness);
+  }
+
+  // |verbatim| -> |valid|
+  for (; descriptor < valid; descriptor++) {
+    Name* key = GetKey(descriptor);
+    PropertyDetails details = GetDetails(descriptor);
+    PropertyDetails other_details = other->GetDetails(descriptor);
+    ASSERT(details.attributes() == other_details.attributes());
+
+    // A descriptor becomes a FIELD when either side is a FIELD, or when both
+    // are CONSTANT_FUNCTION but disagree on the value; otherwise |other|'s
+    // descriptor is taken unchanged.
+    if (details.type() == FIELD || other_details.type() == FIELD ||
+        (details.type() == CONSTANT_FUNCTION &&
+         other_details.type() == CONSTANT_FUNCTION &&
+         GetValue(descriptor) != other->GetValue(descriptor))) {
+      Representation representation =
+          details.representation().generalize(other_details.representation());
+      FieldDescriptor d(key,
+                        current_offset++,
+                        details.attributes(),
+                        representation,
+                        descriptor + 1);
+      result->Set(descriptor, &d, witness);
+    } else {
+      result->CopyFrom(descriptor, other, descriptor, witness);
+    }
+  }
+
+  // |valid| -> |new_size|
+  for (; descriptor < new_size; descriptor++) {
+    PropertyDetails details = other->GetDetails(descriptor);
+    // FIELD descriptors are re-created so their offsets follow on from the
+    // merged part; everything else is copied as-is.
+    if (details.type() == FIELD) {
+      Name* key = other->GetKey(descriptor);
+      FieldDescriptor d(key,
+                        current_offset++,
+                        details.attributes(),
+                        details.representation(),
+                        descriptor + 1);
+      result->Set(descriptor, &d, witness);
+    } else {
+      result->CopyFrom(descriptor, other, descriptor, witness);
+    }
+  }
+
+  result->Sort();
+  return result;
+}
+
+
// We need the whiteness witness since sort will reshuffle the entries in the
// descriptor array. If the descriptor array were to be black, the shuffling
// would move a slot that was already recorded as pointing into an evacuation
}
+// Returns true when |first| and |second| agree on all map properties that
+// must match for two maps to describe equivalent objects: constructor,
+// prototype, instance type, bit fields, observedness and the
+// function-with-prototype flag. Shared by EquivalentToForTransition and
+// EquivalentToForNormalization.
+static bool CheckEquivalent(Map* first, Map* second) {
+  return
+    first->constructor() == second->constructor() &&
+    first->prototype() == second->prototype() &&
+    first->instance_type() == second->instance_type() &&
+    first->bit_field() == second->bit_field() &&
+    first->bit_field2() == second->bit_field2() &&
+    first->is_observed() == second->is_observed() &&
+    first->function_with_prototype() == second->function_with_prototype();
+}
+
+
+// Two maps may share a transition tree only when they are equivalent in the
+// CheckEquivalent sense.
+bool Map::EquivalentToForTransition(Map* other) {
+  return CheckEquivalent(this, other);
+}
+
+
 bool Map::EquivalentToForNormalization(Map* other,
                                        PropertyNormalizationMode mode) {
-  return
-    constructor() == other->constructor() &&
-    prototype() == other->prototype() &&
-    inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
-                              0 :
-                              other->inobject_properties()) &&
-    instance_type() == other->instance_type() &&
-    bit_field() == other->bit_field() &&
-    bit_field2() == other->bit_field2() &&
-    is_observed() == other->is_observed() &&
-    function_with_prototype() == other->function_with_prototype();
+  // Equivalence plus a matching in-object property count; the count is
+  // treated as zero when the normalization mode clears in-object properties.
+  int properties = mode == CLEAR_INOBJECT_PROPERTIES
+      ? 0 : other->inobject_properties();
+  return CheckEquivalent(this, other) && inobject_properties() == properties;
}
// is read-only (a declared const that has not been initialized). If a
// value is being defined we skip attribute checks completely.
if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(
- attributes, NORMAL, details.dictionary_index());
+ details = PropertyDetails(attributes, NORMAL, Representation::None(),
+ details.dictionary_index());
dictionary->DetailsAtPut(entry, details);
} else if (details.IsReadOnly() && !element->IsTheHole()) {
if (strict_mode == kNonStrictMode) {
}
}
FixedArrayBase* new_dictionary;
- PropertyDetails details = PropertyDetails(attributes, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ attributes, NORMAL, Representation::None());
MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
if (!maybe->To(&new_dictionary)) return maybe;
if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
}
uint32_t result = pos;
- PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails no_details = PropertyDetails(
+ NONE, NORMAL, Representation::None());
Heap* heap = GetHeap();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
heap->AllocateJSGlobalPropertyCell(heap->the_hole_value());
if (!maybe_cell->ToObject(&cell)) return maybe_cell;
}
- PropertyDetails details(NONE, NORMAL);
+ PropertyDetails details(NONE, NORMAL, Representation::None());
details = details.AsDeleted();
Object* dictionary;
{ MaybeObject* maybe_dictionary =
if (Dictionary<Shape, Key>::IsKey(Dictionary<Shape, Key>::KeyAt(i))) {
int enum_index = Smi::cast(enumeration_order->get(pos++))->value();
PropertyDetails details = DetailsAt(i);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
+ PropertyDetails new_details = PropertyDetails(
+ details.attributes(), details.type(),
+ Representation::None(), enum_index);
DetailsAtPut(i, new_details);
}
}
{ MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key);
if (!maybe_k->ToObject(&k)) return maybe_k;
}
- PropertyDetails details = PropertyDetails(NONE, NORMAL);
+ PropertyDetails details = PropertyDetails(
+ NONE, NORMAL, Representation::None());
return Dictionary<Shape, Key>::cast(obj)->AddEntry(key, value, details,
Dictionary<Shape, Key>::Hash(key));
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = NextEnumerationIndex();
- details = PropertyDetails(details.attributes(), details.type(), index);
+ details = PropertyDetails(details.attributes(), details.type(),
+ Representation::None(), index);
SetNextEnumerationIndex(index + 1);
}
SetEntry(entry, k, value, details);
MaybeObject* UnseededNumberDictionary::AddNumberEntry(uint32_t key,
Object* value) {
SLOW_ASSERT(this->FindEntry(key) == kNotFound);
- return Add(key, value, PropertyDetails(NONE, NORMAL));
+ return Add(key, value, PropertyDetails(NONE, NORMAL, Representation::None()));
}
// Preserve enumeration index.
details = PropertyDetails(details.attributes(),
details.type(),
+ Representation::None(),
DetailsAt(entry).dictionary_index());
MaybeObject* maybe_object_key =
SeededNumberDictionaryShape::AsObject(GetHeap(), key);
FieldDescriptor d(key,
current_offset++,
details.attributes(),
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged(),
enumeration_index);
descriptors->Set(enumeration_index - 1, &d, witness);
} else if (type == CALLBACKS) {
inline double Number();
inline bool IsNaN();
+  // Picks the most specific representation this value can be stored with:
+  // Smi for smis (when FLAG_track_fields), Double for heap numbers (when
+  // FLAG_track_double_fields), otherwise Tagged.
+  inline Representation OptimalRepresentation() {
+    if (FLAG_track_fields && IsSmi()) {
+      return Representation::Smi();
+    } else if (FLAG_track_double_fields && IsHeapNumber()) {
+      return Representation::Double();
+    } else {
+      return Representation::Tagged();
+    }
+  }
+
+  // Returns true if this value can be stored in a field with the given
+  // representation without generalizing it. Any representation other than
+  // Smi/Double accepts everything, and checks are skipped entirely when the
+  // corresponding tracking flag is off.
+  inline bool FitsRepresentation(Representation representation) {
+    if (FLAG_track_fields && representation.IsSmi()) {
+      return IsSmi();
+    } else if (FLAG_track_double_fields && representation.IsDouble()) {
+      return IsNumber();
+    }
+    return true;
+  }
+
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
inline bool HasValidElements();
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
- static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
- inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
static void TransitionToMap(Handle<JSObject> object, Handle<Map> map);
inline MUST_USE_RESULT MaybeObject* TransitionToMap(Map* map);
+ static void MigrateInstance(Handle<JSObject> instance);
+ inline MUST_USE_RESULT MaybeObject* MigrateInstance();
+
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
Name* key,
Object* new_value,
PropertyAttributes attributes);
+ MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map);
+ MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation(
+ int modify_index,
+ Representation new_representation);
+
// Add a property to a fast-case object.
MUST_USE_RESULT MaybeObject* AddFastProperty(
Name* name,
inline Name* GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
+ inline void InitializeRepresentations(Representation representation);
+ inline void SetRepresentation(int descriptor_number,
+ Representation representation);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
DescriptorArray* src,
int src_index,
const WhitenessWitness&);
+ MUST_USE_RESULT MaybeObject* Merge(int verbatim,
+ int valid,
+ int new_size,
+ DescriptorArray* other);
MUST_USE_RESULT MaybeObject* CopyUpTo(int enumeration_index);
// Group of code that weakly embed this map and depend on being
// deoptimized when the map is garbage collected.
kWeaklyEmbeddedGroup,
+ // Group of code that embed a transition to this map, and depend on being
+ // deoptimized when the transition is replaced by a new version.
+ kTransitionGroup,
// Group of code that omit run-time prototype checks for prototypes
// described by this map. The group is deoptimized whenever an object
// described by this map changes shape (and transitions to a new map),
class DictionaryMap: public BitField<bool, 24, 1> {};
class OwnsDescriptors: public BitField<bool, 25, 1> {};
class IsObserved: public BitField<bool, 26, 1> {};
+ class Deprecated: public BitField<bool, 27, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
inline void ClearTransitions(Heap* heap,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ void DeprecateTransitionTree();
+ void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
+
+ Map* FindRootMap();
+ Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
+ Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+
+ int NumberOfFields();
+
+ bool InstancesNeedRewriting(int target_number_of_fields,
+ int target_inobject,
+ int target_unused);
+ static Handle<Map> GeneralizeRepresentation(
+ Handle<Map> map,
+ int modify_index,
+ Representation new_representation);
+ MUST_USE_RESULT MaybeObject* GeneralizeRepresentation(
+ int modify_index,
+ Representation representation);
+ MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations();
+
// Tells whether the map is attached to SharedFunctionInfo
// (for inobject slack tracking).
inline void set_attached_to_shared_function_info(bool value);
inline void set_owns_descriptors(bool is_shared);
inline bool is_observed();
inline void set_is_observed(bool is_observed);
+ inline void deprecate();
+ inline bool is_deprecated();
+ inline bool CanBeDeprecated();
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
Name* name,
TransitionFlag flag,
int descriptor_index);
+ MUST_USE_RESULT MaybeObject* CopyInstallDescriptors(
+ int new_descriptor,
+ DescriptorArray* descriptors);
MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor);
MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor,
// instance descriptors.
MUST_USE_RESULT MaybeObject* Copy();
- // Returns the property index for name (only valid for FAST MODE).
- int PropertyIndexFor(Name* name);
-
// Returns the next free property index (only valid for FAST MODE).
int NextFreePropertyIndex();
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
+ bool EquivalentToForTransition(Map* other);
+
// Compares this map to another to see if they describe equivalent objects.
// If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
// it had exactly zero inobject properties.
};
+// Compact description of how a value (e.g. a map field's contents) is
+// represented. Kind is ordered from least to most general:
+// None < Smi < Integer32 < Double < Tagged; is_more_general_than() and
+// generalize() rely on this ordering. kExternal lies outside the lattice and
+// must not take part in generality comparisons (enforced by ASSERTs).
+class Representation {
+ public:
+  enum Kind {
+    kNone,
+    kSmi,
+    kInteger32,
+    kDouble,
+    kTagged,
+    kExternal,
+    kNumRepresentations
+  };
+
+  Representation() : kind_(kNone) { }
+
+  static Representation None() { return Representation(kNone); }
+  static Representation Tagged() { return Representation(kTagged); }
+  static Representation Smi() { return Representation(kSmi); }
+  static Representation Integer32() { return Representation(kInteger32); }
+  static Representation Double() { return Representation(kDouble); }
+  static Representation External() { return Representation(kExternal); }
+
+  static Representation FromKind(Kind kind) { return Representation(kind); }
+
+  bool Equals(const Representation& other) {
+    return kind_ == other.kind_;
+  }
+
+  // Strict generality comparison based on the Kind ordering above.
+  bool is_more_general_than(const Representation& other) {
+    ASSERT(kind_ != kExternal);
+    ASSERT(other.kind_ != kExternal);
+    return kind_ > other.kind_;
+  }
+
+  // Returns the more general of the two representations.
+  Representation generalize(Representation other) {
+    if (is_more_general_than(other)) {
+      return *this;
+    } else {
+      return other;
+    }
+  }
+
+  Kind kind() const { return static_cast<Kind>(kind_); }
+  bool IsNone() const { return kind_ == kNone; }
+  bool IsTagged() const { return kind_ == kTagged; }
+  bool IsSmi() const { return kind_ == kSmi; }
+  bool IsInteger32() const { return kind_ == kInteger32; }
+  bool IsDouble() const { return kind_ == kDouble; }
+  bool IsExternal() const { return kind_ == kExternal; }
+  bool IsSpecialization() const {
+    return kind_ == kInteger32 || kind_ == kDouble;
+  }
+  const char* Mnemonic() const;
+
+ private:
+  explicit Representation(Kind k) : kind_(k) { }
+
+  // Make sure kind fits in int8.
+  STATIC_ASSERT(kNumRepresentations <= (1 << kBitsPerByte));
+
+  int8_t kind_;
+};
+
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
PropertyDetails(PropertyAttributes attributes,
PropertyType type,
+ Representation representation,
int index = 0) {
value_ = TypeField::encode(type)
| AttributesField::encode(attributes)
+ | RepresentationField::encode(EncodeRepresentation(representation))
| DictionaryStorageField::encode(index);
ASSERT(type == this->type());
ASSERT(attributes == this->attributes());
- ASSERT(index == this->dictionary_index());
+ if (representation.IsNone()) {
+ ASSERT(index == this->dictionary_index());
+ } else {
+ ASSERT(index == this->descriptor_index());
+ }
}
int pointer() { return DescriptorPointer::decode(value_); }
PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+ PropertyDetails CopyWithRepresentation(Representation representation) {
+ return PropertyDetails(value_, representation);
+ }
+
// Conversion for storing details as Object*.
explicit inline PropertyDetails(Smi* smi);
inline Smi* AsSmi();
+ static uint8_t EncodeRepresentation(Representation representation) {
+ ASSERT(representation.kind() <= Representation::kTagged);
+ if (representation.kind() < Representation::kInteger32) {
+ return representation.kind();
+ } else {
+ return representation.kind() - 1;
+ }
+ }
+
+ static Representation DecodeRepresentation(uint32_t bits) {
+ ASSERT(bits <= Representation::kTagged);
+ if (bits >= Representation::kInteger32) bits += 1;
+ return Representation::FromKind(static_cast<Representation::Kind>(bits));
+ }
+
PropertyType type() { return TypeField::decode(value_); }
PropertyAttributes attributes() const {
return DescriptorStorageField::decode(value_);
}
+ Representation representation() {
+ return DecodeRepresentation(RepresentationField::decode(value_));
+ }
+
inline PropertyDetails AsDeleted();
static bool IsValidIndex(int index) {
class DictionaryStorageField: public BitField<uint32_t, 7, 24> {};
class DescriptorStorageField: public BitField<uint32_t, 7, 11> {};
class DescriptorPointer: public BitField<uint32_t, 18, 11> {};
+ class RepresentationField: public BitField<uint32_t, 29, 2> {};
static const int kInitialIndex = 1;
private:
PropertyDetails(int value, int pointer) {
- value_ = DescriptorPointer::update(value, pointer);
+ value_ = DescriptorPointer::update(value, pointer);
+ }
+ PropertyDetails(int value, Representation representation) {
+ value_ = RepresentationField::update(
+ value, EncodeRepresentation(representation));
}
uint32_t value_;
#endif
void SetEnumerationIndex(int index) {
- details_ = PropertyDetails(details_.attributes(), details_.type(), index);
+ details_ = PropertyDetails(details_.attributes(), details_.type(),
+ details_.representation(), index);
}
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
Object* value,
PropertyAttributes attributes,
PropertyType type,
+ Representation representation,
int index)
: key_(key),
value_(value),
- details_(attributes, type, index) { }
+ details_(attributes, type, representation, index) { }
friend class DescriptorArray;
};
FieldDescriptor(Name* key,
int field_index,
PropertyAttributes attributes,
+ Representation representation,
int index = 0)
- : Descriptor(key, Smi::FromInt(field_index), attributes, FIELD, index) {}
+ : Descriptor(key, Smi::FromInt(field_index), attributes,
+ FIELD, representation, index) {}
};
JSFunction* function,
PropertyAttributes attributes,
int index)
- : Descriptor(key, function, attributes, CONSTANT_FUNCTION, index) {}
+ : Descriptor(key, function, attributes,
+ CONSTANT_FUNCTION, Representation::Tagged(), index) {}
};
Object* foreign,
PropertyAttributes attributes,
int index = 0)
- : Descriptor(key, foreign, attributes, CALLBACKS, index) {}
+ : Descriptor(key, foreign, attributes, CALLBACKS,
+ Representation::Tagged(), index) {}
};
lookup_type_(NOT_FOUND),
holder_(NULL),
cacheable_(true),
- details_(NONE, NONEXISTENT) {
+ details_(NONE, NONEXISTENT, Representation::None()) {
isolate->SetTopLookupResult(this);
}
number_ = number;
}
+ bool CanHoldValue(Handle<Object> value) {
+ return value->FitsRepresentation(details_.representation());
+ }
+
void TransitionResult(JSObject* holder, int number) {
lookup_type_ = TRANSITION_TYPE;
- details_ = PropertyDetails(NONE, TRANSITION);
+ details_ = PropertyDetails(NONE, TRANSITION, Representation::None());
holder_ = holder;
number_ = number;
}
void HandlerResult(JSProxy* proxy) {
lookup_type_ = HANDLER_TYPE;
holder_ = proxy;
- details_ = PropertyDetails(NONE, HANDLER);
+ details_ = PropertyDetails(NONE, HANDLER, Representation::None());
cacheable_ = false;
}
void InterceptorResult(JSObject* holder) {
lookup_type_ = INTERCEPTOR_TYPE;
holder_ = holder;
- details_ = PropertyDetails(NONE, INTERCEPTOR);
+ details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::None());
}
void NotFound() {
lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, NONEXISTENT);
+ details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
holder_ = NULL;
}
return details_.type();
}
+ Representation representation() {
+ ASSERT(IsFound());
+ return details_.representation();
+ }
+
PropertyAttributes GetAttributes() {
ASSERT(!IsTransition());
ASSERT(IsFound());
boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
}
+ // TODO(verwaest): Support tracking representations in the boilerplate.
for (int index = 0; index < length; index +=2) {
Handle<Object> key(constant_properties->get(index+0), isolate);
Handle<Object> value(constant_properties->get(index+1), isolate);
PropertyDetails new_details(
static_cast<PropertyAttributes>(details.attributes() | READ_ONLY),
details.type(),
+ Representation::None(),
details.dictionary_index());
function->property_dictionary()->DetailsAtPut(entry, new_details);
}
}
-static void TrySettingInlineConstructStub(Isolate* isolate,
- Handle<JSFunction> function) {
- Handle<Object> prototype = isolate->factory()->null_value();
- if (function->has_instance_prototype()) {
- prototype = Handle<Object>(function->instance_prototype(), isolate);
- }
- if (function->shared()->CanGenerateInlineConstructor(*prototype)) {
- ConstructStubCompiler compiler(isolate);
- Handle<Code> code = compiler.CompileConstructStub(function);
- function->shared()->set_construct_stub(*code);
- }
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObject) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
shared->CompleteInobjectSlackTracking();
}
- bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = isolate->factory()->NewJSObject(function);
RETURN_IF_EMPTY_HANDLE(isolate, result);
- // Delay setting the stub if inobject slack tracking is in progress.
- if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
- TrySettingInlineConstructStub(isolate, function);
- }
isolate->counters()->constructed_objects()->Increment();
isolate->counters()->constructed_objects_runtime()->Increment();
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->shared()->CompleteInobjectSlackTracking();
- TrySettingInlineConstructStub(isolate, function);
return isolate->heap()->undefined_value();
}
}
}
details->set(0, element_or_char);
- details->set(1, PropertyDetails(NONE, NORMAL).AsSmi());
+ details->set(
+ 1, PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
{ MaybeObject* maybe_dictionary = name_dictionary->Add(
String::cast(name_string),
Smi::FromInt(i),
- PropertyDetails(NONE, NORMAL));
+ PropertyDetails(NONE, NORMAL, Representation::None()));
if (!maybe_dictionary->ToObject(&dictionary)) {
// Non-recoverable failure. Calling code must restart heap
// initialization.
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
+ if (!stub.is_null()) {
+ MapHandleList embedded_maps;
+ stub->FindAllMaps(&embedded_maps);
+ for (int i = 0; i < embedded_maps.length(); i++) {
+ if (embedded_maps.at(i).is_identical_to(transition)) {
+ return stub;
+ }
+ }
+ }
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
- if (!stub.is_null()) return stub;
+ if (!stub.is_null()) {
+ MapHandleList embedded_maps;
+ stub->FindAllMaps(&embedded_maps);
+ for (int i = 0; i < embedded_maps.length(); i++) {
+ if (embedded_maps.at(i).is_identical_to(transition)) {
+ return stub;
+ }
+ }
+ }
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> code =
Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
CodeHandleList* handlers,
+ int number_of_valid_maps,
Handle<Name> name) {
LoadStubCompiler ic_compiler(isolate_);
- Code::StubType type = handlers->length() == 1 ? handlers->at(0)->type()
- : Code::NORMAL;
+ Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type()
+ : Code::NORMAL;
Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
receiver_maps, handlers, name, type, PROPERTY);
return ic;
Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
CodeHandleList* handlers,
+ int number_of_valid_maps,
Handle<Name> name);
// Finds the Code object stored in the Heap::non_monomorphic_cache().
}
}
result->NoIncrementalWriteBarrierSet(insertion_index, name, target);
+ result->set_back_pointer_storage(back_pointer_storage());
return result;
}
prototype_maps_.at(i)->AddDependentCode(
DependentCode::kPrototypeCheckGroup, code);
}
+ for (int i = 0 ; i < transition_maps_.length(); i++) {
+ transition_maps_.at(i)->AddDependentCode(
+ DependentCode::kTransitionGroup, code);
+ }
}
+// Loads a named field either from the object itself (in-object property) or
+// from its out-of-line properties backing store. When the field is tracked
+// as Double, the stored value may be either a smi or a heap number, so it is
+// first loaded into a GP register and then converted/unboxed into the XMM
+// result register.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
+  if (!FLAG_track_double_fields) {
+    ASSERT(!instr->hydrogen()->representation().IsDouble());
+  }
+  // For double fields the result is an XMM register, so the tagged value
+  // goes through the GP temp; otherwise load straight into the result.
+  Register temp = instr->hydrogen()->representation().IsDouble()
+      ? ToRegister(instr->temp()) : ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
-    __ movq(result, FieldOperand(object, instr->hydrogen()->offset()));
+    __ movq(temp, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
-    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ movq(result, FieldOperand(result, instr->hydrogen()->offset()));
+    __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset));
+    __ movq(temp, FieldOperand(temp, instr->hydrogen()->offset()));
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    // Unbox: a smi is converted via cvtlsi2sd, a heap number is read
+    // directly from its value slot.
+    Label load_from_heap_number, done;
+    XMMRegister result = ToDoubleRegister(instr->result());
+    __ JumpIfNotSmi(temp, &load_from_heap_number);
+    __ SmiToInteger32(temp, temp);
+    __ cvtlsi2sd(result, temp);
+    __ jmp(&done);
+    __ bind(&load_from_heap_number);
+    __ movsd(result, FieldOperand(temp, HeapNumber::kValueOffset));
+    __ bind(&done);
  }
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+ Representation representation = instr->representation();
+
Register object = ToRegister(instr->object());
+
int offset = instr->offset();
- if (!instr->transition().is_null()) {
+ if (FLAG_track_fields && representation.IsSmi()) {
+ if (instr->value()->IsConstantOperand()) {
+ LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+ if (!IsInteger32Constant(operand_value)) {
+ DeoptimizeIf(no_condition, instr->environment());
+ }
+ } else {
+ Register value = ToRegister(instr->value());
+ __ Integer32ToSmi(value, value);
+ }
+ } else if (FLAG_track_double_fields && representation.IsDouble() &&
+ !instr->hydrogen()->value()->type().IsSmi() &&
+ !instr->hydrogen()->value()->type().IsHeapNumber()) {
+ Register value = ToRegister(instr->value());
+ Label do_store;
+ __ JumpIfSmi(value, &do_store);
+ Handle<Map> map(isolate()->factory()->heap_number_map());
+ DoCheckMapCommon(value, map, REQUIRE_EXACT_MAP, instr);
+ __ bind(&do_store);
+ }
+
+ Handle<Map> transition = instr->transition();
+ if (!transition.is_null()) {
+ if (transition->CanBeDeprecated()) {
+ transition_maps_.Add(transition, info()->zone());
+ }
if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
- __ Move(FieldOperand(object, HeapObject::kMapOffset),
- instr->transition());
+ __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
- __ Move(kScratchRegister, instr->transition());
+ __ Move(kScratchRegister, transition);
__ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteField(object,
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
prototype_maps_(0, info->zone()),
+ transition_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
ZoneList<Handle<Map> > prototype_maps_;
+ ZoneList<Handle<Map> > transition_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
-  ASSERT(instr->representation().IsTagged());
  LOperand* obj = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+  // A double-represented field needs a GP temp register in which to hold
+  // the tagged value before it is unboxed into the XMM result register.
+  // Such fields only occur when double field tracking is enabled.
+  LOperand* temp = instr->representation().IsDouble() ? TempRegister() : NULL;
+  ASSERT(temp == NULL || FLAG_track_double_fields);
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj, temp));
}
}
bool can_be_constant = instr->value()->IsConstant() &&
- !HConstant::cast(instr->value())->InNewSpace();
+ !HConstant::cast(instr->value())->InNewSpace() &&
+ !(FLAG_track_double_fields && instr->field_representation().IsDouble());
LOperand* val;
if (needs_write_barrier) {
val = UseTempRegister(instr->value());
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
+ } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ val = UseTempRegister(instr->value());
} else {
val = UseRegister(instr->value());
}
LOperand* temp = (!instr->is_in_object() || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- return new(zone()) LStoreNamedField(obj, val, temp);
+ LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if ((FLAG_track_fields && instr->field_representation().IsSmi()) ||
+ (FLAG_track_double_fields && instr->field_representation().IsDouble())) {
+ return AssignEnvironment(result);
+ }
+ return result;
}
};
-class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
+// Now carries one temp register: a GP scratch used when the field is
+// tracked as Double, so the tagged value can be unboxed into the XMM
+// result register.
+class LLoadNamedField: public LTemplateInstruction<1, 1, 1> {
 public:
-  explicit LLoadNamedField(LOperand* object) {
+  explicit LLoadNamedField(LOperand* object, LOperand* temp) {
    inputs_[0] = object;
+    temps_[0] = temp;
  }
  LOperand* object() { return inputs_[0]; }
+  // NULL unless the field representation is Double (see the chunk builder).
+  LOperand* temp() { return temps_[0]; }
  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
  bool is_in_object() { return hydrogen()->is_in_object(); }
  int offset() { return hydrogen()->offset(); }
  Handle<Map> transition() const { return hydrogen()->transition(); }
+  // Representation the field is tracked with on the hydrogen instruction.
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
};
}
+// Jumps to |if_deprecated| if |map| is deprecated, i.e. the Deprecated bit
+// of its bit_field3 (stored as a smi) is set. Emits no code at all for maps
+// that can never be deprecated. Clobbers |scratch|.
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+                                        Register scratch,
+                                        Label* if_deprecated) {
+  if (map->CanBeDeprecated()) {
+    Move(scratch, map);
+    movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+    SmiToInteger32(scratch, scratch);
+    and_(scratch, Immediate(Map::Deprecated::kMask));
+    j(not_zero, if_deprecated);
+  }
+}
+
+
void MacroAssembler::JumpIfBlack(Register object,
Register bitmap_scratch,
Register mask_scratch,
Label* condition_met,
Label::Distance condition_met_distance = Label::kFar);
+ void CheckMapDeprecated(Handle<Map> map,
+ Register scratch,
+ Label* if_deprecated);
+
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object,
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ int descriptor = transition->LastAdded();
+ DescriptorArray* descriptors = transition->instance_descriptors();
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ Representation representation = details.representation();
+ ASSERT(!representation.IsNone());
+
+ // Ensure no transitions to deprecated maps are followed.
+ __ CheckMapDeprecated(transition, scratch1, miss_label);
+
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movq(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movq(FieldOperand(scratch1, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
}
// Return the value (register rax).
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ Representation representation = lookup->representation();
+ ASSERT(!representation.IsNone());
+ if (FLAG_track_fields && representation.IsSmi()) {
+ __ JumpIfNotSmi(value_reg, miss_label);
+ } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ Label do_store;
+ __ JumpIfSmi(value_reg, &do_store);
+ __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
+ miss_label, DONT_DO_SMI_CHECK, REQUIRE_EXACT_MAP);
+ __ bind(&do_store);
+ }
+
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movq(FieldOperand(receiver_reg, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ }
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movq(FieldOperand(scratch1, offset), value_reg);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, value_reg);
- __ RecordWriteField(
- scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ if (!FLAG_track_fields || !representation.IsSmi()) {
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
}
// Return the value (register rax).
Register map_reg = scratch1();
__ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
+ int number_of_handled_maps = 0;
for (int current = 0; current < receiver_count; ++current) {
- // Check map and tail call if there's a match
- __ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ Handle<Map> map = receiver_maps->at(current);
+ if (!map->is_deprecated()) {
+ number_of_handled_maps++;
+ // Check map and tail call if there's a match
+ __ Cmp(map_reg, receiver_maps->at(current));
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+ }
}
+ ASSERT(number_of_handled_maps > 0);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
InlineCacheState state =
- receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
return GetICCode(kind(), type, name, state);
}
}
-TEST(Bug618) {
+TEST(Regress618) {
const char* source = "function C1() {"
" this.x = 23;"
"};"
// deopt in simple cases.
function testExactMapHoisting(a) {
var object = new Object();
- a.foo = 0;
+ a.foo = {};
a[0] = 0;
a[1] = 1;
var count = 3;
--- /dev/null
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --track-fields --track-double-fields --allow-natives-syntax
+
+// Test transitions caused by changes to field representations.
+
+// Builds a fresh object whose three fields are initialized with smis.
+function create_smi_object() {
+  var o = {};
+  o.x = 1;
+  o.y = 2;
+  o.z = 3;
+  return o;
+}
+
+var o1 = create_smi_object();
+var o2 = create_smi_object();
+
+// o1,o2 are smi, smi, smi
+assertTrue(%HaveSameMap(o1, o2));
+o1.y = 1.3;
+// o1 is smi, double, smi
+assertFalse(%HaveSameMap(o1, o2));
+o2.y = 1.5;
+// o2 is smi, double, smi
+assertTrue(%HaveSameMap(o1, o2));
+
+// o3 is initialized as smi, double, smi
+var o3 = create_smi_object();
+assertTrue(%HaveSameMap(o1, o3));
+
+// Assigns v to 63 distinct named properties of o.
+function set_large(o, v) {
+  o.x01 = v; o.x02 = v; o.x03 = v; o.x04 = v; o.x05 = v; o.x06 = v; o.x07 = v;
+  o.x08 = v; o.x09 = v; o.x10 = v; o.x11 = v; o.x12 = v; o.x13 = v; o.x14 = v;
+  o.x15 = v; o.x16 = v; o.x17 = v; o.x18 = v; o.x19 = v; o.x20 = v; o.x21 = v;
+  o.x22 = v; o.x23 = v; o.x24 = v; o.x25 = v; o.x26 = v; o.x27 = v; o.x28 = v;
+  o.x29 = v; o.x30 = v; o.x31 = v; o.x32 = v; o.x33 = v; o.x34 = v; o.x35 = v;
+  o.x36 = v; o.x37 = v; o.x38 = v; o.x39 = v; o.x40 = v; o.x41 = v; o.x42 = v;
+  o.y01 = v; o.y02 = v; o.y03 = v; o.y04 = v; o.y05 = v; o.y06 = v; o.y07 = v;
+  o.y08 = v; o.y09 = v; o.y10 = v; o.y11 = v; o.y12 = v; o.y13 = v; o.y14 = v;
+  o.y15 = v; o.y16 = v; o.y17 = v; o.y18 = v; o.y19 = v; o.y20 = v; o.y21 = v;
+}
+
+// Check that large object migrations work.
+var o4 = {};
+// All smi.
+set_large(o4, 0);
+assertTrue(%HasFastProperties(o4));
+// All double.
+set_large(o4, 1.5);
+// o5 is immediately allocated with doubles.
+var o5 = {};
+set_large(o5, 0);
+assertTrue(%HaveSameMap(o4, o5));
+
+// Same as create_smi_object but with a distinct set of property names, so
+// it gets its own transition chain.
+function create_smi_object2() {
+  var o = {};
+  o.a = 1;
+  o.b = 2;
+  o.c = 3;
+  return o;
+}
+
+// All smi
+var o6 = create_smi_object2();
+var o7 = create_smi_object2();
+
+assertTrue(%HaveSameMap(o6, o7));
+// Smi, double, smi.
+o6.b = 1.5;
+assertFalse(%HaveSameMap(o6, o7));
+// Smi, double, object.
+o7.c = {};
+assertFalse(%HaveSameMap(o6, o7));
+// Smi, double, object.
+o6.c = {};
+assertTrue(%HaveSameMap(o6, o7));