static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 161;
+ static const int kEmptyStringRootIndex = 160;
// The external allocation limit should be below 256 MB on all architectures
// to avoid that resource-constrained embedders run low on memory.
static const int kNodeIsIndependentShift = 4;
static const int kNodeIsPartiallyDependentShift = 5;
- static const int kJSObjectType = 0xbc;
+ static const int kJSObjectType = 0xbb;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
- static const int kForeignType = 0x88;
+ static const int kForeignType = 0x87;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode,
- MutableMode mode) {
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
- Heap::RootListIndex map_index = mode == MUTABLE
- ? Heap::kMutableHeapNumberMapRootIndex
- : Heap::kHeapNumberMapRootIndex;
- AssertIsRoot(heap_number_map, map_index);
-
// Store heap number map in the allocated object.
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (tagging_mode == TAG_RESULT) {
str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
- MutableMode mode = IMMUTABLE);
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
DwVfpRegister value,
Register scratch1,
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
- __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
- TAG_RESULT, MUTABLE);
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
Register scratch1,
Register scratch2,
CPURegister value,
- CPURegister heap_number_map,
- MutableMode mode) {
+ CPURegister heap_number_map) {
ASSERT(!value.IsValid() || value.Is64Bits());
UseScratchRegisterScope temps(this);
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
- Heap::RootListIndex map_index = mode == MUTABLE
- ? Heap::kMutableHeapNumberMapRootIndex
- : Heap::kHeapNumberMapRootIndex;
-
// Prepare the heap number map.
if (!heap_number_map.IsValid()) {
// If we have a valid value register, use the same type of register to store
} else {
heap_number_map = scratch1;
}
- LoadRoot(heap_number_map, map_index);
+ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
if (emit_debug_code()) {
Register map;
} else {
map = Register(heap_number_map);
}
- AssertRegisterIsRoot(map, map_index);
+ AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
}
// Store the heap number map and the value in the allocated object.
Register scratch1,
Register scratch2,
CPURegister value = NoFPReg,
- CPURegister heap_number_map = NoReg,
- MutableMode mode = IMMUTABLE);
+ CPURegister heap_number_map = NoReg);
// ---------------------------------------------------------------------------
// Support functions.
__ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
__ Bind(&do_store);
- __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
- NoReg, MUTABLE);
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double);
}
// Stub never generated for non-global objects that require access checks.
Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
Handle<Map>::cast(MaterializeNextValue()));
switch (map->instance_type()) {
- case MUTABLE_HEAP_NUMBER_TYPE:
case HEAP_NUMBER_TYPE: {
// Reuse the HeapNumber value directly as it is already properly
- // tagged and skip materializing the HeapNumber explicitly. Turn mutable
- // heap numbers immutable.
+ // tagged and skip materializing the HeapNumber explicitly.
Handle<Object> object = MaterializeNextValue();
if (object_index < prev_materialized_count_) {
materialized_objects_->Add(Handle<Object>(
Handle<Object> Deoptimizer::MaterializeNextValue() {
int value_index = materialization_value_index_++;
Handle<Object> value = materialized_values_->at(value_index);
- if (value->IsMutableHeapNumber()) {
- HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map());
- }
if (*value == isolate_->heap()->arguments_marker()) {
value = MaterializeNextHeapObject();
}
// TODO(jarin) this should be unified with the code in
// Deoptimizer::MaterializeNextHeapObject()
switch (map->instance_type()) {
- case MUTABLE_HEAP_NUMBER_TYPE:
case HEAP_NUMBER_TYPE: {
// Reuse the HeapNumber value directly as it is already properly
// tagged and skip materializing the HeapNumber explicitly.
// We need to distinguish the minus zero value and this cannot be
// done after conversion to int. Doing this by comparing bit
// patterns is faster than using fpclassify() et al.
- if (IsMinusZero(value)) return NewHeapNumber(-0.0, IMMUTABLE, pretenure);
+ if (IsMinusZero(value)) return NewHeapNumber(-0.0, pretenure);
int int_value = FastD2I(value);
if (value == int_value && Smi::IsValid(int_value)) {
}
// Materialize the value in the heap.
- return NewHeapNumber(value, IMMUTABLE, pretenure);
+ return NewHeapNumber(value, pretenure);
}
Handle<Object> Factory::NewNumberFromInt(int32_t value,
PretenureFlag pretenure) {
if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
- // Bypass NewNumber to avoid various redundant checks.
- return NewHeapNumber(FastI2D(value), IMMUTABLE, pretenure);
+ // Bypass NumberFromDouble to avoid various redundant checks.
+ return NewHeapNumber(FastI2D(value), pretenure);
}
if (int32v >= 0 && Smi::IsValid(int32v)) {
return handle(Smi::FromInt(int32v), isolate());
}
- return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure);
+ return NewHeapNumber(FastUI2D(value), pretenure);
}
Handle<HeapNumber> Factory::NewHeapNumber(double value,
- MutableMode mode,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateHeapNumber(value, mode, pretenure),
- HeapNumber);
+ isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber);
}
return NewNumber(static_cast<double>(value), pretenure);
}
Handle<HeapNumber> NewHeapNumber(double value,
- MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
+
// These objects are used by the api to create env-independent data
// structures in the heap.
inline Handle<JSObject> NewNeanderObject() {
for (int i = 0; i < real_size; i++) {
switch (descs->GetType(i)) {
case FIELD: {
- Representation r = descs->GetDetails(i).representation();
- if (r.IsSmi() || r.IsDouble()) break;
int index = descs->GetFieldIndex(i);
Name* k = descs->GetKey(i);
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
- ALLOCATE_MAP(
- MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize, mutable_heap_number)
ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
AllocationResult Heap::AllocateHeapNumber(double value,
- MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
if (!allocation.To(&result)) return allocation;
}
- Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
- HeapObject::cast(result)->set_map_no_write_barrier(map);
+ result->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
HandleScope scope(isolate());
Factory* factory = isolate()->factory();
- // The -0 value must be set before NewNumber works.
- set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
+ // The -0 value must be set before NumberFromDouble works.
+ set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
- set_nan_value(*factory->NewHeapNumber(OS::nan_value(), IMMUTABLE, TENURED));
- set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+ set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
+ set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
// The hole has not been created yet, but we want to put something
// predictable in the gaps in the string table, so lets make that Smi zero.
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
V(Map, heap_number_map, HeapNumberMap) \
- V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, code_map, CodeMap) \
V(shared_function_info_map) \
V(meta_map) \
V(heap_number_map) \
- V(mutable_heap_number_map) \
V(native_context_map) \
V(fixed_array_map) \
V(code_map) \
// Allocated a HeapNumber from value.
MUST_USE_RESULT AllocationResult AllocateHeapNumber(
- double value,
- MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
+ double value, PretenureFlag pretenure = NOT_TENURED);
// Allocate a byte array of the specified length
MUST_USE_RESULT AllocationResult AllocateByteArray(
HInstruction* heap_number = Add<HAllocate>(heap_number_size,
HType::HeapObject(),
NOT_TENURED,
- MUTABLE_HEAP_NUMBER_TYPE);
- AddStoreMapConstant(
- heap_number, isolate()->factory()->mutable_heap_number_map());
+ HEAP_NUMBER_TYPE);
+ AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
value);
instr = New<HStoreNamedField>(checked_object->ActualValue(),
// 2) we can just use the mode of the parent object for pretenuring
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapObject(),
- pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE);
+ pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
- isolate()->factory()->mutable_heap_number_map());
- // Unwrap the mutable heap number from the boilerplate.
- HValue* double_value =
- Add<HConstant>(Handle<HeapNumber>::cast(value)->value());
- Add<HStoreNamedField>(
- double_box, HObjectAccess::ForHeapNumberValue(), double_value);
+ isolate()->factory()->heap_number_map());
+ Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
+ Add<HConstant>(value));
value_instruction = double_box;
} else if (representation.IsSmi()) {
value_instruction = value->IsUninitialized()
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
- Label* gc_required,
- MutableMode mode) {
+ Label* gc_required) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- Handle<Map> map = mode == MUTABLE
- ? isolate()->factory()->mutable_heap_number_map()
- : isolate()->factory()->heap_number_map();
-
// Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
}
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ Label* gc_required);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
- if (expected_representation.IsDouble()) {
- value = Object::NewStorageFor(isolate(), value,
- expected_representation);
+ // If the target representation is double but the value is a smi,
+ // allocate a fresh HeapNumber box for it; a value that is already
+ // a boxed double is reused as-is.
+ if (value->IsSmi() && expected_representation.IsDouble()) {
+ value = factory()->NewHeapNumber(
+ Handle<Smi>::cast(value)->value());
} else if (expected_representation.IsHeapObject() &&
!target->instance_descriptors()->GetFieldType(
descriptor)->NowContains(value)) {
switch (HeapObject::cast(*object)->map()->instance_type()) {
case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
if (deferred_string_key) SerializeDeferredKey(comma, key);
return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
case ODDBALL_TYPE:
case CODE_TYPE:
case FIXED_DOUBLE_ARRAY_TYPE:
case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
case INTERCEPTOR_INFO_TYPE:
case ODDBALL_TYPE:
case SCRIPT_TYPE:
Register scratch2,
Register heap_number_map,
Label* need_gc,
- TaggingMode tagging_mode,
- MutableMode mode) {
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
- Heap::RootListIndex map_index = mode == MUTABLE
- ? Heap::kMutableHeapNumberMapRootIndex
- : Heap::kHeapNumberMapRootIndex;
- AssertIsRoot(heap_number_map, map_index);
-
// Store heap number map in the allocated object.
+ AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
if (tagging_mode == TAG_RESULT) {
sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
} else {
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
- MutableMode mode = IMMUTABLE);
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
- __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
- TAG_RESULT, MUTABLE);
+ __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(scratch1, value_reg);
Map::cast(this)->MapVerify();
break;
case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberVerify();
break;
case FIXED_ARRAY_TYPE:
void HeapNumber::HeapNumberVerify() {
- CHECK(IsHeapNumber() || IsMutableHeapNumber());
+ CHECK(IsHeapNumber());
}
Representation r = descriptors->GetDetails(i).representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
Object* value = RawFastPropertyAt(index);
- if (r.IsDouble()) ASSERT(value->IsMutableHeapNumber());
+ if (r.IsDouble()) ASSERT(value->IsHeapNumber());
if (value->IsUninitialized()) continue;
if (r.IsSmi()) ASSERT(value->IsSmi());
if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
return handle(Smi::FromInt(0), isolate);
}
if (!representation.IsDouble()) return object;
- double value;
if (object->IsUninitialized()) {
- value = 0;
- } else if (object->IsMutableHeapNumber()) {
- value = HeapNumber::cast(*object)->value();
- } else {
- value = object->Number();
- }
- return isolate->factory()->NewHeapNumber(value, MUTABLE);
-}
-
-
-Handle<Object> Object::WrapForRead(Isolate* isolate,
- Handle<Object> object,
- Representation representation) {
- ASSERT(!object->IsUninitialized());
- if (!representation.IsDouble()) {
- ASSERT(object->FitsRepresentation(representation));
- return object;
+ return isolate->factory()->NewHeapNumber(0);
}
- return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
+ return isolate->factory()->NewHeapNumber(object->Number());
}
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(GlobalObject)
+CAST_ACCESSOR(HeapNumber)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
ACCESSORS(JSValue, value, Object, kValueOffset)
-HeapNumber* HeapNumber::cast(Object* object) {
- SLOW_ASSERT(object->IsHeapNumber() || object->IsMutableHeapNumber());
- return reinterpret_cast<HeapNumber*>(object);
-}
-
-
-const HeapNumber* HeapNumber::cast(const Object* object) {
- SLOW_ASSERT(object->IsHeapNumber() || object->IsMutableHeapNumber());
- return reinterpret_cast<const HeapNumber*>(object);
-}
-
-
ACCESSORS(JSDate, value, Object, kValueOffset)
ACCESSORS(JSDate, cache_stamp, Object, kCacheStampOffset)
ACCESSORS(JSDate, year, Object, kYearOffset)
case HEAP_NUMBER_TYPE:
HeapNumber::cast(this)->HeapNumberPrint(out);
break;
- case MUTABLE_HEAP_NUMBER_TYPE:
- PrintF(out, "<mutable ");
- HeapNumber::cast(this)->HeapNumberPrint(out);
- PrintF(out, ">");
- break;
case FIXED_DOUBLE_ARRAY_TYPE:
FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
break;
return kVisitJSFunction;
case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case EXTERNAL_##TYPE##_ARRAY_TYPE:
HeapNumber::cast(this)->HeapNumberPrint(accumulator);
accumulator->Put('>');
break;
- case MUTABLE_HEAP_NUMBER_TYPE:
- accumulator->Add("<MutableNumber: ");
- HeapNumber::cast(this)->HeapNumberPrint(accumulator);
- accumulator->Put('>');
- break;
case JS_PROXY_TYPE:
accumulator->Add("<JSProxy>");
break;
break;
case HEAP_NUMBER_TYPE:
- case MUTABLE_HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
void HeapNumber::HeapNumberPrint(FILE* out) {
- PrintF(out, "%.16g", value());
+ PrintF(out, "%.16g", Number());
}
// print that using vsnprintf (which may truncate but never allocate if
// there is no more space in the buffer).
EmbeddedVector<char, 100> buffer;
- SNPrintF(buffer, "%.16g", value());
+ SNPrintF(buffer, "%.16g", Number());
accumulator->Add("%s", buffer.start());
}
DescriptorArray* new_desc = target->instance_descriptors();
int limit = NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
- if (new_desc->GetDetails(i).representation().IsDouble() !=
- old_desc->GetDetails(i).representation().IsDouble()) {
+ if (new_desc->GetDetails(i).representation().IsDouble() &&
+ !old_desc->GetDetails(i).representation().IsDouble()) {
return true;
}
}
PropertyDetails details = new_map->GetLastDescriptorDetails();
Handle<Object> value;
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ value = isolate->factory()->NewHeapNumber(0);
} else {
value = isolate->factory()->uninitialized_value();
}
value = handle(Smi::FromInt(0), isolate);
}
value = Object::NewStorageFor(isolate, value, details.representation());
- } else if (old_details.representation().IsDouble() &&
- !details.representation().IsDouble()) {
- value = Object::WrapForRead(isolate, value, old_details.representation());
}
ASSERT(!(details.representation().IsDouble() && value->IsSmi()));
int target_index = new_descriptors->GetFieldIndex(i) - inobject;
if (details.type() != FIELD) continue;
Handle<Object> value;
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ value = isolate->factory()->NewHeapNumber(0);
} else {
value = isolate->factory()->uninitialized_value();
}
// Nothing more to be done.
if (value->IsUninitialized()) return;
HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
- ASSERT(box->IsMutableHeapNumber());
box->set_value(value->Number());
} else {
FastPropertyAtPut(index, value);
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Handle<Object> value(
object->RawFastPropertyAt(index), isolate);
- if (details.representation().IsDouble()) {
- ASSERT(value->IsMutableHeapNumber());
- Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
- value = isolate->factory()->NewHeapNumber(old->value());
- }
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, i + 1);
dictionary = NameDictionary::Add(dictionary, key, value, d);
FieldIndex index) {
Isolate* isolate = object->GetIsolate();
Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
- return Object::WrapForRead(isolate, raw_value, representation);
+ return Object::NewStorageFor(isolate, raw_value, representation);
}
Object* property =
RawFastPropertyAt(FieldIndex::ForDescriptor(map(), i));
if (descs->GetDetails(i).representation().IsDouble()) {
- ASSERT(property->IsMutableHeapNumber());
+ ASSERT(property->IsHeapNumber());
if (value->IsNumber() && property->Number() == value->Number()) {
return descs->GetKey(i);
}
};
-enum MutableMode {
- MUTABLE,
- IMMUTABLE
-};
-
-
static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
STANDARD_STORE;
STATIC_ASSERT(STANDARD_STORE == 0);
V(PROPERTY_CELL_TYPE) \
\
V(HEAP_NUMBER_TYPE) \
- V(MUTABLE_HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(FREE_SPACE_TYPE) \
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
HEAP_NUMBER_TYPE,
- MUTABLE_HEAP_NUMBER_TYPE,
FOREIGN_TYPE,
BYTE_ARRAY_TYPE,
FREE_SPACE_TYPE,
#define HEAP_OBJECT_TYPE_LIST(V) \
V(HeapNumber) \
- V(MutableHeapNumber) \
V(Name) \
V(UniqueName) \
V(String) \
} else if (FLAG_track_fields && representation.IsSmi()) {
return IsSmi();
} else if (FLAG_track_double_fields && representation.IsDouble()) {
- return IsMutableHeapNumber() || IsNumber();
+ return IsNumber();
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
return IsHeapObject();
}
Handle<Object> object,
Representation representation);
- inline static Handle<Object> WrapForRead(Isolate* isolate,
- Handle<Object> object,
- Representation representation);
-
// Returns true if the object is of the correct type to be used as a
// implementation of a JSObject's elements.
inline bool HasValidElements();
object->properties()->length());
}
Handle<Object> raw_value(object->RawFastPropertyAt(field_index), isolate);
- RUNTIME_ASSERT(raw_value->IsMutableHeapNumber());
- return *Object::WrapForRead(isolate, raw_value, Representation::Double());
+ RUNTIME_ASSERT(raw_value->IsNumber() || raw_value->IsUninitialized());
+ return *Object::NewStorageFor(isolate, raw_value, Representation::Double());
}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
- Label* gc_required,
- MutableMode mode) {
+ Label* gc_required) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
- Heap::RootListIndex map_index = mode == MUTABLE
- ? Heap::kMutableHeapNumberMapRootIndex
- : Heap::kHeapNumberMapRootIndex;
-
// Set the map.
- LoadRoot(kScratchRegister, map_index);
+ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
// space is full.
void AllocateHeapNumber(Register result,
Register scratch,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ Label* gc_required);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
- __ AllocateHeapNumber(storage_reg, scratch1, slow, MUTABLE);
+ __ AllocateHeapNumber(storage_reg, scratch1, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiToInteger32(scratch1, value_reg);
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
- Label* gc_required,
- MutableMode mode) {
+ Label* gc_required) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- Handle<Map> map = mode == MUTABLE
- ? isolate()->factory()->mutable_heap_number_map()
- : isolate()->factory()->heap_number_map();
-
// Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
+ mov(FieldOperand(result, HeapObject::kMapOffset),
+ Immediate(isolate()->factory()->heap_number_map()));
}
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
- Label* gc_required,
- MutableMode mode = IMMUTABLE);
+ Label* gc_required);
// Allocate a sequential string. All the header fields of the string object
// are initialized.
}
} else if (representation.IsDouble()) {
Label do_store, heap_number;
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
+ __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
__ JumpIfNotSmi(value_reg, &heap_number);
__ SmiUntag(value_reg);
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- CompileRun("a = { s_prop: \'value\', n_prop: \'value2\' };");
+ CompileRun("a = { s_prop: \'value\', n_prop: 0.1 };");
const v8::HeapSnapshot* snapshot =
heap_profiler->TakeHeapSnapshot(v8_str("value"));
CHECK(ValidateSnapshot(snapshot));
CHECK(js_s_prop == heap_profiler->FindObjectById(s_prop->GetId()));
const v8::HeapGraphNode* n_prop =
GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
- v8::Local<v8::String> js_n_prop =
- js_obj->Get(v8_str("n_prop")).As<v8::String>();
- CHECK(js_n_prop == heap_profiler->FindObjectById(n_prop->GetId()));
+ v8::Local<v8::Number> js_n_prop =
+ js_obj->Get(v8_str("n_prop")).As<v8::Number>();
+ CHECK(js_n_prop->NumberValue() ==
+ heap_profiler->FindObjectById(n_prop->GetId())->NumberValue());
}
+++ /dev/null
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-ayle license that can be
-// found in the LICENSE file.
-
-// Flags: --allow-natives-syntax --track-fields --expose-gc
-
-var global = Function('return this')();
-var verbose = 0;
-
-function test(ctor_desc, use_desc, migr_desc) {
- var n = 5;
- var objects = [];
- var results = [];
-
- if (verbose) {
- print();
- print("===========================================================");
- print("=== " + ctor_desc.name +
- " | " + use_desc.name + " |--> " + migr_desc.name);
- print("===========================================================");
- }
-
- // Clean ICs and transitions.
- %NotifyContextDisposed();
- gc(); gc(); gc();
-
-
- // create objects
- if (verbose) {
- print("-----------------------------");
- print("--- construct");
- print();
- }
- for (var i = 0; i < n; i++) {
- objects[i] = ctor_desc.ctor.apply(ctor_desc, ctor_desc.args(i));
- }
-
- try {
- // use them
- if (verbose) {
- print("-----------------------------");
- print("--- use 1");
- print();
- }
- var use = use_desc.use1;
- for (var i = 0; i < n; i++) {
- if (i == 3) %OptimizeFunctionOnNextCall(use);
- results[i] = use(objects[i], i);
- }
-
- // trigger migrations
- if (verbose) {
- print("-----------------------------");
- print("--- trigger migration");
- print();
- }
- var migr = migr_desc.migr;
- for (var i = 0; i < n; i++) {
- if (i == 3) %OptimizeFunctionOnNextCall(migr);
- migr(objects[i], i);
- }
-
- // use again
- if (verbose) {
- print("-----------------------------");
- print("--- use 2");
- print();
- }
- var use = use_desc.use2 !== undefined ? use_desc.use2 : use_desc.use1;
- for (var i = 0; i < n; i++) {
- if (i == 3) %OptimizeFunctionOnNextCall(use);
- results[i] = use(objects[i], i);
- if (verbose >= 2) print(results[i]);
- }
-
- } catch (e) {
- if (verbose) print("--- incompatible use: " + e);
- }
- return results;
-}
-
-
-var ctors = [
- {
- name: "none-to-double",
- ctor: function(v) { return {a: v}; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "double",
- ctor: function(v) { var o = {}; o.a = v; return o; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "none-to-smi",
- ctor: function(v) { return {a: v}; },
- args: function(i) { return [i]; },
- },
- {
- name: "smi",
- ctor: function(v) { var o = {}; o.a = v; return o; },
- args: function(i) { return [i]; },
- },
- {
- name: "none-to-object",
- ctor: function(v) { return {a: v}; },
- args: function(i) { return ["s"]; },
- },
- {
- name: "object",
- ctor: function(v) { var o = {}; o.a = v; return o; },
- args: function(i) { return ["s"]; },
- },
- {
- name: "{a:, b:, c:}",
- ctor: function(v1, v2, v3) { return {a: v1, b: v2, c: v3}; },
- args: function(i) { return [1.5 + i, 1.6, 1.7]; },
- },
- {
- name: "{a..h:}",
- ctor: function(v) { var o = {}; o.h=o.g=o.f=o.e=o.d=o.c=o.b=o.a=v; return o; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "1",
- ctor: function(v) { var o = 1; o.a = v; return o; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "f()",
- ctor: function(v) { var o = function() { return v;}; o.a = v; return o; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "f().bind",
- ctor: function(v) { var o = function(a,b,c) { return a+b+c; }; o = o.bind(o, v, v+1, v+2.2); return o; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "dictionary elements",
- ctor: function(v) { var o = []; o[1] = v; o[200000] = v; return o; },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "json",
- ctor: function(v) { var json = '{"a":' + v + ',"b":' + v + '}'; return JSON.parse(json); },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "fast accessors",
- accessor: {
- get: function() { return this.a_; },
- set: function(value) {this.a_ = value; },
- configurable: true,
- },
- ctor: function(v) {
- var o = {a_:v};
- Object.defineProperty(o, "a", this.accessor);
- return o;
- },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "slow accessor",
- accessor1: { value: this.a_, configurable: true },
- accessor2: {
- get: function() { return this.a_; },
- set: function(value) {this.a_ = value; },
- configurable: true,
- },
- ctor: function(v) {
- var o = {a_:v};
- Object.defineProperty(o, "a", this.accessor1);
- Object.defineProperty(o, "a", this.accessor2);
- return o;
- },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "slow",
- proto: {},
- ctor: function(v) {
- var o = {__proto__: this.proto};
- o.a = v;
- for (var i = 0; %HasFastProperties(o); i++) o["f"+i] = v;
- return o;
- },
- args: function(i) { return [1.5 + i]; },
- },
- {
- name: "global",
- ctor: function(v) { return global; },
- args: function(i) { return [i]; },
- },
-];
-
-
-
-var uses = [
- {
- name: "o.a+1.0",
- use1: function(o, i) { return o.a + 1.0; },
- use2: function(o, i) { return o.a + 1.1; },
- },
- {
- name: "o.b+1.0",
- use1: function(o, i) { return o.b + 1.0; },
- use2: function(o, i) { return o.b + 1.1; },
- },
- {
- name: "o[1]+1.0",
- use1: function(o, i) { return o[1] + 1.0; },
- use2: function(o, i) { return o[1] + 1.1; },
- },
- {
- name: "o[-1]+1.0",
- use1: function(o, i) { return o[-1] + 1.0; },
- use2: function(o, i) { return o[-1] + 1.1; },
- },
- {
- name: "()",
- use1: function(o, i) { return o() + 1.0; },
- use2: function(o, i) { return o() + 1.1; },
- },
-];
-
-
-
-var migrations = [
- {
- name: "to smi",
- migr: function(o, i) { if (i == 0) o.a = 1; },
- },
- {
- name: "to double",
- migr: function(o, i) { if (i == 0) o.a = 1.1; },
- },
- {
- name: "to object",
- migr: function(o, i) { if (i == 0) o.a = {}; },
- },
- {
- name: "set prototype {}",
- migr: function(o, i) { o.__proto__ = {}; },
- },
- {
- name: "%FunctionSetPrototype",
- migr: function(o, i) { %FunctionSetPrototype(o, null); },
- },
- {
- name: "modify prototype",
- migr: function(o, i) { if (i == 0) o.__proto__.__proto1__ = [,,,5,,,]; },
- },
- {
- name: "freeze prototype",
- migr: function(o, i) { if (i == 0) Object.freeze(o.__proto__); },
- },
- {
- name: "delete and re-add property",
- migr: function(o, i) { var v = o.a; delete o.a; o.a = v; },
- },
- {
- name: "modify prototype",
- migr: function(o, i) { if (i >= 0) o.__proto__ = {}; },
- },
- {
- name: "set property callback",
- migr: function(o, i) {
- Object.defineProperty(o, "a", {
- get: function() { return 1.5 + i; },
- set: function(value) {},
- configurable: true,
- });
- },
- },
- {
- name: "observe",
- migr: function(o, i) { Object.observe(o, function(){}); },
- },
- {
- name: "%EnableAccessChecks",
- migr: function(o, i) { %EnableAccessChecks(o); },
- },
- {
- name: "%DisableAccessChecks",
- migr: function(o, i) { if (o !== global) %DisableAccessChecks(o); },
- },
- {
- name: "seal",
- migr: function(o, i) { Object.seal(o); },
- },
- { // Must be the last in the sequence, because after the global object freeze
- // the other modifications does not make sence.
- name: "freeze",
- migr: function(o, i) { Object.freeze(o); },
- },
-];
-
-
-
-migrations.forEach(function(migr) {
- uses.forEach(function(use) {
- ctors.forEach(function(ctor) {
- test(ctor, use, migr);
- });
- });
-});
##############################################################################
# Skip long running tests that time out in debug mode.
'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
- 'migrations': [PASS, ['mode == debug', SLOW]],
##############################################################################
# This test sets the umask on a per-process basis and hence cannot be