// We got a map in register r0. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(r0, r1, r2);
- __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kLastAddedOffset));
+ __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumCacheOffset));
__ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
Register scratch = ToRegister(instr->scratch());
__ LoadInstanceDescriptors(map, result, scratch);
__ ldr(result,
- FieldMemOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand(0));
// Check that there is an enum cache in the non-empty instance
// descriptors (r3). This is the case if the next enumeration
// index field does not contain a smi.
- ldr(r3, FieldMemOperand(r3, DescriptorArray::kLastAddedOffset));
+ ldr(r3, FieldMemOperand(r3, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(r3, call_runtime);
// For all objects but the receiver, check that the cache is empty.
// Copy the descriptors from the array.
if (0 < descriptor_count) {
- result->SetLastAdded(array->LastAdded());
for (int i = 0; i < descriptor_count; i++) {
result->CopyFrom(i, *array, i, witness);
}
Handle<String> key =
SymbolFromString(Handle<String>(String::cast(entry->name())));
// Check if a descriptor with this name already exists before writing.
- if (LinearSearch(*result, *key, result->NumberOfSetDescriptors()) ==
+ if (LinearSearch(*result, *key, map->NumberOfSetDescriptors()) ==
DescriptorArray::kNotFound) {
CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
map->AppendDescriptor(&desc, witness);
}
}
- int new_number_of_descriptors = result->NumberOfSetDescriptors();
+ int new_number_of_descriptors = map->NumberOfSetDescriptors();
// Reinstall the original descriptor array if no new elements were added.
if (new_number_of_descriptors == descriptor_count) {
map->set_instance_descriptors(*array);
for (int i = 0; i < new_number_of_descriptors; i++) {
new_result->CopyFrom(i, *result, i, witness);
}
- new_result->SetLastAdded(result->LastAdded());
map->set_instance_descriptors(*new_result);
}
}
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
- reinterpret_cast<Map*>(result)->set_bit_field3(0);
+ reinterpret_cast<Map*>(result)->set_bit_field3(
+ Map::LastAddedBits::encode(Map::kNoneAdded));
return result;
}
int instance_size,
ElementsKind elements_kind) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawMap();
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRawMap();
+ if (!maybe_result->To(&result)) return maybe_result;
Map* map = reinterpret_cast<Map*>(result);
map->set_map_no_write_barrier(meta_map());
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
- map->set_bit_field3(0);
+ map->set_bit_field3(Map::LastAddedBits::encode(Map::kNoneAdded));
map->set_elements_kind(elements_kind);
// If the map object is aligned fill the padding area with Smi 0 objects.
// We got a map in register eax. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kLastAddedOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ mov(result,
- FieldOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ test(result, result);
// Check that there is an enum cache in the non-empty instance
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
- mov(edx, FieldOperand(edx, DescriptorArray::kLastAddedOffset));
+ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(edx, call_runtime);
// For all objects but the receiver, check that the cache is empty.
case CONSTANT_FUNCTION:
return;
case TRANSITION: {
- Map* value = lookup->GetTransitionTarget();
- Handle<Map> transition(Map::cast(value));
+ Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
+
DescriptorArray* target_descriptors = transition->instance_descriptors();
- int descriptor = target_descriptors->LastAdded();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
if (details.type() != FIELD || details.attributes() != NONE) return;
break;
case TRANSITION: {
Handle<Map> transition(lookup->GetTransitionTarget());
+ int descriptor = transition->LastAdded();
DescriptorArray* target_descriptors = transition->instance_descriptors();
- int descriptor = target_descriptors->LastAdded();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
if (details.type() == FIELD && details.attributes() == NONE) {
// We got a map in register v0. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(v0, a1, a2);
- __ lw(a1, FieldMemOperand(a1, DescriptorArray::kLastAddedOffset));
+ __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumCacheOffset));
__ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
Register scratch = ToRegister(instr->scratch());
__ LoadInstanceDescriptors(map, result, scratch);
__ lw(result,
- FieldMemOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
// Check that there is an enum cache in the non-empty instance
// descriptors (a3). This is the case if the next enumeration
// index field does not contain a smi.
- lw(a3, FieldMemOperand(a3, DescriptorArray::kLastAddedOffset));
+ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(a3, call_runtime);
// For all objects but the receiver, check that the cache is empty.
void Map::set_function_with_prototype(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kFunctionWithPrototype));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kFunctionWithPrototype));
- }
+ set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
}
bool Map::function_with_prototype() {
- return ((1 << kFunctionWithPrototype) & bit_field3()) != 0;
+ return FunctionWithPrototype::decode(bit_field3());
}
void Map::set_is_shared(bool value) {
- if (value) {
- set_bit_field3(bit_field3() | (1 << kIsShared));
- } else {
- set_bit_field3(bit_field3() & ~(1 << kIsShared));
- }
+ set_bit_field3(IsShared::update(bit_field3(), value));
}
bool Map::is_shared() {
- return ((1 << kIsShared) & bit_field3()) != 0;
+ return IsShared::decode(bit_field3());
}
}
#endif
+ set_instance_descriptors(descriptors);
+
for (int i = 0; i < len; ++i) {
if (descriptors->GetDetails(i).index() == len) {
- descriptors->SetLastAdded(i);
+ SetLastAdded(i);
break;
}
}
ASSERT(len == 0 ||
- len == descriptors->GetDetails(descriptors->LastAdded()).index());
-
- set_instance_descriptors(descriptors);
+ len == descriptors->GetDetails(LastAdded()).index());
}
void Map::AppendDescriptor(Descriptor* desc,
const DescriptorArray::WhitenessWitness& witness) {
DescriptorArray* descriptors = instance_descriptors();
- int set_descriptors = descriptors->NumberOfSetDescriptors();
+ int set_descriptors = NumberOfSetDescriptors();
int new_last_added = descriptors->Append(desc, witness, set_descriptors);
- descriptors->SetLastAdded(new_last_added);
+ SetLastAdded(new_last_added);
}
strict_mode);
case TRANSITION: {
Map* transition_map = result->GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
- int descriptor = descriptors->LastAdded();
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.type() == FIELD) {
return ConvertDescriptorToField(name, value, attributes);
case TRANSITION: {
Map* transition_map = result.GetTransitionTarget();
+ int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
- int descriptor = descriptors->LastAdded();
PropertyDetails details = descriptors->GetDetails(descriptor);
if (details.type() == FIELD) {
// except for the code cache, which can contain some ics which can be
// applied to the shared map.
Object* fresh;
- { MaybeObject* maybe_fresh =
- fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
- if (maybe_fresh->ToObject(&fresh)) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kCodeCacheOffset) == 0);
- int offset = Map::kCodeCacheOffset + kPointerSize;
- ASSERT(memcmp(Map::cast(fresh)->address() + offset,
- Map::cast(result)->address() + offset,
- Map::kSize - offset) == 0);
- }
+ MaybeObject* maybe_fresh =
+ fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (maybe_fresh->ToObject(&fresh)) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kCodeCacheOffset) == 0);
+ int offset = Map::kCodeCacheOffset + kPointerSize;
+ ASSERT(memcmp(Map::cast(fresh)->address() + offset,
+ Map::cast(result)->address() + offset,
+ Map::kSize - offset) == 0);
}
}
#endif
// If there is a transition, try to follow it.
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
- int descriptor_number = target->instance_descriptors()->LastAdded();
+ int descriptor_number = target->LastAdded();
ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name);
return TryAccessorTransition(
this, target, descriptor_number, component, accessor, attributes);
}
Map* result;
- { MaybeObject* maybe_result = RawCopy(new_instance_size);
- if (!maybe_result->To(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = RawCopy(new_instance_size);
+ if (!maybe_result->To(&result)) return maybe_result;
if (mode != CLEAR_INOBJECT_PROPERTIES) {
result->set_inobject_properties(inobject_properties());
}
+ result->SetLastAdded(kNoneAdded);
result->set_code_cache(code_cache());
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
MaybeObject* maybe_result = CopyDropDescriptors();
if (!maybe_result->To(&result)) return maybe_result;
- if (last_added == DescriptorArray::kNoneAdded) {
+ if (last_added == kNoneAdded) {
ASSERT(descriptors->IsEmpty());
- ASSERT(flag == OMIT_TRANSITION);
- return result;
+ } else {
+ ASSERT(descriptors->GetDetails(last_added).index() ==
+ descriptors->number_of_descriptors());
+ result->set_instance_descriptors(descriptors);
+ result->SetLastAdded(last_added);
}
- descriptors->SetLastAdded(last_added);
- result->set_instance_descriptors(descriptors);
-
if (flag == INSERT_TRANSITION) {
TransitionArray* transitions;
MaybeObject* maybe_transitions = AddTransition(name, result);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
int last_added = initial_descriptors->IsEmpty()
- ? DescriptorArray::kNoneAdded
- : initial_descriptors->LastAdded();
+ ? kNoneAdded
+ : initial_map->LastAdded();
return CopyReplaceDescriptors(descriptors, NULL, last_added, OMIT_TRANSITION);
}
MaybeObject* maybe_descriptors = source_descriptors->Copy(shared_mode);
if (!maybe_descriptors->To(&descriptors)) return maybe_descriptors;
- int last_added = source_descriptors->IsEmpty()
- ? DescriptorArray::kNoneAdded
- : source_descriptors->LastAdded();
+ int last_added = source_descriptors->IsEmpty() ? kNoneAdded : LastAdded();
return CopyReplaceDescriptors(descriptors, NULL, last_added, OMIT_TRANSITION);
}
SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
- return CopyReplaceDescriptors(
- new_descriptors, key, descriptors->LastAdded(), flag);
+ return CopyReplaceDescriptors(new_descriptors, key, LastAdded(), flag);
}
if (!maybe_array->To(&result)) return maybe_array;
}
- result->set(kLastAddedIndex, Smi::FromInt(kNoneAdded));
+ result->set(kEnumCacheIndex, Smi::FromInt(Map::kNoneAdded));
result->set(kTransitionsIndex, Smi::FromInt(0));
return result;
}
ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
if (HasEnumCache()) {
- FixedArray::cast(get(kLastAddedIndex))->
+ FixedArray::cast(get(kEnumCacheIndex))->
set(kEnumCacheBridgeCacheIndex, new_cache);
- FixedArray::cast(get(kLastAddedIndex))->
+ FixedArray::cast(get(kEnumCacheIndex))->
set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
} else {
if (IsEmpty()) return; // Do nothing for empty descriptor array.
set(kEnumCacheBridgeIndicesCacheIndex, new_index_cache);
NoWriteBarrierSet(FixedArray::cast(bridge_storage),
kEnumCacheBridgeLastAdded,
- get(kLastAddedIndex));
- set(kLastAddedIndex, bridge_storage);
+ get(kEnumCacheIndex));
+ set(kEnumCacheIndex, bridge_storage);
}
}
instance_type() == other->instance_type() &&
bit_field() == other->bit_field() &&
bit_field2() == other->bit_field2() &&
- (bit_field3() & ~(1<<Map::kIsShared)) ==
- (other->bit_field3() & ~(1<<Map::kIsShared));
+ static_cast<uint32_t>(bit_field3()) ==
+ LastAddedBits::update(
+ IsShared::update(other->bit_field3(), true),
+ kNoneAdded);
}
inline int number_of_entries() { return number_of_descriptors(); }
inline int NextEnumerationIndex() { return number_of_descriptors() + 1; }
- int LastAdded() {
- ASSERT(!IsEmpty());
- Object* obj = get(kLastAddedIndex);
- if (obj->IsSmi()) {
- return Smi::cast(obj)->value();
- } else {
- Object* index = FixedArray::cast(obj)->get(kEnumCacheBridgeLastAdded);
- return Smi::cast(index)->value();
- }
- }
-
- // Set index of the last added descriptor and flush any enum cache.
- void SetLastAdded(int index) {
- ASSERT(!IsEmpty() || index > 0);
- set(kLastAddedIndex, Smi::FromInt(index));
- }
-
- int NumberOfSetDescriptors() {
- ASSERT(!IsEmpty());
- if (LastAdded() == kNoneAdded) return 0;
- return GetDetails(LastAdded()).index();
- }
-
bool HasEnumCache() {
- return !IsEmpty() && !get(kLastAddedIndex)->IsSmi();
+ return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
}
Object* GetEnumCache() {
ASSERT(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kLastAddedIndex));
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
return bridge->get(kEnumCacheBridgeCacheIndex);
}
Object** GetEnumCacheSlot() {
ASSERT(HasEnumCache());
return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kLastAddedOffset);
+ kEnumCacheOffset);
}
Object** GetTransitionsSlot() {
// Constant for denoting key was not found.
static const int kNotFound = -1;
- // Constant for denoting that the LastAdded field was not yet set.
- static const int kNoneAdded = -1;
-
static const int kBackPointerStorageIndex = 0;
- static const int kLastAddedIndex = 1;
+ static const int kEnumCacheIndex = 1;
static const int kTransitionsIndex = 2;
static const int kFirstIndex = 3;
// Layout description.
static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
- static const int kLastAddedOffset = kBackPointerStorageOffset +
+ static const int kEnumCacheOffset = kBackPointerStorageOffset +
kPointerSize;
- static const int kTransitionsOffset = kLastAddedOffset + kPointerSize;
+ static const int kTransitionsOffset = kEnumCacheOffset + kPointerSize;
static const int kFirstOffset = kTransitionsOffset + kPointerSize;
// Layout description for the bridge array.
inline int bit_field3();
inline void set_bit_field3(int value);
+ class IsShared: public BitField<bool, 0, 1> {};
+ class FunctionWithPrototype: public BitField<bool, 1, 1> {};
+ class LastAddedBits: public BitField<int, 2, 11> {};
+
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
// property is set to a value that is not a JSObject, the prototype
String* name,
LookupResult* result);
+ void SetLastAdded(int index) {
+ set_bit_field3(LastAddedBits::update(bit_field3(), index));
+ }
+
+ int LastAdded() {
+ return LastAddedBits::decode(bit_field3());
+ }
+
+ int NumberOfSetDescriptors() {
+ ASSERT(!instance_descriptors()->IsEmpty());
+ if (LastAdded() == kNoneAdded) return 0;
+ return instance_descriptors()->GetDetails(LastAdded()).index();
+ }
+
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
MUST_USE_RESULT MaybeObject* CopyDropDescriptors();
static const int kMaxPreAllocatedPropertyFields = 255;
+ // Constant for denoting that the LastAdded field was not yet set.
+ static const int kNoneAdded = LastAddedBits::kMax;
+
// Layout description.
static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
Map* map = GetTarget(transition_number);
DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = descriptors->LastAdded();
- ASSERT(descriptor != DescriptorArray::kNotFound);
+ int descriptor = map->LastAdded();
+ ASSERT(descriptor != Map::kNoneAdded);
return descriptors->GetDetails(descriptor);
}
// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
__ LoadInstanceDescriptors(rax, rcx);
- __ movq(rcx, FieldOperand(rcx, DescriptorArray::kLastAddedOffset));
+ __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
Register result = ToRegister(instr->result());
__ LoadInstanceDescriptors(map, result);
__ movq(result,
- FieldOperand(result, DescriptorArray::kLastAddedOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ movq(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
Condition cc = masm()->CheckSmi(result);
// Check that there is an enum cache in the non-empty instance
// descriptors (rdx). This is the case if the next enumeration
// index field does not contain a smi.
- movq(rdx, FieldOperand(rdx, DescriptorArray::kLastAddedOffset));
+ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheOffset));
JumpIfSmi(rdx, call_runtime);
// For all objects but the receiver, check that the cache is empty.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
Handle<DescriptorArray> new_descriptors = FACTORY->NewDescriptorArray(1);
v8::internal::DescriptorArray::WhitenessWitness witness(*new_descriptors);
+ map->set_instance_descriptors(*new_descriptors);
CallbacksDescriptor d(*name,
*foreign,
static_cast<PropertyAttributes>(0),
v8::internal::PropertyDetails::kInitialIndex);
- new_descriptors->Set(0, &d, witness);
- new_descriptors->SetLastAdded(0);
+ map->AppendDescriptor(&d, witness);
- map->set_instance_descriptors(*new_descriptors);
// Add the Foo constructor to the global object.
env->Global()->Set(v8::String::New("Foo"), v8::Utils::ToLocal(function));
// Call the accessor through JavaScript.