#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
-#include "elements.h"
-#include "objects.h"
-#include "contexts.h"
-#include "conversions-inl.h"
-#include "heap.h"
-#include "isolate.h"
-#include "heap-inl.h"
-#include "property.h"
-#include "spaces.h"
-#include "store-buffer.h"
-#include "v8memory.h"
-#include "factory.h"
-#include "incremental-marking.h"
-#include "transitions-inl.h"
-#include "objects-visiting.h"
+#include "src/base/atomicops.h"
+#include "src/base/bits.h"
+#include "src/contexts.h"
+#include "src/conversions-inl.h"
+#include "src/elements.h"
+#include "src/factory.h"
+#include "src/field-index-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
+#include "src/isolate.h"
+#include "src/lookup.h"
+#include "src/objects.h"
+#include "src/property.h"
+#include "src/prototype.h"
+#include "src/transitions-inl.h"
+#include "src/type-feedback-vector-inl.h"
+#include "src/v8memory.h"
namespace v8 {
namespace internal {
#define TYPE_CHECKER(type, instancetype) \
- bool Object::Is##type() { \
+ bool Object::Is##type() const { \
return Object::IsHeapObject() && \
HeapObject::cast(this)->map()->instance_type() == instancetype; \
}
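// Illustration (sketch): a use such as TYPE_CHECKER(HeapNumber,
// HEAP_NUMBER_TYPE), which appears further down in this file, expands to
// roughly:
//
//   bool Object::IsHeapNumber() const {
//     return Object::IsHeapObject() &&
//            HeapObject::cast(this)->map()->instance_type() ==
//                HEAP_NUMBER_TYPE;
//   }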
-#define CAST_ACCESSOR(type) \
- type* type::cast(Object* object) { \
- SLOW_ASSERT(object->Is##type()); \
- return reinterpret_cast<type*>(object); \
+#define CAST_ACCESSOR(type) \
+ type* type::cast(Object* object) { \
+ SLOW_DCHECK(object->Is##type()); \
+ return reinterpret_cast<type*>(object); \
+ } \
+ const type* type::cast(const Object* object) { \
+ SLOW_DCHECK(object->Is##type()); \
+ return reinterpret_cast<const type*>(object); \
}
-#define INT_ACCESSORS(holder, name, offset) \
- int holder::name() { return READ_INT_FIELD(this, offset); } \
+#define INT_ACCESSORS(holder, name, offset) \
+ int holder::name() const { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
-#define ACCESSORS(holder, name, type, offset) \
- type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+#define ACCESSORS(holder, name, type, offset) \
+ type* holder::name() const { return type::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
}
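// Illustration (sketch): ACCESSORS(JSObject, properties, FixedArray,
// kPropertiesOffset), used later in this file, expands to roughly:
//
//   FixedArray* JSObject::properties() const {
//     return FixedArray::cast(READ_FIELD(this, kPropertiesOffset));
//   }
//   void JSObject::set_properties(FixedArray* value, WriteBarrierMode mode) {
//     WRITE_FIELD(this, kPropertiesOffset, value);
//     CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPropertiesOffset, value,
//                               mode);
//   }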
// Getter that returns a tagged Smi and setter that writes a tagged Smi.
-#define ACCESSORS_TO_SMI(holder, name, offset) \
- Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
+#define ACCESSORS_TO_SMI(holder, name, offset) \
+ Smi* holder::name() const { return Smi::cast(READ_FIELD(this, offset)); } \
+ void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
+ WRITE_FIELD(this, offset, value); \
}
// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS(holder, name, offset) \
- int holder::name() { \
+ int holder::name() const { \
Object* value = READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
  } \
  void holder::set_##name(int value) { \
    WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }
#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
- int holder::synchronized_##name() { \
+ int holder::synchronized_##name() const { \
Object* value = ACQUIRE_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
  } \
  void holder::synchronized_set_##name(int value) { \
    RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }
#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
- int holder::nobarrier_##name() { \
+ int holder::nobarrier_##name() const { \
Object* value = NOBARRIER_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
  } \
  void holder::nobarrier_set_##name(int value) { \
    NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
  }
#define BOOL_GETTER(holder, field, name, offset) \
- bool holder::name() { \
+ bool holder::name() const { \
return BooleanBit::get(field(), offset); \
} \
#define BOOL_ACCESSORS(holder, field, name, offset) \
- bool holder::name() { \
+ bool holder::name() const { \
return BooleanBit::get(field(), offset); \
} \
  void holder::set_##name(bool value) { \
    set_##field(BooleanBit::set(field(), offset, value)); \
  }
-bool Object::IsFixedArrayBase() {
+bool Object::IsFixedArrayBase() const {
return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
IsFixedTypedArrayBase() || IsExternalArray();
}
// External objects are not extensible, so the map check is enough.
-bool Object::IsExternal() {
+bool Object::IsExternal() const {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->external_map();
}
-bool Object::IsAccessorInfo() {
+bool Object::IsAccessorInfo() const {
return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
}
-bool Object::IsSmi() {
+bool Object::IsSmi() const {
return HAS_SMI_TAG(this);
}
-bool Object::IsHeapObject() {
+bool Object::IsHeapObject() const {
return Internals::HasHeapObjectTag(this);
}
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(Float32x4, FLOAT32x4_TYPE)
-TYPE_CHECKER(Float64x2, FLOAT64x2_TYPE)
-TYPE_CHECKER(Int32x4, INT32x4_TYPE)
+TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
-bool Object::IsString() {
+bool Object::IsString() const {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
}
-bool Object::IsName() {
+bool Object::IsName() const {
return IsString() || IsSymbol();
}
-bool Object::IsUniqueName() {
+bool Object::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-bool Object::IsSpecObject() {
+bool Object::IsSpecObject() const {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
}
-bool Object::IsSpecFunction() {
+bool Object::IsSpecFunction() const {
if (!Object::IsHeapObject()) return false;
InstanceType type = HeapObject::cast(this)->map()->instance_type();
return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
}
-bool Object::IsTemplateInfo() {
+bool Object::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
-bool Object::IsInternalizedString() {
+bool Object::IsInternalizedString() const {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
  STATIC_ASSERT(kNotInternalizedTag != 0);
  return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
      (kStringTag | kInternalizedTag);
}
-bool Object::IsConsString() {
+bool Object::IsConsString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsCons();
}
-bool Object::IsSlicedString() {
+bool Object::IsSlicedString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSliced();
}
-bool Object::IsSeqString() {
+bool Object::IsSeqString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
}
-bool Object::IsSeqOneByteString() {
+bool Object::IsSeqOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsOneByteRepresentation();
}
-bool Object::IsSeqTwoByteString() {
+bool Object::IsSeqTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsTwoByteRepresentation();
}
-bool Object::IsExternalString() {
+bool Object::IsExternalString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal();
}
-bool Object::IsExternalAsciiString() {
+bool Object::IsExternalOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsOneByteRepresentation();
}
-bool Object::IsExternalTwoByteString() {
+bool Object::IsExternalTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsTwoByteRepresentation();
return handle(Smi::FromInt(0), isolate);
}
if (!representation.IsDouble()) return object;
+ double value;
if (object->IsUninitialized()) {
- return isolate->factory()->NewHeapNumber(0);
+ value = 0;
+ } else if (object->IsMutableHeapNumber()) {
+ value = HeapNumber::cast(*object)->value();
+ } else {
+ value = object->Number();
+ }
+ return isolate->factory()->NewHeapNumber(value, MUTABLE);
+}
+
+
+Handle<Object> Object::WrapForRead(Isolate* isolate,
+ Handle<Object> object,
+ Representation representation) {
+ DCHECK(!object->IsUninitialized());
+ if (!representation.IsDouble()) {
+ DCHECK(object->FitsRepresentation(representation));
+ return object;
}
- return isolate->factory()->NewHeapNumber(object->Number());
+ return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
}
-StringShape::StringShape(String* str)
+StringShape::StringShape(const String* str)
: type_(str->map()->instance_type()) {
set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
+ DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
StringShape::StringShape(Map* map)
: type_(map->instance_type()) {
set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
+ DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
StringShape::StringShape(InstanceType t)
: type_(static_cast<uint32_t>(t)) {
set_valid();
- ASSERT((type_ & kIsNotStringMask) == kStringTag);
+ DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
bool StringShape::IsInternalized() {
- ASSERT(valid());
+ DCHECK(valid());
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
-bool String::IsOneByteRepresentation() {
+bool String::IsOneByteRepresentation() const {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
-bool String::IsTwoByteRepresentation() {
+bool String::IsTwoByteRepresentation() const {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- ASSERT(IsFlat());
+ DCHECK(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
case kOneByteStringTag:
return true;
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
- ASSERT(IsFlat());
+ DCHECK(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
case kOneByteStringTag:
return false;
}
-STATIC_CHECK((kStringRepresentationMask | kStringEncodingMask) ==
+STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
Internals::kFullStringRepresentationMask);
-STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) ==
+STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
-bool StringShape::IsSequentialAscii() {
+bool StringShape::IsSequentialOneByte() {
return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
}
-bool StringShape::IsExternalAscii() {
+bool StringShape::IsExternalOneByte() {
return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
-STATIC_CHECK((kExternalStringTag | kOneByteStringTag) ==
- Internals::kExternalAsciiRepresentationTag);
+STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
+ Internals::kExternalOneByteRepresentationTag);
-STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag);
+STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() {
}
-STATIC_CHECK((kExternalStringTag | kTwoByteStringTag) ==
+STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
Internals::kExternalTwoByteRepresentationTag);
-STATIC_CHECK(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
+STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
uc32 FlatStringReader::Get(int index) {
- ASSERT(0 <= index && index <= length_);
- if (is_ascii_) {
+ DCHECK(0 <= index && index <= length_);
+ if (is_one_byte_) {
return static_cast<const byte*>(start_)[index];
} else {
return static_cast<const uc16*>(start_)[index];
explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
- virtual uint32_t Hash() V8_OVERRIDE {
+ virtual uint32_t Hash() OVERRIDE {
hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
string_.length(),
seed_);
uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
: SequentialStringKey<uint8_t>(str, seed) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->IsOneByteEqualTo(string_);
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
};
-template<class Char>
-class SubStringKey : public HashTableKey {
+class SeqOneByteSubStringKey : public HashTableKey {
public:
- SubStringKey(Handle<String> string, int from, int length)
+ SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
: string_(string), from_(from), length_(length) {
- if (string_->IsSlicedString()) {
- string_ = Handle<String>(Unslice(*string_, &from_));
- }
- ASSERT(string_->IsSeqString() || string->IsExternalString());
+ DCHECK(string_->IsSeqOneByteString());
}
- virtual uint32_t Hash() V8_OVERRIDE {
- ASSERT(length_ >= 0);
- ASSERT(from_ + length_ <= string_->length());
- const Char* chars = GetChars() + from_;
+ virtual uint32_t Hash() OVERRIDE {
+ DCHECK(length_ >= 0);
+ DCHECK(from_ + length_ <= string_->length());
+ const uint8_t* chars = string_->GetChars() + from_;
hash_field_ = StringHasher::HashSequentialString(
chars, length_, string_->GetHeap()->HashSeed());
uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
- virtual bool IsMatch(Object* string) V8_OVERRIDE;
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+ virtual bool IsMatch(Object* string) OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
private:
- const Char* GetChars();
- String* Unslice(String* string, int* offset) {
- while (string->IsSlicedString()) {
- SlicedString* sliced = SlicedString::cast(string);
- *offset += sliced->offset();
- string = sliced->parent();
- }
- return string;
- }
-
- Handle<String> string_;
+ Handle<SeqOneByteString> string_;
int from_;
int length_;
uint32_t hash_field_;
explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
: SequentialStringKey<uc16>(str, seed) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->IsTwoByteEqualTo(string_);
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
};
explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
- virtual bool IsMatch(Object* string) V8_OVERRIDE {
+ virtual bool IsMatch(Object* string) OVERRIDE {
return String::cast(string)->IsUtf8EqualTo(string_);
}
- virtual uint32_t Hash() V8_OVERRIDE {
+ virtual uint32_t Hash() OVERRIDE {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
uint32_t result = hash_field_ >> String::kHashShift;
- ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
+ DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
- virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+ virtual uint32_t HashForObject(Object* other) OVERRIDE {
return String::cast(other)->Hash();
}
- virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+ virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
if (hash_field_ == 0) Hash();
return isolate->factory()->NewInternalizedStringFromUtf8(
string_, chars_, hash_field_);
};
-bool Object::IsNumber() {
+bool Object::IsNumber() const {
return IsSmi() || IsHeapNumber();
}
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-bool Object::IsFiller() {
+bool Object::IsFiller() const {
if (!Object::IsHeapObject()) return false;
InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
-bool Object::IsExternalArray() {
+bool Object::IsExternalArray() const {
if (!Object::IsHeapObject())
return false;
InstanceType instance_type =
#undef TYPED_ARRAY_TYPE_CHECKER
-bool Object::IsFixedTypedArrayBase() {
+bool Object::IsFixedTypedArrayBase() const {
if (!Object::IsHeapObject()) return false;
InstanceType instance_type =
}
-bool Object::IsJSReceiver() {
+bool Object::IsJSReceiver() const {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
-bool Object::IsJSObject() {
+bool Object::IsJSObject() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
}
-bool Object::IsJSProxy() {
+bool Object::IsJSProxy() const {
if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
+ return HeapObject::cast(this)->map()->IsJSProxyMap();
}
TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
-bool Object::IsJSWeakCollection() {
+bool Object::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
-bool Object::IsDescriptorArray() {
+bool Object::IsDescriptorArray() const {
return IsFixedArray();
}
-bool Object::IsTransitionArray() {
+bool Object::IsTransitionArray() const {
return IsFixedArray();
}
-bool Object::IsDeoptimizationInputData() {
+bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+
+
+bool Object::IsDeoptimizationInputData() const {
// Must be a fixed array.
if (!IsFixedArray()) return false;
if (length == 0) return true;
length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
- return length >= 0 &&
- length % DeoptimizationInputData::kDeoptEntrySize == 0;
+ return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
}
-bool Object::IsDeoptimizationOutputData() {
+bool Object::IsDeoptimizationOutputData() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts we can check
}
-bool Object::IsDependentCode() {
+bool Object::IsDependentCode() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a dependent codes array.
}
-bool Object::IsContext() {
+bool Object::IsContext() const {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
Heap* heap = map->GetHeap();
}
-bool Object::IsNativeContext() {
+bool Object::IsNativeContext() const {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->native_context_map();
}
-bool Object::IsScopeInfo() {
+bool Object::IsScopeInfo() const {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->scope_info_map();
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
+TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-bool Object::IsStringWrapper() {
+bool Object::IsStringWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsString();
}
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
-bool Object::IsBoolean() {
+bool Object::IsBoolean() const {
return IsOddball() &&
((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
-bool Object::IsJSArrayBufferView() {
+bool Object::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
}
-bool Object::IsHashTable() {
+bool Object::IsHashTable() const {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->hash_table_map();
}
-bool Object::IsDictionary() {
+bool Object::IsWeakHashTable() const {
+ return IsHashTable();
+}
+
+
+bool Object::IsDictionary() const {
return IsHashTable() &&
this != HeapObject::cast(this)->GetHeap()->string_table();
}
-bool Object::IsStringTable() {
+bool Object::IsNameDictionary() const {
+ return IsDictionary();
+}
+
+
+bool Object::IsSeededNumberDictionary() const {
+ return IsDictionary();
+}
+
+
+bool Object::IsUnseededNumberDictionary() const {
+ return IsDictionary();
+}
+
+
+bool Object::IsStringTable() const {
return IsHashTable();
}
-bool Object::IsJSFunctionResultCache() {
+bool Object::IsJSFunctionResultCache() const {
if (!IsFixedArray()) return false;
- FixedArray* self = FixedArray::cast(this);
+ const FixedArray* self = FixedArray::cast(this);
int length = self->length();
if (length < JSFunctionResultCache::kEntriesIndex) return false;
if ((length - JSFunctionResultCache::kEntriesIndex)
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- reinterpret_cast<JSFunctionResultCache*>(this)->
+ // TODO(svenpanne) We use const_cast here and below to break our dependency
+ // cycle between the predicates and the verifiers. This can be removed when
+ // the verifiers are const-correct, too.
+ reinterpret_cast<JSFunctionResultCache*>(const_cast<Object*>(this))->
JSFunctionResultCacheVerify();
}
#endif
}
-bool Object::IsNormalizedMapCache() {
+bool Object::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
}
-bool NormalizedMapCache::IsNormalizedMapCache(Object* obj) {
+bool NormalizedMapCache::IsNormalizedMapCache(const Object* obj) {
if (!obj->IsFixedArray()) return false;
if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(obj)->NormalizedMapCacheVerify();
+ reinterpret_cast<NormalizedMapCache*>(const_cast<Object*>(obj))->
+ NormalizedMapCacheVerify();
}
#endif
return true;
}
-bool Object::IsCompilationCacheTable() {
+bool Object::IsCompilationCacheTable() const {
return IsHashTable();
}
-bool Object::IsCodeCacheHashTable() {
+bool Object::IsCodeCacheHashTable() const {
return IsHashTable();
}
-bool Object::IsPolymorphicCodeCacheHashTable() {
+bool Object::IsPolymorphicCodeCacheHashTable() const {
return IsHashTable();
}
-bool Object::IsMapCache() {
+bool Object::IsMapCache() const {
return IsHashTable();
}
-bool Object::IsObjectHashTable() {
+bool Object::IsObjectHashTable() const {
return IsHashTable();
}
-bool Object::IsOrderedHashTable() {
+bool Object::IsOrderedHashTable() const {
return IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
}
-bool Object::IsPrimitive() {
+bool Object::IsOrderedHashSet() const {
+ return IsOrderedHashTable();
+}
+
+
+bool Object::IsOrderedHashMap() const {
+ return IsOrderedHashTable();
+}
+
+
+bool Object::IsPrimitive() const {
return IsOddball() || IsNumber() || IsString();
}
-bool Object::IsJSGlobalProxy() {
+bool Object::IsJSGlobalProxy() const {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
- ASSERT(!result ||
+ DCHECK(!result ||
HeapObject::cast(this)->map()->is_access_check_needed());
return result;
}
-bool Object::IsGlobalObject() {
+bool Object::IsGlobalObject() const {
if (!IsHeapObject()) return false;
InstanceType type = HeapObject::cast(this)->map()->instance_type();
TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
-bool Object::IsUndetectableObject() {
+bool Object::IsUndetectableObject() const {
return IsHeapObject()
&& HeapObject::cast(this)->map()->is_undetectable();
}
-bool Object::IsAccessCheckNeeded() {
+bool Object::IsAccessCheckNeeded() const {
if (!IsHeapObject()) return false;
if (IsJSGlobalProxy()) {
- JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
- GlobalObject* global =
- proxy->GetIsolate()->context()->global_object();
+ const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+ GlobalObject* global = proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
return HeapObject::cast(this)->map()->is_access_check_needed();
}
-bool Object::IsStruct() {
+bool Object::IsStruct() const {
if (!IsHeapObject()) return false;
switch (HeapObject::cast(this)->map()->instance_type()) {
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
}
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() { \
- return Object::IsHeapObject() \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return Object::IsHeapObject() \
&& HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
}
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
-bool Object::IsUndefined() {
+bool Object::IsUndefined() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
}
-bool Object::IsNull() {
+bool Object::IsNull() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
}
-bool Object::IsTheHole() {
+bool Object::IsTheHole() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
}
-bool Object::IsException() {
+bool Object::IsException() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
}
-bool Object::IsUninitialized() {
+bool Object::IsUninitialized() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
}
-bool Object::IsTrue() {
+bool Object::IsTrue() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
-bool Object::IsFalse() {
+bool Object::IsFalse() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
}
-bool Object::IsArgumentsMarker() {
+bool Object::IsArgumentsMarker() const {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
}
double Object::Number() {
- ASSERT(IsNumber());
+ DCHECK(IsNumber());
return IsSmi()
? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
: reinterpret_cast<HeapNumber*>(this)->value();
}
-bool Object::IsNaN() {
+bool Object::IsNaN() const {
return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
}
+bool Object::IsMinusZero() const {
+ return this->IsHeapNumber() &&
+ i::IsMinusZero(HeapNumber::cast(this)->value());
+}
+
+
MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
if (object->IsSmi()) return Handle<Smi>::cast(object);
if (object->IsHeapNumber()) {
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
Handle<Name> name) {
- PropertyAttributes attributes;
- return GetPropertyWithReceiver(object, object, name, &attributes);
+ LookupIterator it(object, name);
+ return GetProperty(&it);
}
Handle<Object> object,
uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
- // This was not always the case. This ASSERT is here to catch
+ // This was not always the case. This DCHECK is here to catch
// leftover incorrect uses.
- ASSERT(AllowHeapAllocation::IsAllowed());
+ DCHECK(AllowHeapAllocation::IsAllowed());
return Object::GetElementWithReceiver(isolate, object, object, index);
}
+Handle<Object> Object::GetPrototypeSkipHiddenPrototypes(
+ Isolate* isolate, Handle<Object> receiver) {
+ PrototypeIterator iter(isolate, receiver);
+ while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+ return PrototypeIterator::GetCurrent(iter);
+ }
+ iter.Advance();
+ }
+ return PrototypeIterator::GetCurrent(iter);
+}
+
+
MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
Handle<Name> name) {
uint32_t index;
Handle<Object> object,
const char* name) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- ASSERT(!str.is_null());
+ DCHECK(!str.is_null());
#ifdef DEBUG
uint32_t index; // Assert that the name is not an array index.
- ASSERT(!str->AsArrayIndex(&index));
+ DCHECK(!str->AsArrayIndex(&index));
#endif // DEBUG
return GetProperty(object, str);
}
StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return SetPropertyWithHandler(
- proxy, receiver, name, value, NONE, strict_mode);
+ return SetPropertyWithHandler(proxy, receiver, name, value, strict_mode);
}
-bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+Maybe<bool> JSProxy::HasElementWithHandler(Handle<JSProxy> proxy,
+ uint32_t index) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
return HasPropertyWithHandler(proxy, name);
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
+#define FIELD_ADDR_CONST(p, offset) \
+ (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
+
#define READ_FIELD(p, offset) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))
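// Illustration (sketch, assuming the usual kHeapObjectTag == 1): a HeapObject
// pointer p is the object's real start address plus the tag, so
// FIELD_ADDR(p, offset) evaluates to real_address + offset, and READ_FIELD
// dereferences that untagged field slot directly; no masking is needed
// beyond subtracting the tag.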
-#define ACQUIRE_READ_FIELD(p, offset) \
- reinterpret_cast<Object*>( \
- Acquire_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
+#define ACQUIRE_READ_FIELD(p, offset) \
+ reinterpret_cast<Object*>(base::Acquire_Load( \
+ reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
-#define NOBARRIER_READ_FIELD(p, offset) \
- reinterpret_cast<Object*>( \
- NoBarrier_Load(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset))))
+#define NOBARRIER_READ_FIELD(p, offset) \
+ reinterpret_cast<Object*>(base::NoBarrier_Load( \
+ reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-#define RELEASE_WRITE_FIELD(p, offset, value) \
- Release_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<AtomicWord>(value));
+#define RELEASE_WRITE_FIELD(p, offset, value) \
+ base::Release_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
-#define NOBARRIER_WRITE_FIELD(p, offset, value) \
- NoBarrier_Store(reinterpret_cast<AtomicWord*>(FIELD_ADDR(p, offset)), \
- reinterpret_cast<AtomicWord>(value));
+#define NOBARRIER_WRITE_FIELD(p, offset, value) \
+ base::NoBarrier_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
#define WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordWrite( \
#ifndef V8_TARGET_ARCH_MIPS
#define READ_DOUBLE_FIELD(p, offset) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const double*>(FIELD_ADDR_CONST(p, offset)))
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using load-double (mips ldc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
- static inline double read_double_field(void* p, int offset) {
+ static inline double read_double_field(const void* p, int offset) {
union conversion {
double d;
uint32_t u[2];
} c;
- c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
- c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
+ c.u[0] = (*reinterpret_cast<const uint32_t*>(
+ FIELD_ADDR_CONST(p, offset)));
+ c.u[1] = (*reinterpret_cast<const uint32_t*>(
+ FIELD_ADDR_CONST(p, offset + 4)));
return c.d;
}
#define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
write_double_field(p, offset, value)
#endif // V8_TARGET_ARCH_MIPS
-#define READ_FLOAT32x4_FIELD(p, offset) \
- (*reinterpret_cast<float32x4_value_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FLOAT32x4_FIELD(p, offset, value) \
- (*reinterpret_cast<float32x4_value_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_FLOAT64x2_FIELD(p, offset) \
- (*reinterpret_cast<float64x2_value_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FLOAT64x2_FIELD(p, offset, value) \
- (*reinterpret_cast<float64x2_value_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_INT32x4_FIELD(p, offset) \
- (*reinterpret_cast<int32x4_value_t*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_INT32x4_FIELD(p, offset, value) \
- (*reinterpret_cast<int32x4_value_t*>(FIELD_ADDR(p, offset)) = value)
-
-#define READ_FLOAT_FIELD(p, offset) \
- (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)))
-
-#define WRITE_FLOAT_FIELD(p, offset, value) \
- (*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT_FIELD(p, offset) \
- (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
#define READ_INTPTR_FIELD(p, offset) \
- (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT32_FIELD(p, offset) \
- (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT32_FIELD(p, offset) \
- (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT64_FIELD(p, offset) \
- (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT64_FIELD(p, offset, value) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_SHORT_FIELD(p, offset) \
- (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_SHORT_FIELD(p, offset, value) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_BYTE_FIELD(p, offset) \
- (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
+ (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
-#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
- static_cast<byte>(NoBarrier_Load( \
- reinterpret_cast<Atomic8*>(FIELD_ADDR(p, offset))) )
+#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
+ static_cast<byte>(base::NoBarrier_Load( \
+ reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
#define WRITE_BYTE_FIELD(p, offset, value) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
-#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
- NoBarrier_Store(reinterpret_cast<Atomic8*>(FIELD_ADDR(p, offset)), \
- static_cast<Atomic8>(value));
+#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
+ base::NoBarrier_Store( \
+ reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
+ static_cast<base::Atomic8>(value));
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
- return &READ_FIELD(obj, byte_offset);
+ return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
}
-int Smi::value() {
+int Smi::value() const {
return Internals::SmiValue(this);
}
Smi* Smi::FromInt(int value) {
- ASSERT(Smi::IsValid(value));
+ DCHECK(Smi::IsValid(value));
return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
}
Smi* Smi::FromIntptr(intptr_t value) {
- ASSERT(Smi::IsValid(value));
+ DCHECK(Smi::IsValid(value));
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}
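// Illustration (sketch, assuming the usual encoding kSmiTag == 0 and
// kSmiTagSize == 1): with kSmiShiftSize == 0 (32-bit targets),
// Smi::FromInt(5) produces the word (5 << 1) | 0 == 10; with
// kSmiShiftSize == 31 (64-bit targets) the payload lands in the upper
// 32 bits of the word, and Smi::value() reverses the shift.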
bool Smi::IsValid(intptr_t value) {
bool result = Internals::IsValidSmi(value);
- ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
+ DCHECK_EQ(result, value >= kMinValue && value <= kMaxValue);
return result;
}
-MapWord MapWord::FromMap(Map* map) {
+MapWord MapWord::FromMap(const Map* map) {
return MapWord(reinterpret_cast<uintptr_t>(map));
}
HeapObject* MapWord::ToForwardingAddress() {
- ASSERT(IsForwardingAddress());
+ DCHECK(IsForwardingAddress());
return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}
#endif
-Heap* HeapObject::GetHeap() {
+Heap* HeapObject::GetHeap() const {
Heap* heap =
- MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
- SLOW_ASSERT(heap != NULL);
+ MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap();
+ SLOW_DCHECK(heap != NULL);
return heap;
}
-Isolate* HeapObject::GetIsolate() {
+Isolate* HeapObject::GetIsolate() const {
return GetHeap()->isolate();
}
-Map* HeapObject::map() {
+Map* HeapObject::map() const {
+#ifdef DEBUG
+ // Clear mark potentially added by PathTracer.
+ uintptr_t raw_value =
+ map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag);
+ return MapWord::FromRawValue(raw_value).ToMap();
+#else
return map_word().ToMap();
+#endif
}
}
-MapWord HeapObject::map_word() {
+MapWord HeapObject::map_word() const {
return MapWord(
reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
}
}
-MapWord HeapObject::synchronized_map_word() {
+MapWord HeapObject::synchronized_map_word() const {
return MapWord(
reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
}
HeapObject* HeapObject::FromAddress(Address address) {
- ASSERT_TAG_ALIGNED(address);
+ DCHECK_TAG_ALIGNED(address);
return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
}
}
+bool HeapObject::MayContainRawValues() {
+ InstanceType type = map()->instance_type();
+ if (type <= LAST_NAME_TYPE) {
+ if (type == SYMBOL_TYPE) {
+ return false;
+ }
+ DCHECK(type < FIRST_NONSTRING_TYPE);
+ // There are four string representations: sequential strings, external
+ // strings, cons strings, and sliced strings.
+ // Only the former two contain raw values and no heap pointers (besides the
+ // map-word).
+ return ((type & kIsIndirectStringMask) != kIsIndirectStringTag);
+ }
+ // The ConstantPoolArray contains heap pointers, but also raw values.
+ if (type == CONSTANT_POOL_ARRAY_TYPE) return true;
+ return (type <= LAST_DATA_TYPE);
+}
+
+
void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
}
-double HeapNumber::value() {
+double HeapNumber::value() const {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
}
-const char* Float32x4::Name() {
- return "float32x4";
-}
-
-
-int Float32x4::kRuntimeAllocatorId() {
- return Runtime::kAllocateFloat32x4;
-}
-
-
-int Float32x4::kMapRootIndex() {
- return Heap::kFloat32x4MapRootIndex;
-}
-
-
-float32x4_value_t Float32x4::value() {
- return READ_FLOAT32x4_FIELD(this, kValueOffset);
-}
-
-
-void Float32x4::set_value(float32x4_value_t value) {
- WRITE_FLOAT32x4_FIELD(this, kValueOffset, value);
-}
-
-
-float Float32x4::getAt(int index) {
- ASSERT(index >= 0 && index < kLanes);
- return READ_FLOAT_FIELD(this, kValueOffset + index * kFloatSize);
-}
-
-
-const char* Float64x2::Name() {
- return "float64x2";
-}
-
-
-int Float64x2::kRuntimeAllocatorId() {
- return Runtime::kAllocateFloat64x2;
-}
-
-
-int Float64x2::kMapRootIndex() {
- return Heap::kFloat64x2MapRootIndex;
-}
-
-
-float64x2_value_t Float64x2::value() {
- return READ_FLOAT64x2_FIELD(this, kValueOffset);
-}
-
-
-void Float64x2::set_value(float64x2_value_t value) {
- WRITE_FLOAT64x2_FIELD(this, kValueOffset, value);
-}
-
-
-double Float64x2::getAt(int index) {
- ASSERT(index >= 0 && index < kLanes);
- return READ_DOUBLE_FIELD(this, kValueOffset + index * kDoubleSize);
-}
-
-
-const char* Int32x4::Name() {
- return "int32x4";
-}
-
-
-int Int32x4::kRuntimeAllocatorId() {
- return Runtime::kAllocateInt32x4;
-}
-
-
-int Int32x4::kMapRootIndex() {
- return Heap::kInt32x4MapRootIndex;
-}
-
-
-int32x4_value_t Int32x4::value() {
- return READ_INT32x4_FIELD(this, kValueOffset);
-}
-
-
-void Int32x4::set_value(int32x4_value_t value) {
- WRITE_INT32x4_FIELD(this, kValueOffset, value);
-}
-
-
-int32_t Int32x4::getAt(int index) {
- ASSERT(index >= 0 && index < kLanes);
- return READ_INT32_FIELD(this, kValueOffset + index * kInt32Size);
-}
-
-
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
}
-FixedArrayBase* JSObject::elements() {
+FixedArrayBase* JSObject::elements() const {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
}
void JSObject::ValidateElements(Handle<JSObject> object) {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Validate(object);
void AllocationSite::MarkZombie() {
- ASSERT(!IsZombie());
+ DCHECK(!IsZombie());
Initialize();
set_pretenure_decision(kZombie);
}
int value = pretenure_data()->value();
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
- ASSERT((GetHeap()->MaxSemiSpaceSize() /
+ DCHECK((GetHeap()->MaxSemiSpaceSize() /
(StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
- ASSERT(count < MementoFoundCountBits::kMax);
+ DCHECK(count < MementoFoundCountBits::kMax);
set_pretenure_data(
Smi::FromInt(MementoFoundCountBits::update(value, count)),
SKIP_WRITE_BARRIER);
int value = memento_found_count();
set_memento_found_count(value + 1);
- return value == 0;
+ return memento_found_count() == kPretenureMinimumCreated;
}
inline void AllocationSite::IncrementMementoCreateCount() {
- ASSERT(FLAG_allocation_site_pretenuring);
+ DCHECK(FLAG_allocation_site_pretenuring);
int value = memento_create_count();
set_memento_create_count(value + 1);
}
-inline bool AllocationSite::DigestPretenuringFeedback() {
- bool decision_changed = false;
+inline bool AllocationSite::MakePretenureDecision(
+ PretenureDecision current_decision,
+ double ratio,
+ bool maximum_size_scavenge) {
+ // Here we just allow state transitions from undecided or maybe tenure
+ // to don't tenure, maybe tenure, or tenure.
+ if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
+ if (ratio >= kPretenureRatio) {
+ // We just transition into tenure state when the semi-space was at
+ // maximum capacity.
+ if (maximum_size_scavenge) {
+ set_deopt_dependent_code(true);
+ set_pretenure_decision(kTenure);
+ // Currently we just need to deopt when we make a state transition to
+ // tenure.
+ return true;
+ }
+ set_pretenure_decision(kMaybeTenure);
+ } else {
+ set_pretenure_decision(kDontTenure);
+ }
+ }
+ return false;
+}
+
+
+inline bool AllocationSite::DigestPretenuringFeedback(
+ bool maximum_size_scavenge) {
+ bool deopt = false;
int create_count = memento_create_count();
int found_count = memento_found_count();
bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
double ratio =
minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
static_cast<double>(found_count) / create_count : 0.0;
- PretenureFlag current_mode = GetPretenureMode();
+ PretenureDecision current_decision = pretenure_decision();
if (minimum_mementos_created) {
- PretenureDecision result = ratio >= kPretenureRatio
- ? kTenure
- : kDontTenure;
- set_pretenure_decision(result);
- if (current_mode != GetPretenureMode()) {
- decision_changed = true;
- set_deopt_dependent_code(true);
- }
+ deopt = MakePretenureDecision(
+ current_decision, ratio, maximum_size_scavenge);
}
if (FLAG_trace_pretenuring_statistics) {
PrintF(
"AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
static_cast<void*>(this), create_count, found_count, ratio,
- current_mode == TENURED ? "tenured" : "not tenured",
- GetPretenureMode() == TENURED ? "tenured" : "not tenured");
+ PretenureDecisionName(current_decision),
+ PretenureDecisionName(pretenure_decision()));
}
// Clear feedback calculation fields until the next gc.
set_memento_found_count(0);
set_memento_create_count(0);
- return decision_changed;
+ return deopt;
}
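// Illustration (sketch): with create_count memento creations and found_count
// mementos observed in new space, ratio == found_count / create_count. Once
// at least kPretenureMinimumCreated mementos have been created, a ratio at
// or above kPretenureRatio moves the site towards tenuring (kMaybeTenure, or
// kTenure plus a deopt of dependent code when the scavenge ran at maximum
// semi-space size); otherwise the site is marked kDontTenure.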
ElementsKind target_kind = current_kind;
{
DisallowHeapAllocation no_allocation;
- ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
bool is_holey = IsFastHoleyElementsKind(current_kind);
if (current_kind == FAST_HOLEY_ELEMENTS) return;
Heap* heap = object->GetHeap();
EnsureElementsMode mode) {
Heap* heap = object->GetHeap();
if (elements->map() != heap->fixed_double_array_map()) {
- ASSERT(elements->map() == heap->fixed_array_map() ||
+ DCHECK(elements->map() == heap->fixed_array_map() ||
elements->map() == heap->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
return;
}
- ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+ DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
} else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
Handle<Map> new_map,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
- ASSERT((object->map()->has_fast_smi_or_object_elements() ||
+ DCHECK((object->map()->has_fast_smi_or_object_elements() ||
(*value == object->GetHeap()->empty_fixed_array())) ==
(value->map() == object->GetHeap()->fixed_array_map() ||
value->map() == object->GetHeap()->fixed_cow_array_map()));
- ASSERT((*value == object->GetHeap()->empty_fixed_array()) ||
+ DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
(object->map()->has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
void JSObject::initialize_properties() {
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
}
}
-Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
+Handle<String> Map::ExpectedTransitionKey(Handle<Map> map) {
DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
TransitionArray* transitions = map->transitions();
}
-Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
- ASSERT(!ExpectedTransitionKey(map).is_null());
+Handle<Map> Map::ExpectedTransitionTarget(Handle<Map> map) {
+ DCHECK(!ExpectedTransitionKey(map).is_null());
return Handle<Map>(map->transitions()->GetTarget(
TransitionArray::kSimpleTransitionIndex));
}
-Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
+Handle<Map> Map::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
DisallowHeapAllocation no_allocation;
if (!map->HasTransitionArray()) return Handle<Map>::null();
TransitionArray* transitions = map->transitions();
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
-byte Oddball::kind() {
+byte Oddball::kind() const {
return Smi::cast(READ_FIELD(this, kKindOffset))->value();
}
}
-Object* Cell::value() {
+Object* Cell::value() const {
return READ_FIELD(this, kValueOffset);
}
void Cell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
- ASSERT(!val->IsPropertyCell() && !val->IsCell());
+ DCHECK(!val->IsPropertyCell() && !val->IsCell());
WRITE_FIELD(this, kValueOffset, val);
}
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
-Object* PropertyCell::type_raw() {
+Object* PropertyCell::type_raw() const {
return READ_FIELD(this, kTypeOffset);
}
}
+Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
+
+
+void WeakCell::clear() {
+ DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT);
+ WRITE_FIELD(this, kValueOffset, Smi::FromInt(0));
+}
+
+
+void WeakCell::initialize(HeapObject* val) {
+ WRITE_FIELD(this, kValueOffset, val);
+ WRITE_BARRIER(GetHeap(), this, kValueOffset, val);
+}
+
+
+bool WeakCell::cleared() const { return value() == Smi::FromInt(0); }
+
+
+Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); }
+
+
+void WeakCell::set_next(Object* val, WriteBarrierMode mode) {
+ WRITE_FIELD(this, kNextOffset, val);
+ if (mode == UPDATE_WRITE_BARRIER) {
+ WRITE_BARRIER(GetHeap(), this, kNextOffset, val);
+ }
+}
+
+
int JSObject::GetHeaderSize() {
InstanceType type = map()->instance_type();
// Check for the most common kind of JavaScript object before
int JSObject::GetInternalFieldCount() {
- ASSERT(1 << kPointerSizeLog2 == kPointerSize);
+ DCHECK(1 << kPointerSizeLog2 == kPointerSize);
// Make sure to adjust for the number of in-object properties. These
// properties do contribute to the size, but are not internal fields.
return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
int JSObject::GetInternalFieldOffset(int index) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
+ DCHECK(index < GetInternalFieldCount() && index >= 0);
return GetHeaderSize() + (kPointerSize * index);
}
Object* JSObject::GetInternalField(int index) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
+ DCHECK(index < GetInternalFieldCount() && index >= 0);
// Internal objects do follow immediately after the header, whereas in-object
// properties are at the end of the object. Therefore there is no need
// to adjust the index here.
void JSObject::SetInternalField(int index, Object* value) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
+ DCHECK(index < GetInternalFieldCount() && index >= 0);
// Internal objects do follow immediately after the header, whereas in-object
// properties are at the end of the object. Therefore there is no need
// to adjust the index here.
void JSObject::SetInternalField(int index, Smi* value) {
- ASSERT(index < GetInternalFieldCount() && index >= 0);
+ DCHECK(index < GetInternalFieldCount() && index >= 0);
// Internal objects do follow immediately after the header, whereas in-object
// properties are at the end of the object. Therefore there is no need
// to adjust the index here.
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
-Object* JSObject::RawFastPropertyAt(int index) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
- return READ_FIELD(this, offset);
+Object* JSObject::RawFastPropertyAt(FieldIndex index) {
+ if (index.is_inobject()) {
+ return READ_FIELD(this, index.offset());
} else {
- ASSERT(index < properties()->length());
- return properties()->get(index);
+ return properties()->get(index.outobject_array_index());
}
}
-void JSObject::FastPropertyAtPut(int index, Object* value) {
- // Adjust for the number of properties stored in the object.
- index -= map()->inobject_properties();
- if (index < 0) {
- int offset = map()->instance_size() + (index * kPointerSize);
+void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
+ if (index.is_inobject()) {
+ int offset = index.offset();
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
} else {
- ASSERT(index < properties()->length());
- properties()->set(index, value);
+ properties()->set(index.outobject_array_index(), value);
}
}
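// Illustration (sketch): a FieldIndex that is_inobject() resolves to a byte
// offset inside the object itself (one of the slots reserved by the map),
// while any remaining fast properties live in the separate properties()
// FixedArray and are addressed by outobject_array_index().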
void JSObject::InitializeBody(Map* map,
Object* pre_allocated_value,
Object* filler_value) {
- ASSERT(!filler_value->IsHeapObject() ||
+ DCHECK(!filler_value->IsHeapObject() ||
!GetHeap()->InNewSpace(filler_value));
- ASSERT(!pre_allocated_value->IsHeapObject() ||
+ DCHECK(!pre_allocated_value->IsHeapObject() ||
!GetHeap()->InNewSpace(pre_allocated_value));
int size = map->instance_size();
int offset = kHeaderSize;
if (filler_value != pre_allocated_value) {
int pre_allocated = map->pre_allocated_property_fields();
- ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
+ DCHECK(pre_allocated * kPointerSize + kHeaderSize <= size);
for (int i = 0; i < pre_allocated; i++) {
WRITE_FIELD(this, offset, pre_allocated_value);
offset += kPointerSize;
bool JSObject::HasFastProperties() {
- ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
+ DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
return !properties()->IsDictionary();
}
-bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
- // Allow extra fast properties if the object has more than
- // kFastPropertiesSoftLimit in-object properties. When this is the case, it is
- // very unlikely that the object is being used as a dictionary and there is a
- // good chance that allowing more map transitions will be worth it.
- Map* map = this->map();
- if (map->unused_property_fields() != 0) return false;
-
- int inobject = map->inobject_properties();
-
- int limit;
- if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
- limit = Max(inobject, kMaxFastProperties);
- } else {
- limit = Max(inobject, kFastPropertiesSoftLimit);
- }
- return properties()->length() > limit;
+bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
+ if (unused_property_fields() != 0) return false;
+ if (is_prototype_map()) return false;
+ int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
+ int limit = Max(minimum, inobject_properties());
+ int external = NumberOfFields() - inobject_properties();
+ return external > limit;
}
}
-FixedArrayBase* FixedArrayBase::cast(Object* object) {
- ASSERT(object->IsFixedArrayBase());
- return reinterpret_cast<FixedArrayBase*>(object);
-}
-
-
-Object* FixedArray::get(int index) {
- SLOW_ASSERT(index >= 0 && index < this->length());
+Object* FixedArray::get(int index) const {
+ SLOW_DCHECK(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
void FixedArray::set(int index, Smi* value) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
+ DCHECK(map() != GetHeap()->fixed_cow_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
}
void FixedArray::set(int index, Object* value) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
+ DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
+ DCHECK_EQ(FIXED_ARRAY_TYPE, map()->instance_type());
+ DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
inline bool FixedDoubleArray::is_the_hole_nan(double value) {
- return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+ return bit_cast<uint64_t, double>(value) == kHoleNanInt64;
}
inline double FixedDoubleArray::hole_nan_as_double() {
- return BitCast<double, uint64_t>(kHoleNanInt64);
+ return bit_cast<double, uint64_t>(kHoleNanInt64);
}
inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
- ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
- ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
- return OS::nan_value();
+ DCHECK(bit_cast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
+ DCHECK((bit_cast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
+ return base::OS::nan_value();
}
double FixedDoubleArray::get_scalar(int index) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
- ASSERT(index >= 0 && index < this->length());
+ DCHECK(index >= 0 && index < this->length());
double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
- ASSERT(!is_the_hole_nan(result));
+ DCHECK(!is_the_hole_nan(result));
return result;
}
int64_t FixedDoubleArray::get_representation(int index) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
- ASSERT(index >= 0 && index < this->length());
+ DCHECK(index >= 0 && index < this->length());
return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
void FixedDoubleArray::set(int index, double value) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
void FixedDoubleArray::set_the_hole(int index) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+ DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
}
-void ConstantPoolArray::set_weak_object_state(
- ConstantPoolArray::WeakObjectState state) {
- int old_layout_field = READ_INT_FIELD(this, kArrayLayoutOffset);
- int new_layout_field = WeakObjectStateField::update(old_layout_field, state);
- WRITE_INT_FIELD(this, kArrayLayoutOffset, new_layout_field);
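+// NumberOfEntries bundles the per-type entry counts for one layout section
+// (small or extended) of a ConstantPoolArray.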
+void ConstantPoolArray::NumberOfEntries::increment(Type type) {
+ DCHECK(type < NUMBER_OF_TYPES);
+ element_counts_[type]++;
}
-ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() {
- int layout_field = READ_INT_FIELD(this, kArrayLayoutOffset);
- return WeakObjectStateField::decode(layout_field);
+int ConstantPoolArray::NumberOfEntries::equals(
+ const ConstantPoolArray::NumberOfEntries& other) const {
+ for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+ if (element_counts_[i] != other.element_counts_[i]) return false;
+ }
+ return true;
}
-int ConstantPoolArray::first_int64_index() {
- return 0;
+bool ConstantPoolArray::NumberOfEntries::is_empty() const {
+ return total_count() == 0;
+}
+
+
+int ConstantPoolArray::NumberOfEntries::count_of(Type type) const {
+ DCHECK(type < NUMBER_OF_TYPES);
+ return element_counts_[type];
+}
+
+
+int ConstantPoolArray::NumberOfEntries::base_of(Type type) const {
+ int base = 0;
+ DCHECK(type < NUMBER_OF_TYPES);
+ for (int i = 0; i < type; i++) {
+ base += element_counts_[i];
+ }
+ return base;
+}
+
+
+int ConstantPoolArray::NumberOfEntries::total_count() const {
+ int count = 0;
+ for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+ count += element_counts_[i];
+ }
+ return count;
}
-int ConstantPoolArray::first_code_ptr_index() {
- int layout_field = READ_INT_FIELD(this, kArrayLayoutOffset);
- return first_int64_index() +
- NumberOfInt64EntriesField::decode(layout_field);
+int ConstantPoolArray::NumberOfEntries::are_in_range(int min, int max) const {
+ for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
+ if (element_counts_[i] < min || element_counts_[i] > max) {
+ return false;
+ }
+ }
+ return true;
}
-int ConstantPoolArray::first_heap_ptr_index() {
- int layout_field = READ_INT_FIELD(this, kArrayLayoutOffset);
- return first_code_ptr_index() +
- NumberOfCodePtrEntriesField::decode(layout_field);
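+// Iterator yields the indices of entries of a single type, walking the small
+// section first and then the extended section when one exists.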
+int ConstantPoolArray::Iterator::next_index() {
+ DCHECK(!is_finished());
+ int ret = next_index_++;
+ update_section();
+ return ret;
}
-int ConstantPoolArray::first_int32_index() {
- int layout_field = READ_INT_FIELD(this, kArrayLayoutOffset);
- return first_heap_ptr_index() +
- NumberOfHeapPtrEntriesField::decode(layout_field);
+bool ConstantPoolArray::Iterator::is_finished() {
+ return next_index_ > array_->last_index(type_, final_section_);
}
-int ConstantPoolArray::count_of_int64_entries() {
- return first_code_ptr_index();
+void ConstantPoolArray::Iterator::update_section() {
+ if (next_index_ > array_->last_index(type_, current_section_) &&
+ current_section_ != final_section_) {
+ DCHECK(final_section_ == EXTENDED_SECTION);
+ current_section_ = EXTENDED_SECTION;
+ next_index_ = array_->first_index(type_, EXTENDED_SECTION);
+ }
}
-int ConstantPoolArray::count_of_code_ptr_entries() {
- return first_heap_ptr_index() - first_code_ptr_index();
+bool ConstantPoolArray::is_extended_layout() {
+ uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
+ return IsExtendedField::decode(small_layout_1);
}
-int ConstantPoolArray::count_of_heap_ptr_entries() {
- return first_int32_index() - first_heap_ptr_index();
+ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() {
+ return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION;
}
-int ConstantPoolArray::count_of_int32_entries() {
- return length() - first_int32_index();
+int ConstantPoolArray::first_extended_section_index() {
+ DCHECK(is_extended_layout());
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ return TotalCountField::decode(small_layout_2);
}
-void ConstantPoolArray::Init(int number_of_int64_entries,
- int number_of_code_ptr_entries,
- int number_of_heap_ptr_entries,
- int number_of_int32_entries) {
- set_length(number_of_int64_entries +
- number_of_code_ptr_entries +
- number_of_heap_ptr_entries +
- number_of_int32_entries);
- int layout_field =
- NumberOfInt64EntriesField::encode(number_of_int64_entries) |
- NumberOfCodePtrEntriesField::encode(number_of_code_ptr_entries) |
- NumberOfHeapPtrEntriesField::encode(number_of_heap_ptr_entries) |
- WeakObjectStateField::encode(NO_WEAK_OBJECTS);
- WRITE_INT_FIELD(this, kArrayLayoutOffset, layout_field);
+int ConstantPoolArray::get_extended_section_header_offset() {
+ return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size);
+}
+
+
+ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() {
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ return WeakObjectStateField::decode(small_layout_2);
+}
+
+
+void ConstantPoolArray::set_weak_object_state(
+ ConstantPoolArray::WeakObjectState state) {
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ small_layout_2 = WeakObjectStateField::update(small_layout_2, state);
+ WRITE_INT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
+}
+
+
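+// Entries of each type are stored contiguously within a section; the first
+// index of a type is the section's base index plus the counts of all
+// preceding types in that section.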
+int ConstantPoolArray::first_index(Type type, LayoutSection section) {
+ int index = 0;
+ if (section == EXTENDED_SECTION) {
+ DCHECK(is_extended_layout());
+ index += first_extended_section_index();
+ }
+
+ for (Type type_iter = FIRST_TYPE; type_iter < type;
+ type_iter = next_type(type_iter)) {
+ index += number_of_entries(type_iter, section);
+ }
+
+ return index;
+}
+
+
+int ConstantPoolArray::last_index(Type type, LayoutSection section) {
+ return first_index(type, section) + number_of_entries(type, section) - 1;
+}
+
+
+int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) {
+ if (section == SMALL_SECTION) {
+ uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ switch (type) {
+ case INT64:
+ return Int64CountField::decode(small_layout_1);
+ case CODE_PTR:
+ return CodePtrCountField::decode(small_layout_1);
+ case HEAP_PTR:
+ return HeapPtrCountField::decode(small_layout_1);
+ case INT32:
+ return Int32CountField::decode(small_layout_2);
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+ } else {
+ DCHECK(section == EXTENDED_SECTION && is_extended_layout());
+ int offset = get_extended_section_header_offset();
+ switch (type) {
+ case INT64:
+ offset += kExtendedInt64CountOffset;
+ break;
+ case CODE_PTR:
+ offset += kExtendedCodePtrCountOffset;
+ break;
+ case HEAP_PTR:
+ offset += kExtendedHeapPtrCountOffset;
+ break;
+ case INT32:
+ offset += kExtendedInt32CountOffset;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return READ_INT_FIELD(this, offset);
+ }
+}
+
+
+bool ConstantPoolArray::offset_is_type(int offset, Type type) {
+ return (offset >= OffsetOfElementAt(first_index(type, SMALL_SECTION)) &&
+ offset <= OffsetOfElementAt(last_index(type, SMALL_SECTION))) ||
+ (is_extended_layout() &&
+ offset >= OffsetOfElementAt(first_index(type, EXTENDED_SECTION)) &&
+ offset <= OffsetOfElementAt(last_index(type, EXTENDED_SECTION)));
+}
+
+
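+// Determines the entry type stored at |index| by locating its section and
+// scanning the per-type index ranges in layout order.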
+ConstantPoolArray::Type ConstantPoolArray::get_type(int index) {
+ LayoutSection section;
+ if (is_extended_layout() && index >= first_extended_section_index()) {
+ section = EXTENDED_SECTION;
+ } else {
+ section = SMALL_SECTION;
+ }
+
+ Type type = FIRST_TYPE;
+ while (index > last_index(type, section)) {
+ type = next_type(type);
+ }
+ DCHECK(type <= LAST_TYPE);
+ return type;
}
int64_t ConstantPoolArray::get_int64_entry(int index) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_code_ptr_index());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == INT64);
return READ_INT64_FIELD(this, OffsetOfElementAt(index));
}
+
double ConstantPoolArray::get_int64_entry_as_double(int index) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= 0 && index < first_code_ptr_index());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == INT64);
return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
}
Address ConstantPoolArray::get_code_ptr_entry(int index) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == CODE_PTR);
return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
}
Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_heap_ptr_index() && index < first_int32_index());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == HEAP_PTR);
return READ_FIELD(this, OffsetOfElementAt(index));
}
int32_t ConstantPoolArray::get_int32_entry(int index) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int32_index() && index < length());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == INT32);
return READ_INT32_FIELD(this, OffsetOfElementAt(index));
}
+void ConstantPoolArray::set(int index, int64_t value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == INT64);
+ WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
+void ConstantPoolArray::set(int index, double value) {
+ STATIC_ASSERT(kDoubleSize == kInt64Size);
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == INT64);
+ WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
+}
+
+
void ConstantPoolArray::set(int index, Address value) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == CODE_PTR);
WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
}
void ConstantPoolArray::set(int index, Object* value) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_code_ptr_index() && index < first_int32_index());
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(!GetHeap()->InNewSpace(value));
+ DCHECK(get_type(index) == HEAP_PTR);
WRITE_FIELD(this, OffsetOfElementAt(index), value);
WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
}
-void ConstantPoolArray::set(int index, int64_t value) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
- WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
+void ConstantPoolArray::set(int index, int32_t value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(get_type(index) == INT32);
+ WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
}
-void ConstantPoolArray::set(int index, double value) {
- STATIC_ASSERT(kDoubleSize == kInt64Size);
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
- WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
+void ConstantPoolArray::set_at_offset(int offset, int32_t value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(offset_is_type(offset, INT32));
+ WRITE_INT32_FIELD(this, offset, value);
}
-void ConstantPoolArray::set(int index, int32_t value) {
- ASSERT(map() == GetHeap()->constant_pool_array_map());
- ASSERT(index >= this->first_int32_index() && index < length());
- WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
+void ConstantPoolArray::set_at_offset(int offset, int64_t value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(offset_is_type(offset, INT64));
+ WRITE_INT64_FIELD(this, offset, value);
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, double value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(offset_is_type(offset, INT64));
+ WRITE_DOUBLE_FIELD(this, offset, value);
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, Address value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(offset_is_type(offset, CODE_PTR));
+ WRITE_FIELD(this, offset, reinterpret_cast<Object*>(value));
+ WRITE_BARRIER(GetHeap(), this, offset, reinterpret_cast<Object*>(value));
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, Object* value) {
+ DCHECK(map() == GetHeap()->constant_pool_array_map());
+ DCHECK(!GetHeap()->InNewSpace(value));
+ DCHECK(offset_is_type(offset, HEAP_PTR));
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+
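+// Writes the small-section entry counts into the two packed layout words,
+// clears the extended-layout bit, and zeroes any header padding word.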
+void ConstantPoolArray::Init(const NumberOfEntries& small) {
+ uint32_t small_layout_1 =
+ Int64CountField::encode(small.count_of(INT64)) |
+ CodePtrCountField::encode(small.count_of(CODE_PTR)) |
+ HeapPtrCountField::encode(small.count_of(HEAP_PTR)) |
+ IsExtendedField::encode(false);
+ uint32_t small_layout_2 =
+ Int32CountField::encode(small.count_of(INT32)) |
+ TotalCountField::encode(small.total_count()) |
+ WeakObjectStateField::encode(NO_WEAK_OBJECTS);
+ WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
+ WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
+ if (kHeaderSize != kFirstEntryOffset) {
+ DCHECK(kFirstEntryOffset - kHeaderSize == kInt32Size);
+ WRITE_UINT32_FIELD(this, kHeaderSize, 0); // Zero out header padding.
+ }
+}
+
+
+void ConstantPoolArray::InitExtended(const NumberOfEntries& small,
+ const NumberOfEntries& extended) {
+ // Initialize small layout fields first.
+ Init(small);
+
+ // Set is_extended_layout field.
+ uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
+ small_layout_1 = IsExtendedField::update(small_layout_1, true);
+ WRITE_INT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
+
+ // Initialize the extended layout fields.
+ int extended_header_offset = get_extended_section_header_offset();
+ WRITE_INT32_FIELD(this, extended_header_offset + kExtendedInt64CountOffset,
+ extended.count_of(INT64));
+ WRITE_INT32_FIELD(this, extended_header_offset + kExtendedCodePtrCountOffset,
+ extended.count_of(CODE_PTR));
+ WRITE_INT32_FIELD(this, extended_header_offset + kExtendedHeapPtrCountOffset,
+ extended.count_of(HEAP_PTR));
+ WRITE_INT32_FIELD(this, extended_header_offset + kExtendedInt32CountOffset,
+ extended.count_of(INT32));
+}
+
+
+int ConstantPoolArray::size() {
+ NumberOfEntries small(this, SMALL_SECTION);
+ if (!is_extended_layout()) {
+ return SizeFor(small);
+ } else {
+ NumberOfEntries extended(this, EXTENDED_SECTION);
+ return SizeForExtended(small, extended);
+ }
+}
+
+
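+// Total number of entries: the small-section total plus, for extended
+// layouts, the per-type counts stored in the extended section header.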
+int ConstantPoolArray::length() {
+ uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
+ int length = TotalCountField::decode(small_layout_2);
+ if (is_extended_layout()) {
+ length += number_of_entries(INT64, EXTENDED_SECTION) +
+ number_of_entries(CODE_PTR, EXTENDED_SECTION) +
+ number_of_entries(HEAP_PTR, EXTENDED_SECTION) +
+ number_of_entries(INT32, EXTENDED_SECTION);
+ }
+ return length;
}
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
+ DCHECK(map() != GetHeap()->fixed_cow_array_map());
+ DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < array->length());
+ DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
+ DCHECK(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
Heap* heap = array->GetHeap();
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < array->length());
- ASSERT(!array->GetHeap()->InNewSpace(value));
+ DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
+ DCHECK(index >= 0 && index < array->length());
+ DCHECK(!array->GetHeap()->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
void FixedArray::set_undefined(int index) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
+ DCHECK(map() != GetHeap()->fixed_cow_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->undefined_value());
void FixedArray::set_null(int index) {
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
+ DCHECK(index >= 0 && index < this->length());
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->null_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->null_value());
void FixedArray::set_the_hole(int index) {
- ASSERT(map() != GetHeap()->fixed_cow_array_map());
- ASSERT(index >= 0 && index < this->length());
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
+ DCHECK(map() != GetHeap()->fixed_cow_array_map());
+ DCHECK(index >= 0 && index < this->length());
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->the_hole_value());
bool DescriptorArray::IsEmpty() {
- ASSERT(length() >= kFirstIndex ||
+ DCHECK(length() >= kFirstIndex ||
this == GetHeap()->empty_descriptor_array());
return length() < kFirstIndex;
}
// Perform a binary search in a fixed array. Low and high are entry indices. If
// there are three entries in this array it should be called with low=0 and
// high=2.
-template<SearchMode search_mode, typename T>
-int BinarySearch(T* array, Name* name, int low, int high, int valid_entries) {
+template <SearchMode search_mode, typename T>
+int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
+ int* out_insertion_index) {
+ DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == NULL);
uint32_t hash = name->Hash();
int limit = high;
- ASSERT(low <= high);
+ DCHECK(low <= high);
while (low != high) {
int mid = (low + high) / 2;
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
Name* entry = array->GetKey(sort_index);
- if (entry->Hash() != hash) break;
+ uint32_t current_hash = entry->Hash();
+ if (current_hash != hash) {
+ if (out_insertion_index != NULL) {
+ *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
+ }
+ return T::kNotFound;
+ }
if (entry->Equals(name)) {
if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
return sort_index;
}
}
+ if (out_insertion_index != NULL) *out_insertion_index = limit + 1;
return T::kNotFound;
}
// Perform a linear search in this fixed array. len is the number of entry
// indices that are valid.
-template<SearchMode search_mode, typename T>
-int LinearSearch(T* array, Name* name, int len, int valid_entries) {
+template <SearchMode search_mode, typename T>
+int LinearSearch(T* array, Name* name, int len, int valid_entries,
+ int* out_insertion_index) {
uint32_t hash = name->Hash();
if (search_mode == ALL_ENTRIES) {
for (int number = 0; number < len; number++) {
int sorted_index = array->GetSortedKeyIndex(number);
Name* entry = array->GetKey(sorted_index);
uint32_t current_hash = entry->Hash();
- if (current_hash > hash) break;
+ if (current_hash > hash) {
+ if (out_insertion_index != NULL) *out_insertion_index = sorted_index;
+ return T::kNotFound;
+ }
if (current_hash == hash && entry->Equals(name)) return sorted_index;
}
+ if (out_insertion_index != NULL) *out_insertion_index = len;
+ return T::kNotFound;
} else {
- ASSERT(len >= valid_entries);
+ DCHECK(len >= valid_entries);
+ DCHECK_EQ(NULL, out_insertion_index); // Not supported here.
for (int number = 0; number < valid_entries; number++) {
Name* entry = array->GetKey(number);
uint32_t current_hash = entry->Hash();
if (current_hash == hash && entry->Equals(name)) return number;
}
+ return T::kNotFound;
}
- return T::kNotFound;
}
-template<SearchMode search_mode, typename T>
-int Search(T* array, Name* name, int valid_entries) {
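+// When the name is not found and out_insertion_index is non-NULL (only
+// supported for ALL_ENTRIES searches), it receives the sorted position at
+// which the name would have to be inserted.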
+template <SearchMode search_mode, typename T>
+int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
if (search_mode == VALID_ENTRIES) {
- SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries));
+ SLOW_DCHECK(array->IsSortedNoDuplicates(valid_entries));
} else {
- SLOW_ASSERT(array->IsSortedNoDuplicates());
+ SLOW_DCHECK(array->IsSortedNoDuplicates());
}
int nof = array->number_of_entries();
- if (nof == 0) return T::kNotFound;
+ if (nof == 0) {
+ if (out_insertion_index != NULL) *out_insertion_index = 0;
+ return T::kNotFound;
+ }
// Fast case: do linear search for small arrays.
const int kMaxElementsForLinearSearch = 8;
nof <= kMaxElementsForLinearSearch) ||
(search_mode == VALID_ENTRIES &&
valid_entries <= (kMaxElementsForLinearSearch * 3))) {
- return LinearSearch<search_mode>(array, name, nof, valid_entries);
+ return LinearSearch<search_mode>(array, name, nof, valid_entries,
+ out_insertion_index);
}
// Slow case: perform binary search.
- return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries);
+ return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries,
+ out_insertion_index);
}
int DescriptorArray::Search(Name* name, int valid_descriptors) {
- return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors);
+ return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, NULL);
}
FixedArrayBase* Map::GetInitialElements() {
if (has_fast_smi_or_object_elements() ||
has_fast_double_elements()) {
- ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+ DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
return GetHeap()->empty_fixed_array();
} else if (has_external_array_elements()) {
ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(this);
- ASSERT(!GetHeap()->InNewSpace(empty_array));
+ DCHECK(!GetHeap()->InNewSpace(empty_array));
return empty_array;
} else if (has_fixed_typed_array_elements()) {
FixedTypedArrayBase* empty_array =
GetHeap()->EmptyFixedTypedArrayForMap(this);
- ASSERT(!GetHeap()->InNewSpace(empty_array));
+ DCHECK(!GetHeap()->InNewSpace(empty_array));
return empty_array;
} else {
UNREACHABLE();
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
}
Name* DescriptorArray::GetKey(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
return Name::cast(get(ToKeyIndex(descriptor_number)));
}
void DescriptorArray::SetRepresentation(int descriptor_index,
Representation representation) {
- ASSERT(!representation.IsNone());
+ DCHECK(!representation.IsNone());
PropertyDetails details = GetDetails(descriptor_index);
set(ToDetailsIndex(descriptor_index),
details.CopyWithRepresentation(representation).AsSmi());
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
return RawFieldOfElementAt(ToValueIndex(descriptor_number));
}
+int DescriptorArray::GetValueOffset(int descriptor_number) {
+ return OffsetOfElementAt(ToValueIndex(descriptor_number));
+}
+
+
Object* DescriptorArray::GetValue(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
return get(ToValueIndex(descriptor_number));
}
PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
Object* details = get(ToDetailsIndex(descriptor_number));
return PropertyDetails(Smi::cast(details));
}
int DescriptorArray::GetFieldIndex(int descriptor_number) {
- ASSERT(GetDetails(descriptor_number).type() == FIELD);
+ DCHECK(GetDetails(descriptor_number).type() == FIELD);
return GetDetails(descriptor_number).field_index();
}
HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
- ASSERT(GetDetails(descriptor_number).type() == FIELD);
+ DCHECK(GetDetails(descriptor_number).type() == FIELD);
return HeapType::cast(GetValue(descriptor_number));
}
Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
- ASSERT(GetType(descriptor_number) == CALLBACKS);
+ DCHECK(GetType(descriptor_number) == CALLBACKS);
return GetValue(descriptor_number);
}
AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
- ASSERT(GetType(descriptor_number) == CALLBACKS);
+ DCHECK(GetType(descriptor_number) == CALLBACKS);
Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
}
Descriptor* desc,
const WhitenessWitness&) {
// Range check.
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
NoIncrementalWriteBarrierSet(this,
ToKeyIndex(descriptor_number),
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
// Range check.
- ASSERT(descriptor_number < number_of_descriptors());
+ DCHECK(descriptor_number < number_of_descriptors());
set(ToKeyIndex(descriptor_number), *desc->GetKey());
set(ToValueIndex(descriptor_number), *desc->GetValue());
}
-void DescriptorArray::Append(Descriptor* desc,
- const WhitenessWitness& witness) {
- DisallowHeapAllocation no_gc;
- int descriptor_number = number_of_descriptors();
- SetNumberOfDescriptors(descriptor_number + 1);
- Set(descriptor_number, desc, witness);
-
- uint32_t hash = desc->GetKey()->Hash();
-
- int insertion;
-
- for (insertion = descriptor_number; insertion > 0; --insertion) {
- Name* key = GetSortedKey(insertion - 1);
- if (key->Hash() <= hash) break;
- SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
- }
-
- SetSortedKey(insertion, descriptor_number);
-}
-
-
void DescriptorArray::Append(Descriptor* desc) {
DisallowHeapAllocation no_gc;
int descriptor_number = number_of_descriptors();
DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
: marking_(array->GetHeap()->incremental_marking()) {
marking_->EnterNoMarkingScope();
- ASSERT(!marking_->IsMarking() ||
+ DCHECK(!marking_->IsMarking() ||
Marking::Color(array) == Marking::WHITE_OBJECT);
}
template<typename Derived, typename Shape, typename Key>
int HashTable<Derived, Shape, Key>::ComputeCapacity(int at_least_space_for) {
const int kMinCapacity = 32;
- int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+ int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
if (capacity < kMinCapacity) {
capacity = kMinCapacity; // Guarantee min capacity.
}
}
uint32_t SeededNumberDictionary::max_number_key() {
- ASSERT(!requires_slow_elements());
+ DCHECK(!requires_slow_elements());
Object* max_index_object = get(kMaxNumberKeyIndex);
if (!max_index_object->IsSmi()) return 0;
uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
// Cast operations
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(Cell)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ConstantPoolArray)
-CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(StringTable)
-CAST_ACCESSOR(JSFunctionResultCache)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(CompilationCacheTable)
-CAST_ACCESSOR(CodeCacheHashTable)
-CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
-CAST_ACCESSOR(MapCache)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(ExternalArray)
+CAST_ACCESSOR(ExternalOneByteString)
+CAST_ACCESSOR(ExternalFloat32Array)
+CAST_ACCESSOR(ExternalFloat64Array)
+CAST_ACCESSOR(ExternalInt16Array)
+CAST_ACCESSOR(ExternalInt32Array)
+CAST_ACCESSOR(ExternalInt8Array)
CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(Symbol)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(HeapNumber)
-CAST_ACCESSOR(Float32x4)
-CAST_ACCESSOR(Float64x2)
-CAST_ACCESSOR(Int32x4)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(PropertyCell)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Map)
-CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(ExternalUint16Array)
+CAST_ACCESSOR(ExternalUint32Array)
+CAST_ACCESSOR(ExternalUint8Array)
+CAST_ACCESSOR(ExternalUint8ClampedArray)
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedArrayBase)
+CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FreeSpace)
CAST_ACCESSOR(GlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSBuiltinsObject)
-CAST_ACCESSOR(Code)
+CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(JSDataView)
-CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSDate)
+CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSFunctionProxy)
-CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSFunctionResultCache)
+CAST_ACCESSOR(JSGeneratorObject)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSMapIterator)
+CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSModule)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSReceiver)
+CAST_ACCESSOR(JSRegExp)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
-CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(FreeSpace)
-CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalInt8Array)
-CAST_ACCESSOR(ExternalUint8Array)
-CAST_ACCESSOR(ExternalInt16Array)
-CAST_ACCESSOR(ExternalUint16Array)
-CAST_ACCESSOR(ExternalInt32Array)
-CAST_ACCESSOR(ExternalInt32x4Array)
-CAST_ACCESSOR(ExternalUint32Array)
-CAST_ACCESSOR(ExternalFloat32Array)
-CAST_ACCESSOR(ExternalFloat32x4Array)
-CAST_ACCESSOR(ExternalFloat64Array)
-CAST_ACCESSOR(ExternalFloat64x2Array)
-CAST_ACCESSOR(ExternalUint8ClampedArray)
+CAST_ACCESSOR(Map)
+CAST_ACCESSOR(MapCache)
+CAST_ACCESSOR(Name)
+CAST_ACCESSOR(NameDictionary)
+CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(Object)
+CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(OrderedHashMap)
+CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
+CAST_ACCESSOR(PropertyCell)
+CAST_ACCESSOR(ScopeInfo)
+CAST_ACCESSOR(SeededNumberDictionary)
+CAST_ACCESSOR(SeqOneByteString)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(Smi)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(UnseededNumberDictionary)
+CAST_ACCESSOR(WeakCell)
+CAST_ACCESSOR(WeakHashTable)
+
template <class Traits>
FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
- SLOW_ASSERT(object->IsHeapObject() &&
- HeapObject::cast(object)->map()->instance_type() ==
- Traits::kInstanceType);
+ SLOW_DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+ return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
+
+template <class Traits>
+const FixedTypedArray<Traits>*
+FixedTypedArray<Traits>::cast(const Object* object) {
+ SLOW_DCHECK(object->IsHeapObject() &&
+ HeapObject::cast(object)->map()->instance_type() ==
+ Traits::kInstanceType);
+  return reinterpret_cast<const FixedTypedArray<Traits>*>(object);
}
template <typename Derived, typename Shape, typename Key>
HashTable<Derived, Shape, Key>*
HashTable<Derived, Shape, Key>::cast(Object* obj) {
- ASSERT(obj->IsHashTable());
+ SLOW_DCHECK(obj->IsHashTable());
return reinterpret_cast<HashTable*>(obj);
}
+template <typename Derived, typename Shape, typename Key>
+const HashTable<Derived, Shape, Key>*
+HashTable<Derived, Shape, Key>::cast(const Object* obj) {
+ SLOW_DCHECK(obj->IsHashTable());
+ return reinterpret_cast<const HashTable*>(obj);
+}
+
+
SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
void Name::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
- WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
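+  // On 64-bit hosts the hash field occupies only half of its slot; zero the
+  // unused half, whose position depends on the target byte order.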
+#if V8_TARGET_LITTLE_ENDIAN
+ WRITE_UINT32_FIELD(this, kHashFieldSlot + kIntSize, 0);
+#else
+ WRITE_UINT32_FIELD(this, kHashFieldSlot, 0);
+#endif
#endif
}
ACCESSORS(Symbol, name, Object, kNameOffset)
ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
+BOOL_ACCESSORS(Symbol, flags, is_own, kOwnBit)
bool String::Equals(String* other) {
uint16_t String::Get(int index) {
- ASSERT(index >= 0 && index < length());
+ DCHECK(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
case kSeqStringTag | kOneByteStringTag:
return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
case kExternalStringTag | kOneByteStringTag:
- return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
+ return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
case kSlicedStringTag | kOneByteStringTag:
void String::Set(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length());
- ASSERT(StringShape(this).IsSequential());
+ DCHECK(index >= 0 && index < length());
+ DCHECK(StringShape(this).IsSequential());
return this->IsOneByteRepresentation()
? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
String* String::GetUnderlying() {
// Giving direct access to underlying string only makes sense if the
// wrapping string is already flattened.
- ASSERT(this->IsFlat());
- ASSERT(StringShape(this).IsIndirect());
+ DCHECK(this->IsFlat());
+ DCHECK(StringShape(this).IsIndirect());
STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
const int kUnderlyingOffset = SlicedString::kParentOffset;
return String::cast(READ_FIELD(this, kUnderlyingOffset));
const int offset) {
int slice_offset = offset;
const int length = string->length();
- ASSERT(offset <= length);
+ DCHECK(offset <= length);
while (true) {
int32_t type = string->map()->instance_type();
switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
case kExternalStringTag | kOneByteStringTag:
visitor->VisitOneByteString(
- ExternalAsciiString::cast(string)->GetChars() + slice_offset,
+ ExternalOneByteString::cast(string)->GetChars() + slice_offset,
length - offset);
return NULL;
uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
+ DCHECK(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
+ DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
+ DCHECK(index >= 0 && index < length());
return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
}
void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length());
+ DCHECK(index >= 0 && index < length());
WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
}
void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
- ASSERT(parent->IsSeqString() || parent->IsExternalString());
+ DCHECK(parent->IsSeqString() || parent->IsExternalString());
WRITE_FIELD(this, kParentOffset, parent);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
}
}
-const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
}
-void ExternalAsciiString::update_data_cache() {
+void ExternalOneByteString::update_data_cache() {
if (is_short()) return;
const char** data_field =
reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
}
-void ExternalAsciiString::set_resource(
- const ExternalAsciiString::Resource* resource) {
- ASSERT(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
+void ExternalOneByteString::set_resource(
+ const ExternalOneByteString::Resource* resource) {
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
*reinterpret_cast<const Resource**>(
FIELD_ADDR(this, kResourceOffset)) = resource;
if (resource != NULL) update_data_cache();
}
-const uint8_t* ExternalAsciiString::GetChars() {
+const uint8_t* ExternalOneByteString::GetChars() {
return reinterpret_cast<const uint8_t*>(resource()->data());
}
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
- ASSERT(index >= 0 && index < length());
+uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
+ DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
- ASSERT(index >= 0 && index < length());
+ DCHECK(index >= 0 && index < length());
return GetChars()[index];
}
}
-int ConsStringIteratorOp::OffsetForDepth(int depth) {
- return depth & kDepthMask;
-}
+int ConsStringIterator::OffsetForDepth(int depth) { return depth & kDepthMask; }
-void ConsStringIteratorOp::PushLeft(ConsString* string) {
+void ConsStringIterator::PushLeft(ConsString* string) {
frames_[depth_++ & kDepthMask] = string;
}
-void ConsStringIteratorOp::PushRight(ConsString* string) {
+void ConsStringIterator::PushRight(ConsString* string) {
// Inplace update.
frames_[(depth_-1) & kDepthMask] = string;
}
-void ConsStringIteratorOp::AdjustMaximumDepth() {
+void ConsStringIterator::AdjustMaximumDepth() {
if (depth_ > maximum_depth_) maximum_depth_ = depth_;
}
-void ConsStringIteratorOp::Pop() {
- ASSERT(depth_ > 0);
- ASSERT(depth_ <= maximum_depth_);
+void ConsStringIterator::Pop() {
+ DCHECK(depth_ > 0);
+ DCHECK(depth_ <= maximum_depth_);
depth_--;
}
uint16_t StringCharacterStream::GetNext() {
- ASSERT(buffer8_ != NULL && end_ != NULL);
+ DCHECK(buffer8_ != NULL && end_ != NULL);
// Advance cursor if needed.
if (buffer8_ == end_) HasMore();
- ASSERT(buffer8_ < end_);
+ DCHECK(buffer8_ < end_);
return is_one_byte_ ? *buffer8_++ : *buffer16_++;
}
-StringCharacterStream::StringCharacterStream(String* string,
- ConsStringIteratorOp* op,
- int offset)
- : is_one_byte_(false),
- op_(op) {
+StringCharacterStream::StringCharacterStream(String* string, int offset)
+ : is_one_byte_(false) {
Reset(string, offset);
}
buffer8_ = NULL;
end_ = NULL;
ConsString* cons_string = String::VisitFlat(this, string, offset);
- op_->Reset(cons_string, offset);
+ iter_.Reset(cons_string, offset);
if (cons_string != NULL) {
- string = op_->Next(&offset);
+ string = iter_.Next(&offset);
if (string != NULL) String::VisitFlat(this, string, offset);
}
}
bool StringCharacterStream::HasMore() {
if (buffer8_ != end_) return true;
int offset;
- String* string = op_->Next(&offset);
- ASSERT_EQ(offset, 0);
+ String* string = iter_.Next(&offset);
+ DCHECK_EQ(offset, 0);
if (string == NULL) return false;
String::VisitFlat(this, string);
- ASSERT(buffer8_ != end_);
+ DCHECK(buffer8_ != end_);
return true;
}
byte ByteArray::get(int index) {
- ASSERT(index >= 0 && index < this->length());
+ DCHECK(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
void ByteArray::set(int index, byte value) {
- ASSERT(index >= 0 && index < this->length());
+ DCHECK(index >= 0 && index < this->length());
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
}
int ByteArray::get_int(int index) {
- ASSERT(index >= 0 && (index * kIntSize) < this->length());
+ DCHECK(index >= 0 && (index * kIntSize) < this->length());
return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
}
ByteArray* ByteArray::FromDataStartAddress(Address address) {
- ASSERT_TAG_ALIGNED(address);
+ DCHECK_TAG_ALIGNED(address);
return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
}
uint8_t ExternalUint8ClampedArray::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint8_t* ptr = external_uint8_clamped_pointer();
return ptr[index];
}
void ExternalUint8ClampedArray::set(int index, uint8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint8_t* ptr = external_uint8_clamped_pointer();
ptr[index] = value;
}
-void* ExternalArray::external_pointer() {
+void* ExternalArray::external_pointer() const {
intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
return reinterpret_cast<void*>(ptr);
}
int8_t ExternalInt8Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
return ptr[index];
}
void ExternalInt8Array::set(int index, int8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
int8_t* ptr = static_cast<int8_t*>(external_pointer());
ptr[index] = value;
}
uint8_t ExternalUint8Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
return ptr[index];
}
void ExternalUint8Array::set(int index, uint8_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
ptr[index] = value;
}
int16_t ExternalInt16Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
return ptr[index];
}
void ExternalInt16Array::set(int index, int16_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
int16_t* ptr = static_cast<int16_t*>(external_pointer());
ptr[index] = value;
}
uint16_t ExternalUint16Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
return ptr[index];
}
void ExternalUint16Array::set(int index, uint16_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
ptr[index] = value;
}
int32_t ExternalInt32Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
return ptr[index];
}
void ExternalInt32Array::set(int index, int32_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
int32_t* ptr = static_cast<int32_t*>(external_pointer());
ptr[index] = value;
}
uint32_t ExternalUint32Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
return ptr[index];
}
void ExternalUint32Array::set(int index, uint32_t value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
- ptr[index] = value;
-}
-
-
-float ExternalFloat32Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- return ptr[index];
-}
-
-
-Handle<Object> ExternalFloat32Array::get(Handle<ExternalFloat32Array> array,
- int index) {
- return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
-}
-
-
-void ExternalFloat32Array::set(int index, float value) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- ptr[index] = value;
-}
-
-
-float32x4_value_t ExternalFloat32x4Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- float32x4_value_t value;
- value.storage[0] = ptr[index * 4 + 0];
- value.storage[1] = ptr[index * 4 + 1];
- value.storage[2] = ptr[index * 4 + 2];
- value.storage[3] = ptr[index * 4 + 3];
- return value;
-}
-
-
-Handle<Object> ExternalFloat32x4Array::get(Handle<ExternalFloat32x4Array> array,
- int index) {
- float32x4_value_t value = array->get_scalar(index);
- return array->GetIsolate()->factory()->NewFloat32x4(value);
-}
-
-
-void ExternalFloat32x4Array::set(int index, const float32x4_value_t& value) {
- ASSERT((index >= 0) && (index < this->length()));
- float* ptr = static_cast<float*>(external_pointer());
- ptr[index * 4 + 0] = value.storage[0];
- ptr[index * 4 + 1] = value.storage[1];
- ptr[index * 4 + 2] = value.storage[2];
- ptr[index * 4 + 3] = value.storage[3];
-}
-
-
-float64x2_value_t ExternalFloat64x2Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- double* ptr = static_cast<double*>(external_pointer());
- float64x2_value_t value;
- value.storage[0] = ptr[index * 2 + 0];
- value.storage[1] = ptr[index * 2 + 1];
- return value;
-}
-
-
-Handle<Object> ExternalFloat64x2Array::get(Handle<ExternalFloat64x2Array> array,
- int index) {
- float64x2_value_t value = array->get_scalar(index);
- return array->GetIsolate()->factory()->NewFloat64x2(value);
-}
-
-
-void ExternalFloat64x2Array::set(int index, const float64x2_value_t& value) {
- ASSERT((index >= 0) && (index < this->length()));
- double* ptr = static_cast<double*>(external_pointer());
- ptr[index * 2 + 0] = value.storage[0];
- ptr[index * 2 + 1] = value.storage[1];
+ ptr[index] = value;
}
-int32x4_value_t ExternalInt32x4Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- int32x4_value_t value;
- value.storage[0] = ptr[index * 4 + 0];
- value.storage[1] = ptr[index * 4 + 1];
- value.storage[2] = ptr[index * 4 + 2];
- value.storage[3] = ptr[index * 4 + 3];
- return value;
+float ExternalFloat32Array::get_scalar(int index) {
+ DCHECK((index >= 0) && (index < this->length()));
+ float* ptr = static_cast<float*>(external_pointer());
+ return ptr[index];
}
-Handle<Object> ExternalInt32x4Array::get(Handle<ExternalInt32x4Array> array,
+Handle<Object> ExternalFloat32Array::get(Handle<ExternalFloat32Array> array,
int index) {
- int32x4_value_t value = array->get_scalar(index);
- return array->GetIsolate()->factory()->NewInt32x4(value);
+ return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
}
-void ExternalInt32x4Array::set(int index, const int32x4_value_t& value) {
- ASSERT((index >= 0) && (index < this->length()));
- int32_t* ptr = static_cast<int32_t*>(external_pointer());
- ptr[index * 4 + 0] = value.storage[0];
- ptr[index * 4 + 1] = value.storage[1];
- ptr[index * 4 + 2] = value.storage[2];
- ptr[index * 4 + 3] = value.storage[3];
+void ExternalFloat32Array::set(int index, float value) {
+ DCHECK((index >= 0) && (index < this->length()));
+ float* ptr = static_cast<float*>(external_pointer());
+ ptr[index] = value;
}
double ExternalFloat64Array::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
return ptr[index];
}
void ExternalFloat64Array::set(int index, double value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
double* ptr = static_cast<double*>(external_pointer());
ptr[index] = value;
}
}
-int FixedTypedArrayBase::DataSize() {
- InstanceType instance_type = map()->instance_type();
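+// Overload of DataSize that takes the instance type explicitly instead of
+// reading it from the map.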
+int FixedTypedArrayBase::DataSize(InstanceType type) {
int element_size;
- switch (instance_type) {
+ switch (type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case FIXED_##TYPE##_ARRAY_TYPE: \
element_size = size; \
}
+int FixedTypedArrayBase::DataSize() {
+ return DataSize(map()->instance_type());
+}
+
+
int FixedTypedArrayBase::size() {
return OBJECT_POINTER_ALIGN(kDataOffset + DataSize());
}
+int FixedTypedArrayBase::TypedArraySize(InstanceType type) {
+ return OBJECT_POINTER_ALIGN(kDataOffset + DataSize(type));
+}
+
+
uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
float Float32ArrayTraits::defaultValue() {
- return static_cast<float>(OS::nan_value());
+ return static_cast<float>(base::OS::nan_value());
}
-double Float64ArrayTraits::defaultValue() { return OS::nan_value(); }
+double Float64ArrayTraits::defaultValue() { return base::OS::nan_value(); }
template <class Traits>
typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
ElementType* ptr = reinterpret_cast<ElementType*>(
FIELD_ADDR(this, kDataOffset));
return ptr[index];
template<> inline
FixedTypedArray<Float64ArrayTraits>::ElementType
FixedTypedArray<Float64ArrayTraits>::get_scalar(int index) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
return READ_DOUBLE_FIELD(this, ElementOffset(index));
}
template <class Traits>
void FixedTypedArray<Traits>::set(int index, ElementType value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
ElementType* ptr = reinterpret_cast<ElementType*>(
FIELD_ADDR(this, kDataOffset));
ptr[index] = value;
template<> inline
void FixedTypedArray<Float64ArrayTraits>::set(
int index, Float64ArrayTraits::ElementType value) {
- ASSERT((index >= 0) && (index < this->length()));
+ DCHECK((index >= 0) && (index < this->length()));
WRITE_DOUBLE_FIELD(this, ElementOffset(index), value);
}
template<> inline
uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_double(double value) {
- if (value < 0) return 0;
+ // Handle NaNs and less than zero values which clamp to zero.
+ if (!(value > 0)) return 0;
if (value > 0xFF) return 0xFF;
return static_cast<uint8_t>(lrint(value));
}
} else {
// Clamp undefined to the default value. All other types have been
// converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
+ DCHECK(value->IsUndefined());
}
array->set(index, cast_value);
}
return Traits::ToHandle(array->GetIsolate(), cast_value);
}
-template<> inline
-Handle<Object> FixedTypedArray<Float32x4ArrayTraits>::SetValue(
- Handle<FixedTypedArray<Float32x4ArrayTraits> > array,
- uint32_t index, Handle<Object> value) {
- float32x4_value_t cast_value;
- cast_value.storage[0] = static_cast<float>(OS::nan_value());
- cast_value.storage[1] = static_cast<float>(OS::nan_value());
- cast_value.storage[2] = static_cast<float>(OS::nan_value());
- cast_value.storage[3] = static_cast<float>(OS::nan_value());
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsFloat32x4()) {
- cast_value = Handle<Float32x4>::cast(value)->value();
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- array->set(index, cast_value);
- }
- return Float32x4ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
-}
-
-
-template<> inline
-Handle<Object> FixedTypedArray<Float64x2ArrayTraits>::SetValue(
- Handle<FixedTypedArray<Float64x2ArrayTraits> > array,
- uint32_t index, Handle<Object> value) {
- float64x2_value_t cast_value;
- cast_value.storage[0] = OS::nan_value();
- cast_value.storage[1] = OS::nan_value();
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsFloat64x2()) {
- cast_value = Handle<Float64x2>::cast(value)->value();
- } else {
- // Clamp undefined to NaN (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- array->set(index, cast_value);
- }
- return Float64x2ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
-}
-
-
-template<> inline
-Handle<Object> FixedTypedArray<Int32x4ArrayTraits>::SetValue(
- Handle<FixedTypedArray<Int32x4ArrayTraits> > array,
- uint32_t index, Handle<Object> value) {
- int32x4_value_t cast_value;
- cast_value.storage[0] = 0;
- cast_value.storage[1] = 0;
- cast_value.storage[2] = 0;
- cast_value.storage[3] = 0;
- if (index < static_cast<uint32_t>(array->length())) {
- if (value->IsInt32x4()) {
- cast_value = Handle<Int32x4>::cast(value)->value();
- } else {
- // Clamp undefined to zero (default). All other types have been
- // converted to a number type further up in the call chain.
- ASSERT(value->IsUndefined());
- }
- array->set(index, cast_value);
- }
- return Int32x4ArrayTraits::ToHandle(array->GetIsolate(), cast_value);
-}
-
Handle<Object> Uint8ArrayTraits::ToHandle(Isolate* isolate, uint8_t scalar) {
return handle(Smi::FromInt(scalar), isolate);
}
-Handle<Object> Int32x4ArrayTraits::ToHandle(
- Isolate* isolate, int32x4_value_t scalar) {
- return isolate->factory()->NewInt32x4(scalar);
-}
-
-
-Handle<Object> Float32x4ArrayTraits::ToHandle(
- Isolate* isolate, float32x4_value_t scalar) {
- return isolate->factory()->NewFloat32x4(scalar);
-}
-
-
-Handle<Object> Float64x2ArrayTraits::ToHandle(
- Isolate* isolate, float64x2_value_t scalar) {
- return isolate->factory()->NewFloat64x2(scalar);
-}
-
-
Handle<Object> Float64ArrayTraits::ToHandle(Isolate* isolate, double scalar) {
return isolate->factory()->NewNumber(scalar);
}
void Map::set_visitor_id(int id) {
- ASSERT(0 <= id && id < 256);
+ DCHECK(0 <= id && id < 256);
WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
}
int Map::GetInObjectPropertyOffset(int index) {
// Adjust for the number of properties stored in the object.
index -= inobject_properties();
- ASSERT(index < 0);
+ DCHECK(index <= 0);
return instance_size() + (index * kPointerSize);
}
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
// Only inline the most frequent cases.
- int instance_type = static_cast<int>(map->instance_type());
+ InstanceType instance_type = map->instance_type();
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
- if (instance_type == ASCII_STRING_TYPE ||
- instance_type == ASCII_INTERNALIZED_STRING_TYPE) {
+ if (instance_type == ONE_BYTE_STRING_TYPE ||
+ instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
return SeqOneByteString::SizeFor(
reinterpret_cast<SeqOneByteString*>(this)->length());
}
reinterpret_cast<FixedDoubleArray*>(this)->length());
}
if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
- return ConstantPoolArray::SizeFor(
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_code_ptr_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_heap_ptr_entries(),
- reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
+ return reinterpret_cast<ConstantPoolArray*>(this)->size();
}
if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
- return reinterpret_cast<FixedTypedArrayBase*>(this)->size();
+ return reinterpret_cast<FixedTypedArrayBase*>(
+ this)->TypedArraySize(instance_type);
}
- ASSERT(instance_type == CODE_TYPE);
+ DCHECK(instance_type == CODE_TYPE);
return reinterpret_cast<Code*>(this)->CodeSize();
}
void Map::set_instance_size(int value) {
- ASSERT_EQ(0, value & (kPointerSize - 1));
+ DCHECK_EQ(0, value & (kPointerSize - 1));
value >>= kPointerSizeLog2;
- ASSERT(0 <= value && value < 256);
+ DCHECK(0 <= value && value < 256);
NOBARRIER_WRITE_BYTE_FIELD(
this, kInstanceSizeOffset, static_cast<byte>(value));
}
void Map::set_inobject_properties(int value) {
- ASSERT(0 <= value && value < 256);
+ DCHECK(0 <= value && value < 256);
WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
}
void Map::set_pre_allocated_property_fields(int value) {
- ASSERT(0 <= value && value < 256);
+ DCHECK(0 <= value && value < 256);
WRITE_BYTE_FIELD(this,
kPreAllocatedPropertyFieldsOffset,
static_cast<byte>(value));
void Map::set_function_with_prototype(bool value) {
- set_bit_field3(FunctionWithPrototype::update(bit_field3(), value));
+ set_bit_field(FunctionWithPrototype::update(bit_field(), value));
}
bool Map::function_with_prototype() {
- return FunctionWithPrototype::decode(bit_field3());
+ return FunctionWithPrototype::decode(bit_field());
}
}
-void Map::set_attached_to_shared_function_info(bool value) {
- if (value) {
- set_bit_field2(bit_field2() | (1 << kAttachedToSharedFunctionInfo));
- } else {
- set_bit_field2(bit_field2() & ~(1 << kAttachedToSharedFunctionInfo));
- }
-}
-
-bool Map::attached_to_shared_function_info() {
- return ((1 << kAttachedToSharedFunctionInfo) & bit_field2()) != 0;
+void Map::set_is_prototype_map(bool value) {
+ set_bit_field2(IsPrototypeMapBits::update(bit_field2(), value));
}
-
-void Map::set_is_shared(bool value) {
- set_bit_field3(IsShared::update(bit_field3(), value));
+bool Map::is_prototype_map() {
+ return IsPrototypeMapBits::decode(bit_field2());
}
-bool Map::is_shared() {
- return IsShared::decode(bit_field3()); }
-
-
void Map::set_dictionary_map(bool value) {
uint32_t new_bit_field3 = DictionaryMap::update(bit_field3(), value);
new_bit_field3 = IsUnstable::update(new_bit_field3, value);
}
-void Map::set_owns_descriptors(bool is_shared) {
- set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared));
+void Map::set_owns_descriptors(bool owns_descriptors) {
+ set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors));
}
}
+void Map::set_done_inobject_slack_tracking(bool value) {
+ set_bit_field3(DoneInobjectSlackTracking::update(bit_field3(), value));
+}
+
+
+bool Map::done_inobject_slack_tracking() {
+ return DoneInobjectSlackTracking::decode(bit_field3());
+}
+
+
+void Map::set_construction_count(int value) {
+ set_bit_field3(ConstructionCount::update(bit_field3(), value));
+}
+
+
+int Map::construction_count() {
+ return ConstructionCount::decode(bit_field3());
+}
+
+
void Map::freeze() {
set_bit_field3(IsFrozen::update(bit_field3(), true));
}
}
+bool Code::IsCodeStubOrIC() {
+ return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC ||
+ kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
+ kind() == TO_BOOLEAN_IC;
+}
+
+
InlineCacheState Code::ic_state() {
InlineCacheState result = ExtractICStateFromFlags(flags());
// Only allow uninitialized or debugger states for non-IC code
// objects. This is used in the debugger to determine whether or not
  // a call to the code object has been replaced with a debug break call.
- ASSERT(is_inline_cache_stub() ||
+ DCHECK(is_inline_cache_stub() ||
result == UNINITIALIZED ||
result == DEBUG_STUB);
return result;
ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+ DCHECK(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
}
+inline bool Code::is_hydrogen_stub() {
+ return is_crankshafted() && kind() != OPTIMIZED_FUNCTION;
+}
+
+
inline void Code::set_is_crankshafted(bool value) {
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = IsCrankshaftedField::update(previous, value);
}
-int Code::major_key() {
- ASSERT(has_major_key());
- return StubMajorKeyField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_major_key(int major) {
- ASSERT(has_major_key());
- ASSERT(0 <= major && major < 256);
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = StubMajorKeyField::update(previous, major);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
+inline bool Code::is_turbofanned() {
+ DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
+ return IsTurbofannedField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
-bool Code::has_major_key() {
- return kind() == STUB ||
- kind() == HANDLER ||
- kind() == BINARY_OP_IC ||
- kind() == COMPARE_IC ||
- kind() == COMPARE_NIL_IC ||
- kind() == LOAD_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == STORE_IC ||
- kind() == CALL_IC ||
- kind() == KEYED_STORE_IC ||
- kind() == TO_BOOLEAN_IC;
+inline void Code::set_is_turbofanned(bool value) {
+ DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = IsTurbofannedField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
bool Code::optimizable() {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
}
void Code::set_optimizable(bool value) {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
}
bool Code::has_deoptimization_support() {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
}
void Code::set_has_deoptimization_support(bool value) {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
bool Code::has_debug_break_slots() {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
}
void Code::set_has_debug_break_slots(bool value) {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
bool Code::is_compiled_optimizable() {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
return FullCodeFlagsIsCompiledOptimizable::decode(flags);
}
void Code::set_compiled_optimizable(bool value) {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
int Code::allow_osr_at_loop_nesting_level() {
- ASSERT_EQ(FUNCTION, kind());
- return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+ DCHECK_EQ(FUNCTION, kind());
+ int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ return AllowOSRAtLoopNestingLevelField::decode(fields);
}
void Code::set_allow_osr_at_loop_nesting_level(int level) {
- ASSERT_EQ(FUNCTION, kind());
- ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
- WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+ DCHECK_EQ(FUNCTION, kind());
+ DCHECK(level >= 0 && level <= kMaxLoopNestingMarker);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+ int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
int Code::profiler_ticks() {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
return READ_BYTE_FIELD(this, kProfilerTicksOffset);
}
void Code::set_profiler_ticks(int ticks) {
- ASSERT_EQ(FUNCTION, kind());
- ASSERT(ticks < 256);
- WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+ DCHECK(ticks < 256);
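+  // Only FUNCTION code tracks profiler ticks; for other kinds the value is
+  // silently dropped.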
+ if (kind() == FUNCTION) {
+ WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+ }
+}
+
+
+int Code::builtin_index() {
+ return READ_INT32_FIELD(this, kKindSpecificFlags1Offset);
+}
+
+
+void Code::set_builtin_index(int index) {
+ WRITE_INT32_FIELD(this, kKindSpecificFlags1Offset, index);
}
unsigned Code::stack_slots() {
- ASSERT(is_crankshafted());
+ DCHECK(is_crankshafted());
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_stack_slots(unsigned slots) {
CHECK(slots <= (1 << kStackSlotsBitCount));
- ASSERT(is_crankshafted());
+ DCHECK(is_crankshafted());
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = StackSlotsField::update(previous, slots);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
unsigned Code::safepoint_table_offset() {
- ASSERT(is_crankshafted());
+ DCHECK(is_crankshafted());
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
void Code::set_safepoint_table_offset(unsigned offset) {
CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- ASSERT(is_crankshafted());
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+ DCHECK(is_crankshafted());
+ DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = SafepointTableOffsetField::update(previous, offset);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
unsigned Code::back_edge_table_offset() {
- ASSERT_EQ(FUNCTION, kind());
+ DCHECK_EQ(FUNCTION, kind());
return BackEdgeTableOffsetField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+ READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
}
void Code::set_back_edge_table_offset(unsigned offset) {
- ASSERT_EQ(FUNCTION, kind());
- ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+ DCHECK_EQ(FUNCTION, kind());
+ DCHECK(IsAligned(offset, static_cast<unsigned>(kPointerSize)));
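+  // The offset is stored in pointer-size units so that it fits in the bit
+  // field; the getter scales it back up by kPointerSizeLog2.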
+ offset = offset >> kPointerSizeLog2;
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = BackEdgeTableOffsetField::update(previous, offset);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
bool Code::back_edges_patched_for_osr() {
- ASSERT_EQ(FUNCTION, kind());
- return BackEdgesPatchedForOSRField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
-}
-
-
-void Code::set_back_edges_patched_for_osr(bool value) {
- ASSERT_EQ(FUNCTION, kind());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = BackEdgesPatchedForOSRField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
+ DCHECK_EQ(FUNCTION, kind());
+ return allow_osr_at_loop_nesting_level() > 0;
}
-
byte Code::to_boolean_state() {
return extra_ic_state();
}
bool Code::has_function_cache() {
- ASSERT(kind() == STUB);
+ DCHECK(kind() == STUB);
return HasFunctionCacheField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_has_function_cache(bool flag) {
- ASSERT(kind() == STUB);
+ DCHECK(kind() == STUB);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = HasFunctionCacheField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
bool Code::marked_for_deoptimization() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
return MarkedForDeoptimizationField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
void Code::set_marked_for_deoptimization(bool flag) {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- ASSERT(!flag || AllowDeoptimization::IsAllowed(GetIsolate()));
+ DCHECK(kind() == OPTIMIZED_FUNCTION);
+ DCHECK(!flag || AllowDeoptimization::IsAllowed(GetIsolate()));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = MarkedForDeoptimizationField::update(previous, flag);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
void Code::mark_as_weak_stub() {
- ASSERT(CanBeWeakStub());
+ DCHECK(CanBeWeakStub());
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = WeakStubField::update(previous, true);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
void Code::mark_as_invalidated_weak_stub() {
- ASSERT(is_inline_cache_stub());
+ DCHECK(is_inline_cache_stub());
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = InvalidatedWeakStubField::update(previous, true);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
void Code::set_constant_pool(Object* value) {
- ASSERT(value->IsConstantPoolArray());
+ DCHECK(value->IsConstantPoolArray());
WRITE_FIELD(this, kConstantPoolOffset, value);
WRITE_BARRIER(GetHeap(), this, kConstantPoolOffset, value);
}
-Code::Flags Code::ComputeFlags(Kind kind,
- InlineCacheState ic_state,
- ExtraICState extra_ic_state,
- StubType type,
- InlineCacheHolderFlag holder) {
+Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
+ ExtraICState extra_ic_state, StubType type,
+ CacheHolderFlag holder) {
// Compute the bit mask.
unsigned int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
ExtraICState extra_ic_state,
- InlineCacheHolderFlag holder,
+ CacheHolderFlag holder,
StubType type) {
return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
}
-Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
- StubType type,
- InlineCacheHolderFlag holder) {
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind, StubType type,
+ CacheHolderFlag holder) {
return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
}
}
-InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
+CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
return CacheHolderField::decode(flags);
}
}
+Code::Flags Code::RemoveTypeAndHolderFromFlags(Flags flags) {
+ int bits = flags & ~TypeField::kMask & ~CacheHolderField::kMask;
+ return static_cast<Flags>(bits);
+}
+
+
Code* Code::GetCodeFromTargetAddress(Address address) {
HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
// GetCodeFromTargetAddress might be called when marking objects during mark
public:
FindAndReplacePattern() : count_(0) { }
void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
- ASSERT(count_ < kMaxCount);
+ DCHECK(count_ < kMaxCount);
find_[count_] = map_to_find;
replace_[count_] = obj_to_replace;
++count_;
}
-Object* Map::prototype() {
+Object* Map::prototype() const {
return READ_FIELD(this, kPrototypeOffset);
}
void Map::set_prototype(Object* value, WriteBarrierMode mode) {
- ASSERT(value->IsNull() || value->IsJSReceiver());
+ DCHECK(value->IsNull() || value->IsJSReceiver());
WRITE_FIELD(this, kPrototypeOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
}
void Map::set_bit_field3(uint32_t bits) {
- // Ensure the upper 2 bits have the same value by sign extending it. This is
- // necessary to be able to use the 31st bit.
- int value = bits << 1;
- WRITE_FIELD(this, kBitField3Offset, Smi::FromInt(value >> 1));
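+  // bit_field3 occupies a pointer-sized slot; on 64-bit targets, zero the
+  // unused half of the slot before storing the 32-bit value.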
+ if (kInt32Size != kPointerSize) {
+ WRITE_UINT32_FIELD(this, kBitField3Offset + kInt32Size, 0);
+ }
+ WRITE_UINT32_FIELD(this, kBitField3Offset, bits);
}
uint32_t Map::bit_field3() {
- Object* value = READ_FIELD(this, kBitField3Offset);
- return Smi::cast(value)->value();
+ return READ_UINT32_FIELD(this, kBitField3Offset);
}
void Map::AppendDescriptor(Descriptor* desc) {
DescriptorArray* descriptors = instance_descriptors();
int number_of_own_descriptors = NumberOfOwnDescriptors();
- ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
descriptors->Append(desc);
SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
}
if (object->IsDescriptorArray()) {
return TransitionArray::cast(object)->back_pointer_storage();
} else {
- ASSERT(object->IsMap() || object->IsUndefined());
+ DCHECK(object->IsMap() || object->IsUndefined());
return object;
}
}
}
-bool Map::HasTransitionArray() {
+bool Map::HasTransitionArray() const {
Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
return object->IsTransitionArray();
}
bool Map::CanHaveMoreTransitions() {
if (!HasTransitionArray()) return true;
- return FixedArray::SizeFor(transitions()->length() +
- TransitionArray::kTransitionSize)
- <= Page::kMaxRegularHeapObjectSize;
+ return transitions()->number_of_transitions() <
+ TransitionArray::kMaxNumberOfTransitions;
}
int old_number_of_transitions = map->NumberOfProtoTransitions();
#ifdef DEBUG
if (map->HasPrototypeTransitions()) {
- ASSERT(map->GetPrototypeTransitions() != *proto_transitions);
+ DCHECK(map->GetPrototypeTransitions() != *proto_transitions);
map->ZapPrototypeTransitions();
}
#endif
}
-TransitionArray* Map::transitions() {
- ASSERT(HasTransitionArray());
+TransitionArray* Map::transitions() const {
+ DCHECK(HasTransitionArray());
Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
return TransitionArray::cast(object);
}
if (target->instance_descriptors() == instance_descriptors()) {
Name* key = transitions()->GetKey(i);
int new_target_index = transition_array->Search(key);
- ASSERT(new_target_index != TransitionArray::kNotFound);
- ASSERT(transition_array->GetTarget(new_target_index) == target);
+ DCHECK(new_target_index != TransitionArray::kNotFound);
+ DCHECK(transition_array->GetTarget(new_target_index) == target);
}
}
#endif
- ASSERT(transitions() != transition_array);
+ DCHECK(transitions() != transition_array);
ZapTransitions();
}
void Map::init_back_pointer(Object* undefined) {
- ASSERT(undefined->IsUndefined());
+ DCHECK(undefined->IsUndefined());
WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined);
}
void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+ DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+ DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) ||
(value->IsMap() && GetBackPointer()->IsUndefined()));
Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
if (object->IsTransitionArray()) {
ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
-ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
+ACCESSORS(GlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
+ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-ACCESSORS_TO_SMI(AccessorPair, access_flags, kAccessFlagsOffset)
ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
-ACCESSORS(Script, wrapper, Foreign, kWrapperOffset)
+ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
ACCESSORS_TO_SMI(Script, type, kTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
kEvalFrominstructionsOffsetOffset)
ACCESSORS_TO_SMI(Script, flags, kFlagsOffset)
BOOL_ACCESSORS(Script, flags, is_shared_cross_origin, kIsSharedCrossOriginBit)
+ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
+ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
Script::CompilationType Script::compilation_type() {
return BooleanBit::get(flags(), kCompilationTypeBit) ?
ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_vector, FixedArray,
+ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
kFeedbackVectorOffset)
-ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset)
ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
compiler_hints,
has_duplicate_parameters,
kHasDuplicateParameters)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
#if V8_HOST_ARCH_32_BIT
#else
-#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == 0); \
- int holder::name() { \
- int value = READ_INT_FIELD(this, offset); \
- ASSERT(kHeapObjectTag == 1); \
- ASSERT((value & kHeapObjectTag) == 0); \
- return value >> 1; \
- } \
- void holder::set_##name(int value) { \
- ASSERT(kHeapObjectTag == 1); \
- ASSERT((value & 0xC0000000) == 0xC0000000 || \
- (value & 0xC0000000) == 0x000000000); \
- WRITE_INT_FIELD(this, \
- offset, \
- (value << 1) & ~kHeapObjectTag); \
- }
-
-#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
- STATIC_ASSERT(holder::offset % kPointerSize == kIntSize); \
+#if V8_TARGET_LITTLE_ENDIAN
+#define PSEUDO_SMI_LO_ALIGN 0
+#define PSEUDO_SMI_HI_ALIGN kIntSize
+#else
+#define PSEUDO_SMI_LO_ALIGN kIntSize
+#define PSEUDO_SMI_HI_ALIGN 0
+#endif
+
+#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
+ STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_LO_ALIGN); \
+ int holder::name() const { \
+ int value = READ_INT_FIELD(this, offset); \
+ DCHECK(kHeapObjectTag == 1); \
+ DCHECK((value & kHeapObjectTag) == 0); \
+ return value >> 1; \
+ } \
+ void holder::set_##name(int value) { \
+ DCHECK(kHeapObjectTag == 1); \
+ DCHECK((value & 0xC0000000) == 0xC0000000 || (value & 0xC0000000) == 0x0); \
+ WRITE_INT_FIELD(this, offset, (value << 1) & ~kHeapObjectTag); \
+ }
+
+#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
+ STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_HI_ALIGN); \
INT_ACCESSORS(holder, name, offset)
#endif
-int SharedFunctionInfo::construction_count() {
- return READ_BYTE_FIELD(this, kConstructionCountOffset);
-}
-
-
-void SharedFunctionInfo::set_construction_count(int value) {
- ASSERT(0 <= value && value < 256);
- WRITE_BYTE_FIELD(this, kConstructionCountOffset, static_cast<byte>(value));
-}
-
-
-BOOL_ACCESSORS(SharedFunctionInfo,
- compiler_hints,
- live_objects_may_exist,
- kLiveObjectsMayExist)
-
-
-bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
- return initial_map() != GetHeap()->undefined_value();
-}
-
-
BOOL_GETTER(SharedFunctionInfo,
compiler_hints,
optimization_disabled,
void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
// We only allow mode transitions from sloppy to strict.
- ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
+ DCHECK(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
int hints = compiler_hints();
hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT);
set_compiler_hints(hints);
}
+FunctionKind SharedFunctionInfo::kind() {
+ return FunctionKindBits::decode(compiler_hints());
+}
+
+
+void SharedFunctionInfo::set_kind(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ int hints = compiler_hints();
+ hints = FunctionKindBits::update(hints, kind);
+ set_compiler_hints(hints);
+}
+
+
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
kInlineBuiltin)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
- kDontOptimize)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
-
-void SharedFunctionInfo::BeforeVisitingPointers() {
- if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
-}
-
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
+ kIsConciseMethod)
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
String* src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
if (src_str->IsOneByteRepresentation()) {
- return ExternalAsciiString::cast(src)->resource() != NULL;
+ return ExternalOneByteString::cast(src)->resource() != NULL;
} else if (src_str->IsTwoByteRepresentation()) {
return ExternalTwoByteString::cast(src)->resource() != NULL;
}
void SharedFunctionInfo::DontAdaptArguments() {
- ASSERT(code()->kind() == Code::BUILTIN);
+ DCHECK(code()->kind() == Code::BUILTIN);
set_formal_parameter_count(kDontAdaptArgumentsSentinel);
}
-int SharedFunctionInfo::start_position() {
+int SharedFunctionInfo::start_position() const {
return start_position_and_type() >> kStartPositionShift;
}
}
-Code* SharedFunctionInfo::code() {
+Code* SharedFunctionInfo::code() const {
return Code::cast(READ_FIELD(this, kCodeOffset));
}
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
- ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION);
+ DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
flusher->EvictCandidate(this);
}
- ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
+ DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
set_code(value);
}
-ScopeInfo* SharedFunctionInfo::scope_info() {
+ScopeInfo* SharedFunctionInfo::scope_info() const {
return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
}
bool SharedFunctionInfo::is_compiled() {
- return code() !=
- GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
+ return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
}
FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
- ASSERT(IsApiFunction());
+ DCHECK(IsApiFunction());
return FunctionTemplateInfo::cast(function_data());
}
BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
- ASSERT(HasBuiltinFunctionId());
+ DCHECK(HasBuiltinFunctionId());
return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
}
}
+bool JSFunction::IsFromNativeScript() {
+ Object* script = shared()->script();
+ bool native = script->IsScript() &&
+ Script::cast(script)->type()->value() == Script::TYPE_NATIVE;
+ DCHECK(!IsBuiltin() || native); // All builtins are also native.
+ return native;
+}
+
+
+bool JSFunction::IsFromExtensionScript() {
+ Object* script = shared()->script();
+ return script->IsScript() &&
+ Script::cast(script)->type()->value() == Script::TYPE_EXTENSION;
+}
+
+
bool JSFunction::NeedsArgumentsAdaption() {
return shared()->formal_parameter_count() !=
SharedFunctionInfo::kDontAdaptArgumentsSentinel;
}
+bool JSFunction::IsInobjectSlackTrackingInProgress() {
+ return has_initial_map() &&
+ initial_map()->construction_count() != JSFunction::kNoSlackTracking;
+}
+
+
Code* JSFunction::code() {
return Code::cast(
Code::GetObjectFromEntryAddress(FIELD_ADDR(this, kCodeEntryOffset)));
void JSFunction::set_code(Code* value) {
- ASSERT(!GetHeap()->InNewSpace(value));
+ DCHECK(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
void JSFunction::set_code_no_write_barrier(Code* value) {
- ASSERT(!GetHeap()->InNewSpace(value));
+ DCHECK(!GetHeap()->InNewSpace(value));
Address entry = value->entry();
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
}
+JSObject* JSFunction::global_proxy() {
+ return context()->global_proxy();
+}
+
+
void JSFunction::set_context(Object* value) {
- ASSERT(value->IsUndefined() || value->IsContext());
+ DCHECK(value->IsUndefined() || value->IsContext());
WRITE_FIELD(this, kContextOffset, value);
WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
}
}
-void JSFunction::set_initial_map(Map* value) {
- set_prototype_or_initial_map(value);
-}
-
-
bool JSFunction::has_initial_map() {
return prototype_or_initial_map()->IsMap();
}
Object* JSFunction::instance_prototype() {
- ASSERT(has_instance_prototype());
+ DCHECK(has_instance_prototype());
if (has_initial_map()) return initial_map()->prototype();
// When there is no initial map and the prototype is a JSObject, the
// initial map field is used for the prototype field.
Object* JSFunction::prototype() {
- ASSERT(has_prototype());
+ DCHECK(has_prototype());
// If the function's prototype property has been set to a non-JSObject
// value, that value is stored in the constructor field of the map.
if (map()->has_non_instance_prototype()) return map()->constructor();
bool JSFunction::is_compiled() {
- return code() !=
- GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
+ return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
}
FixedArray* JSFunction::literals() {
- ASSERT(!shared()->bound());
+ DCHECK(!shared()->bound());
return literals_or_bindings();
}
void JSFunction::set_literals(FixedArray* literals) {
- ASSERT(!shared()->bound());
+ DCHECK(!shared()->bound());
set_literals_or_bindings(literals);
}
FixedArray* JSFunction::function_bindings() {
- ASSERT(shared()->bound());
+ DCHECK(shared()->bound());
return literals_or_bindings();
}
void JSFunction::set_function_bindings(FixedArray* bindings) {
- ASSERT(shared()->bound());
+ DCHECK(shared()->bound());
  // A bound function's literals may be initialized to the empty fixed array
  // before the bindings are set.
- ASSERT(bindings == GetHeap()->empty_fixed_array() ||
+ DCHECK(bindings == GetHeap()->empty_fixed_array() ||
bindings->map() == GetHeap()->fixed_cow_array_map());
set_literals_or_bindings(bindings);
}
int JSFunction::NumberOfLiterals() {
- ASSERT(!shared()->bound());
+ DCHECK(!shared()->bound());
return literals()->length();
}
Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
+ DCHECK(id < kJSBuiltinsCount); // id is unsigned.
return READ_FIELD(this, OffsetOfFunctionWithId(id));
}
void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Object* value) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
+ DCHECK(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
}
Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
+ DCHECK(id < kJSBuiltinsCount); // id is unsigned.
return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
}
void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
Code* value) {
- ASSERT(id < kJSBuiltinsCount); // id is unsigned.
+ DCHECK(id < kJSBuiltinsCount); // id is unsigned.
WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- ASSERT(!GetHeap()->InNewSpace(value));
+ DCHECK(!GetHeap()->InNewSpace(value));
}
void JSProxy::InitializeBody(int object_size, Object* value) {
- ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
+ DCHECK(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
}
-ACCESSORS(JSSet, table, Object, kTableOffset)
-ACCESSORS(JSMap, table, Object, kTableOffset)
+ACCESSORS(JSCollection, table, Object, kTableOffset)
#define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset) \
template<class Derived, class TableType> \
- type* OrderedHashTableIterator<Derived, TableType>::name() { \
+ type* OrderedHashTableIterator<Derived, TableType>::name() const { \
return type::cast(READ_FIELD(this, offset)); \
} \
template<class Derived, class TableType> \
}
ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Smi, kIndexOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(count, Smi, kCountOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Smi, kKindOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(next_iterator, Object,
- kNextIteratorOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(previous_iterator, Object,
- kPreviousIteratorOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Object, kIndexOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Object, kKindOffset)
#undef ORDERED_HASH_TABLE_ITERATOR_ACCESSORS
SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
bool JSGeneratorObject::is_suspended() {
- ASSERT_LT(kGeneratorExecuting, kGeneratorClosed);
- ASSERT_EQ(kGeneratorClosed, 0);
+ DCHECK_LT(kGeneratorExecuting, kGeneratorClosed);
+ DCHECK_EQ(kGeneratorClosed, 0);
return continuation() > 0;
}
-JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
- ASSERT(obj->IsJSGeneratorObject());
- ASSERT(HeapObject::cast(obj)->Size() == JSGeneratorObject::kSize);
- return reinterpret_cast<JSGeneratorObject*>(obj);
+bool JSGeneratorObject::is_closed() {
+ return continuation() == kGeneratorClosed;
}
+bool JSGeneratorObject::is_executing() {
+ return continuation() == kGeneratorExecuting;
+}
ACCESSORS(JSModule, context, Object, kContextOffset)
ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
-JSModule* JSModule::cast(Object* obj) {
- ASSERT(obj->IsJSModule());
- ASSERT(HeapObject::cast(obj)->Size() == JSModule::kSize);
- return reinterpret_cast<JSModule*>(obj);
-}
+ACCESSORS(JSValue, value, Object, kValueOffset)
-ACCESSORS(JSValue, value, Object, kValueOffset)
+HeapNumber* HeapNumber::cast(Object* object) {
+ SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber());
+ return reinterpret_cast<HeapNumber*>(object);
+}
-JSValue* JSValue::cast(Object* obj) {
- ASSERT(obj->IsJSValue());
- ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize);
- return reinterpret_cast<JSValue*>(obj);
+const HeapNumber* HeapNumber::cast(const Object* object) {
+ SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber());
+ return reinterpret_cast<const HeapNumber*>(object);
}
ACCESSORS(JSDate, sec, Object, kSecOffset)
-JSDate* JSDate::cast(Object* obj) {
- ASSERT(obj->IsJSDate());
- ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize);
- return reinterpret_cast<JSDate*>(obj);
-}
-
-
ACCESSORS(JSMessageObject, type, String, kTypeOffset)
ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
-JSMessageObject* JSMessageObject::cast(Object* obj) {
- ASSERT(obj->IsJSMessageObject());
- ASSERT(HeapObject::cast(obj)->Size() == JSMessageObject::kSize);
- return reinterpret_cast<JSMessageObject*>(obj);
-}
-
-
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
WRITE_FIELD(this, kHandlerTableOffset, NULL);
WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
WRITE_FIELD(this, kConstantPoolOffset, NULL);
- // Do not wipe out e.g. a minor key.
+  // Do not wipe out major/minor keys on a code stub or IC.
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
}
Object* Code::type_feedback_info() {
- ASSERT(kind() == FUNCTION);
+ DCHECK(kind() == FUNCTION);
return raw_type_feedback_info();
}
void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
- ASSERT(kind() == FUNCTION);
+ DCHECK(kind() == FUNCTION);
set_raw_type_feedback_info(value, mode);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
value, mode);
}
-int Code::stub_info() {
- ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
- kind() == BINARY_OP_IC || kind() == LOAD_IC || kind() == CALL_IC);
- return Smi::cast(raw_type_feedback_info())->value();
+uint32_t Code::stub_key() {
+ DCHECK(IsCodeStubOrIC());
+ Smi* smi_key = Smi::cast(raw_type_feedback_info());
+ return static_cast<uint32_t>(smi_key->value());
}
-void Code::set_stub_info(int value) {
- ASSERT(kind() == COMPARE_IC ||
- kind() == COMPARE_NIL_IC ||
- kind() == BINARY_OP_IC ||
- kind() == STUB ||
- kind() == LOAD_IC ||
- kind() == CALL_IC ||
- kind() == KEYED_LOAD_IC ||
- kind() == STORE_IC ||
- kind() == KEYED_STORE_IC);
- set_raw_type_feedback_info(Smi::FromInt(value));
+void Code::set_stub_key(uint32_t key) {
+ DCHECK(IsCodeStubOrIC());
+ set_raw_type_feedback_info(Smi::FromInt(key));
}
ACCESSORS(JSArray, length, Object, kLengthOffset)
-void* JSArrayBuffer::backing_store() {
+void* JSArrayBuffer::backing_store() const {
intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
return reinterpret_cast<void*>(ptr);
}
}
+bool JSArrayBuffer::is_neuterable() {
+ return BooleanBit::get(flag(), kIsNeuterableBit);
+}
+
+
+void JSArrayBuffer::set_is_neuterable(bool value) {
+ set_flag(BooleanBit::set(flag(), kIsNeuterableBit, value));
+}
+
+
ACCESSORS(JSArrayBuffer, weak_next, Object, kWeakNextOffset)
ACCESSORS(JSArrayBuffer, weak_first_view, Object, kWeakFirstViewOffset)
JSRegExp::Flags JSRegExp::GetFlags() {
- ASSERT(this->data()->IsFixedArray());
+ DCHECK(this->data()->IsFixedArray());
Object* data = this->data();
Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
return Flags(smi->value());
String* JSRegExp::Pattern() {
- ASSERT(this->data()->IsFixedArray());
+ DCHECK(this->data()->IsFixedArray());
Object* data = this->data();
  String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
return pattern;
Object* JSRegExp::DataAt(int index) {
- ASSERT(TypeTag() != NOT_COMPILED);
+ DCHECK(TypeTag() != NOT_COMPILED);
return FixedArray::cast(data())->get(index);
}
void JSRegExp::SetDataAt(int index, Object* value) {
- ASSERT(TypeTag() != NOT_COMPILED);
- ASSERT(index >= kDataIndex); // Only implementation data can be set this way.
+ DCHECK(TypeTag() != NOT_COMPILED);
+ DCHECK(index >= kDataIndex); // Only implementation data can be set this way.
FixedArray::cast(data())->set(index, value);
}
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
Map* map = fixed_array->map();
- ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+ DCHECK((IsFastSmiOrObjectElementsKind(kind) &&
(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map())) ||
(IsFastDoubleElementsKind(kind) &&
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
(kind > DICTIONARY_ELEMENTS));
- ASSERT((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
+ DCHECK((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
(elements()->IsFixedArray() && elements()->length() >= 2));
}
#endif
bool JSObject::HasExternalArrayElements() {
HeapObject* array = elements();
- ASSERT(array != NULL);
+ DCHECK(array != NULL);
return array->IsExternalArray();
}
#define EXTERNAL_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
bool JSObject::HasExternal##Type##Elements() { \
HeapObject* array = elements(); \
- ASSERT(array != NULL); \
+ DCHECK(array != NULL); \
if (!array->IsHeapObject()) \
return false; \
return array->map()->instance_type() == EXTERNAL_##TYPE##_ARRAY_TYPE; \
bool JSObject::HasFixedTypedArrayElements() {
HeapObject* array = elements();
- ASSERT(array != NULL);
+ DCHECK(array != NULL);
return array->IsFixedTypedArrayBase();
}
#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
bool JSObject::HasFixed##Type##Elements() { \
HeapObject* array = elements(); \
- ASSERT(array != NULL); \
+ DCHECK(array != NULL); \
if (!array->IsHeapObject()) \
return false; \
return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
NameDictionary* JSObject::property_dictionary() {
- ASSERT(!HasFastProperties());
+ DCHECK(!HasFastProperties());
return NameDictionary::cast(properties());
}
SeededNumberDictionary* JSObject::element_dictionary() {
- ASSERT(HasDictionaryElements());
+ DCHECK(HasDictionaryElements());
return SeededNumberDictionary::cast(elements());
}
-Handle<JSSetIterator> JSSetIterator::Create(
- Handle<OrderedHashSet> table,
- int kind) {
- return CreateInternal(table->GetIsolate()->set_iterator_map(), table, kind);
-}
-
-
-Handle<JSMapIterator> JSMapIterator::Create(
- Handle<OrderedHashMap> table,
- int kind) {
- return CreateInternal(table->GetIsolate()->map_iterator_map(), table, kind);
-}
-
-
bool Name::IsHashFieldComputed(uint32_t field) {
return (field & kHashNotComputedMask) == 0;
}
return String::cast(this)->ComputeAndSetHash();
}
+bool Name::IsOwn() {
+ return this->IsSymbol() && Symbol::cast(this)->is_own();
+}
+
StringHasher::StringHasher(int length, uint32_t seed)
: length_(length),
array_index_(0),
is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
is_first_char_(true) {
- ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
+ DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0);
}
bool StringHasher::UpdateIndex(uint16_t c) {
- ASSERT(is_array_index_);
+ DCHECK(is_array_index_);
if (c < '0' || c > '9') {
is_array_index_ = false;
return false;
template<typename Char>
inline void StringHasher::AddCharacters(const Char* chars, int length) {
- ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2);
+ DCHECK(sizeof(Char) == 1 || sizeof(Char) == 2);
int i = 0;
if (is_array_index_) {
for (; i < length; i++) {
}
}
for (; i < length; i++) {
- ASSERT(!is_array_index_);
+ DCHECK(!is_array_index_);
AddCharacter(chars[i]);
}
}
}
+uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
+ IteratingStringHasher hasher(string->length(), seed);
+ // Nothing to do.
+ if (hasher.has_trivial_hash()) return hasher.GetHashField();
+ ConsString* cons_string = String::VisitFlat(&hasher, string);
+ // The string was flat.
+ if (cons_string == NULL) return hasher.GetHashField();
+ // This is a ConsString, iterate across it.
+ ConsStringIterator iter(cons_string);
+ int offset;
+ while (NULL != (string = iter.Next(&offset))) {
+ String::VisitFlat(&hasher, string, offset);
+ }
+ return hasher.GetHashField();
+}
+
+
+void IteratingStringHasher::VisitOneByteString(const uint8_t* chars,
+ int length) {
+ AddCharacters(chars, length);
+}
+
+
+void IteratingStringHasher::VisitTwoByteString(const uint16_t* chars,
+ int length) {
+ AddCharacters(chars, length);
+}
+
+
bool Name::AsArrayIndex(uint32_t* index) {
return IsString() && String::cast(this)->AsArrayIndex(index);
}
}
-Object* JSReceiver::GetPrototype() {
- return map()->prototype();
+void String::SetForwardedInternalizedString(String* canonical) {
+ DCHECK(IsInternalizedString());
+ DCHECK(HasHashCode());
+ if (canonical == this) return; // No need to forward.
+ DCHECK(SlowEquals(canonical));
+ DCHECK(canonical->IsInternalizedString());
+ DCHECK(canonical->HasHashCode());
+ WRITE_FIELD(this, kHashFieldSlot, canonical);
+ // Setting the hash field to a tagged value sets the LSB, causing the hash
+ // code to be interpreted as uninitialized. We use this fact to recognize
+ // that we have a forwarded string.
+ DCHECK(!HasHashCode());
}
-Object* JSReceiver::GetConstructor() {
- return map()->constructor();
+String* String::GetForwardedInternalizedString() {
+ DCHECK(IsInternalizedString());
+ if (HasHashCode()) return this;
+ String* canonical = String::cast(READ_FIELD(this, kHashFieldSlot));
+ DCHECK(canonical->IsInternalizedString());
+ DCHECK(SlowEquals(canonical));
+ DCHECK(canonical->HasHashCode());
+ return canonical;
}
-bool JSReceiver::HasProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
+Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetPropertyAttribute(object, name) != ABSENT;
+ Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name);
+ if (!result.has_value) return Maybe<bool>();
+ return maybe(result.value != ABSENT);
}
-bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object,
- Handle<Name> name) {
+Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+ Handle<Name> name) {
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasPropertyWithHandler(proxy, name);
}
- return GetLocalPropertyAttribute(object, name) != ABSENT;
+ Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name);
+ if (!result.has_value) return Maybe<bool>();
+ return maybe(result.value != ABSENT);
}
-PropertyAttributes JSReceiver::GetPropertyAttribute(Handle<JSReceiver> object,
- Handle<Name> key) {
+Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
+ Handle<JSReceiver> object, Handle<Name> key) {
uint32_t index;
if (object->IsJSObject() && key->AsArrayIndex(&index)) {
return GetElementAttribute(object, index);
}
- return GetPropertyAttributeWithReceiver(object, object, key);
+ LookupIterator it(object, key);
+ return GetPropertyAttributes(&it);
}
-PropertyAttributes JSReceiver::GetElementAttribute(Handle<JSReceiver> object,
- uint32_t index) {
+Maybe<PropertyAttributes> JSReceiver::GetElementAttribute(
+ Handle<JSReceiver> object, uint32_t index) {
if (object->IsJSProxy()) {
return JSProxy::GetElementAttributeWithHandler(
Handle<JSProxy>::cast(object), object, index);
bool JSGlobalObject::IsDetached() {
- return JSGlobalProxy::cast(global_receiver())->IsDetachedFrom(this);
+ return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(this);
}
-bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) {
- return GetPrototype() != global;
+bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) const {
+ const PrototypeIterator iter(this->GetIsolate(),
+ const_cast<JSGlobalProxy*>(this));
+ return iter.GetCurrent() != global;
}
-Handle<Object> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
+Handle<Smi> JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver> object) {
return object->IsJSProxy()
? JSProxy::GetOrCreateIdentityHash(Handle<JSProxy>::cast(object))
: JSObject::GetOrCreateIdentityHash(Handle<JSObject>::cast(object));
}
-bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(object), object, index, true) != ABSENT;
+ Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, true);
+ if (!result.has_value) return Maybe<bool>();
+ return maybe(result.value != ABSENT);
}
-bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) {
+Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
+ uint32_t index) {
if (object->IsJSProxy()) {
Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
return JSProxy::HasElementWithHandler(proxy, index);
}
- return JSObject::GetElementAttributeWithReceiver(
- Handle<JSObject>::cast(object), object, index, false) != ABSENT;
+ Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
+ Handle<JSObject>::cast(object), object, index, false);
+ if (!result.has_value) return Maybe<bool>();
+ return maybe(result.value != ABSENT);
}
-PropertyAttributes JSReceiver::GetLocalElementAttribute(
+Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttribute(
Handle<JSReceiver> object, uint32_t index) {
if (object->IsJSProxy()) {
return JSProxy::GetElementAttributeWithHandler(
}
-bool AccessorInfo::prohibits_overwriting() {
- return BooleanBit::get(flag(), kProhibitsOverwritingBit);
-}
-
-
-void AccessorInfo::set_prohibits_overwriting(bool value) {
- set_flag(BooleanBit::set(flag(), kProhibitsOverwritingBit, value));
-}
-
-
PropertyAttributes AccessorInfo::property_attributes() {
return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
}
bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
- Object* function_template = expected_receiver_type();
- if (!function_template->IsFunctionTemplateInfo()) return true;
- return FunctionTemplateInfo::cast(function_template)->IsTemplateFor(receiver);
-}
-
-
-void AccessorPair::set_access_flags(v8::AccessControl access_control) {
- int current = access_flags()->value();
- current = BooleanBit::set(current,
- kProhibitsOverwritingBit,
- access_control & PROHIBITS_OVERWRITING);
- current = BooleanBit::set(current,
- kAllCanReadBit,
- access_control & ALL_CAN_READ);
- current = BooleanBit::set(current,
- kAllCanWriteBit,
- access_control & ALL_CAN_WRITE);
- set_access_flags(Smi::FromInt(current));
-}
-
-
-bool AccessorPair::all_can_read() {
- return BooleanBit::get(access_flags(), kAllCanReadBit);
-}
-
-
-bool AccessorPair::all_can_write() {
- return BooleanBit::get(access_flags(), kAllCanWriteBit);
+ if (!HasExpectedReceiverType()) return true;
+ if (!receiver->IsJSObject()) return false;
+ return FunctionTemplateInfo::cast(expected_receiver_type())
+ ->IsTemplateFor(JSObject::cast(receiver)->map());
}
-bool AccessorPair::prohibits_overwriting() {
- return BooleanBit::get(access_flags(), kProhibitsOverwritingBit);
+void ExecutableAccessorInfo::clear_setter() {
+ set_setter(GetIsolate()->heap()->undefined_value(), SKIP_WRITE_BARRIER);
}
Handle<Object> key,
Handle<Object> value,
PropertyDetails details) {
- ASSERT(!key->IsName() ||
+ DCHECK(!key->IsName() ||
details.IsDeleted() ||
details.dictionary_index() > 0);
int index = DerivedHashTable::EntryToIndex(entry);
bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
- ASSERT(other->IsNumber());
+ DCHECK(other->IsNumber());
return key == static_cast<uint32_t>(other->Number());
}
uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
Object* other) {
- ASSERT(other->IsNumber());
+ DCHECK(other->IsNumber());
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
}
uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
uint32_t seed,
Object* other) {
- ASSERT(other->IsNumber());
+ DCHECK(other->IsNumber());
return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
}
Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
Handle<Name> key) {
- ASSERT(key->IsUniqueName());
+ DCHECK(key->IsUniqueName());
return key;
}
-void NameDictionary::DoGenerateNewEnumerationIndices(
+Handle<FixedArray> NameDictionary::DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary) {
- DerivedDictionary::GenerateNewEnumerationIndices(dictionary);
+ return DerivedDictionary::GenerateNewEnumerationIndices(dictionary);
}
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
// - IncrementalMarking::Step
- ASSERT(!heap->InNewSpace(heap->empty_fixed_array()));
+ DCHECK(!heap->InNewSpace(heap->empty_fixed_array()));
WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
}
+int Map::SlackForArraySize(int old_size, int size_limit) {
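+  // Allow at most one extra slot while the array is small (fewer than four
+  // entries); otherwise allow up to half the current size, capped by the
+  // remaining room up to size_limit.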
+ const int max_slack = size_limit - old_size;
+ CHECK(max_slack >= 0);
+ if (old_size < 4) return Min(max_slack, 1);
+ return Min(max_slack, old_size / 2);
+}
+
+
void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
- ASSERT(array->HasFastSmiOrObjectElements());
+ DCHECK(array->HasFastSmiOrObjectElements());
Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
bool JSArray::AllowsSetElementsLength() {
bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
- ASSERT(result == !HasExternalArrayElements());
+ DCHECK(result == !HasExternalArrayElements());
return result;
}
EnsureCanContainElements(array, storage, storage->length(),
ALLOW_COPIED_DOUBLE_ELEMENTS);
- ASSERT((storage->map() == array->GetHeap()->fixed_double_array_map() &&
+ DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() &&
IsFastDoubleElementsKind(array->GetElementsKind())) ||
((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
(IsFastObjectElementsKind(array->GetElementsKind()) ||
}
-Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
- return isolate->factory()->uninitialized_symbol();
-}
-
-
-Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
- return isolate->factory()->megamorphic_symbol();
-}
-
-
-Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
- ElementsKind elements_kind) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
-}
-
-
-Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
- return heap->uninitialized_symbol();
-}
-
-
int TypeFeedbackInfo::ic_total_count() {
int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
return ICTotalCountField::decode(current);
void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
+ if (delta == 0) return;
int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
// We can get negative count here when the type-feedback info is
}
+int TypeFeedbackInfo::ic_generic_count() {
+ return Smi::cast(READ_FIELD(this, kStorage3Offset))->value();
+}
+
+
+void TypeFeedbackInfo::change_ic_generic_count(int delta) {
+ if (delta == 0) return;
+ int new_count = ic_generic_count() + delta;
+ if (new_count >= 0) {
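+    // Masking with ~Smi::kMinValue keeps the stored count inside the
+    // non-negative Smi range.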
+ new_count &= ~Smi::kMinValue;
+ WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(new_count));
+ }
+}
+
+
void TypeFeedbackInfo::initialize_storage() {
WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
+ WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(0));
}
Relocatable::~Relocatable() {
- ASSERT_EQ(isolate_->relocatable_top(), this);
+ DCHECK_EQ(isolate_->relocatable_top(), this);
isolate_->set_relocatable_top(prev_);
}
}
-void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
- typedef v8::String::ExternalAsciiStringResource Resource;
- v->VisitExternalAsciiString(
+void ExternalOneByteString::ExternalOneByteStringIterateBody(ObjectVisitor* v) {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ v->VisitExternalOneByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
-template<typename StaticVisitor>
-void ExternalAsciiString::ExternalAsciiStringIterateBody() {
- typedef v8::String::ExternalAsciiStringResource Resource;
- StaticVisitor::VisitExternalAsciiString(
+template <typename StaticVisitor>
+void ExternalOneByteString::ExternalOneByteStringIterateBody() {
+ typedef v8::String::ExternalOneByteStringResource Resource;
+ StaticVisitor::VisitExternalOneByteString(
reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
}
}
+template<class Derived, class TableType>
+Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
+ TableType* table(TableType::cast(this->table()));
+ int index = Smi::cast(this->index())->value();
+ Object* key = table->KeyAt(index);
+ DCHECK(!key->IsTheHole());
+ return key;
+}
+
+
+void JSSetIterator::PopulateValueArray(FixedArray* array) {
+ array->set(0, CurrentKey());
+}
+
+
+void JSMapIterator::PopulateValueArray(FixedArray* array) {
+ array->set(0, CurrentKey());
+ array->set(1, CurrentValue());
+}
+
+
+Object* JSMapIterator::CurrentValue() {
+ OrderedHashMap* table(OrderedHashMap::cast(this->table()));
+ int index = Smi::cast(this->index())->value();
+ Object* value = table->ValueAt(index);
+ DCHECK(!value->IsTheHole());
+ return value;
+}
+
+
#undef TYPE_CHECKER
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
#undef FIELD_ADDR
+#undef FIELD_ADDR_CONST
#undef READ_FIELD
#undef NOBARRIER_READ_FIELD
#undef WRITE_FIELD