stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
}
return hydrogen()->NeedsCanonicalization();
}
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
- ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
- : 0;
-
+ int base_offset = instr->base_offset();
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
- int base_offset =
- (instr->additional_index() << element_size_shift) + additional_offset;
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
elements_kind == FLOAT32_ELEMENTS) {
__ vldr(double_scratch0().low(), scratch0(), base_offset);
__ vcvt_f64_f32(result, double_scratch0().low());
- } else { // loading doubles, not floats.
+ } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
__ vldr(result, scratch0(), base_offset);
}
} else {
Register result = ToRegister(instr->result());
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ element_size_shift, shift_size, base_offset);
switch (elements_kind) {
case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int base_offset =
- FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- (instr->additional_index() << element_size_shift);
+ int base_offset = instr->base_offset();
if (key_is_constant) {
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- base_offset += constant_key << element_size_shift;
+ base_offset += constant_key * kDoubleSize;
}
__ add(scratch, elements, Operand(base_offset));
Register result = ToRegister(instr->result());
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
Register key = ToRegister(instr->key());
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ ldr(result, FieldMemOperand(store_base, offset));
+ __ ldr(result, MemOperand(store_base, offset));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset) {
- int base_offset = (additional_index << element_size) + additional_offset;
+ int base_offset) {
if (key_is_constant) {
- return MemOperand(base,
- base_offset + (constant_key << element_size));
+ return MemOperand(base, (constant_key << element_size) + base_offset);
}
- if (additional_offset != 0) {
- __ mov(scratch0(), Operand(base_offset));
- if (shift_size >= 0) {
- __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
- } else {
- ASSERT_EQ(-1, shift_size);
- // key can be negative, so using ASR here.
- __ add(scratch0(), scratch0(), Operand(key, ASR, 1));
- }
- return MemOperand(base, scratch0());
- }
-
- if (additional_index != 0) {
- additional_index *= 1 << (element_size - shift_size);
- __ add(scratch0(), key, Operand(additional_index));
- }
-
- if (additional_index == 0) {
+ if (base_offset == 0) {
if (shift_size >= 0) {
return MemOperand(base, key, LSL, shift_size);
} else {
}
if (shift_size >= 0) {
- return MemOperand(base, scratch0(), LSL, shift_size);
+ __ add(scratch0(), base, Operand(key, LSL, shift_size));
+ return MemOperand(scratch0(), base_offset);
} else {
ASSERT_EQ(-1, shift_size);
- return MemOperand(base, scratch0(), LSR, 1);
+ __ add(scratch0(), base, Operand(key, ASR, 1));
+ return MemOperand(scratch0(), base_offset);
}
}
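// A rough standalone sketch (illustrative helper, not V8 code) of the address
// arithmetic PrepareKeyedOperand now emits: the element address is always
// base + scaled(key) + base_offset, where base_offset already folds in the
// array header and any dehoisted constant key.
static int EffectiveByteOffset(int key, int shift_size, int base_offset) {
  // shift_size is log2(element size); shift_size == -1 means the key is a
  // smi (value << 1) indexing byte-sized elements, so it is scaled down.
  int scaled_key = shift_size >= 0 ? key << shift_size : key >> 1;
  return scaled_key + base_offset;
}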
int element_size_shift = ElementsKindToShiftSize(elements_kind);
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
- ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
- : 0;
+ int base_offset = instr->base_offset();
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
- int base_offset =
- (instr->additional_index() << element_size_shift) + additional_offset;
Register address = scratch0();
DwVfpRegister value(ToDoubleRegister(instr->value()));
if (key_is_constant) {
MemOperand mem_operand = PrepareKeyedOperand(
key, external_pointer, key_is_constant, constant_key,
element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
+ base_offset);
switch (elements_kind) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case EXTERNAL_INT8_ELEMENTS:
Register scratch = scratch0();
DwVfpRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
+ int base_offset = instr->base_offset();
// Calculate the effective address of the slot in the array to store the
// double value.
Abort(kArrayIndexConstantValueTooBig);
}
__ add(scratch, elements,
- Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand((constant_key << element_size_shift) + base_offset));
} else {
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ add(scratch, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, elements, Operand(base_offset));
__ add(scratch, scratch,
Operand(ToRegister(instr->key()), LSL, shift_size));
}
__ Assert(ne, kDefaultNaNModeNotSet);
}
__ VFPCanonicalizeNaN(double_scratch, value);
- __ vstr(double_scratch, scratch,
- instr->additional_index() << element_size_shift);
+ __ vstr(double_scratch, scratch, 0);
} else {
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+ __ vstr(value, scratch, 0);
}
}
: no_reg;
Register scratch = scratch0();
Register store_base = scratch;
- int offset = 0;
+ int offset = instr->base_offset();
// Do the store.
if (instr->key()->IsConstantOperand()) {
ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ offset += ToInteger32(const_operand) * kPointerSize;
store_base = elements;
} else {
// Even though the HLoadKeyed instruction forces the input
} else {
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
}
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- __ str(value, FieldMemOperand(store_base, offset));
+ __ str(value, MemOperand(store_base, offset));
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset - kHeapObjectTag));
+ __ add(key, store_base, Operand(offset));
__ RecordWrite(elements,
key,
value,
int constant_key,
int element_size,
int shift_size,
- int additional_index,
- int additional_offset);
+ int base_offset);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
bool is_typed_elements() const {
return is_external() || is_fixed_typed_array();
}
- uint32_t additional_index() const {
- return this->hydrogen()->index_offset();
+ uint32_t base_offset() const {
+ return this->hydrogen()->base_offset();
}
void PrintDataTo(StringStream* stream) V8_OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
- if (this->hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", this->additional_index());
+ if (this->base_offset() != 0) {
+ stream->Add(" + %d]", this->base_offset());
} else {
stream->Add("]");
}
}
return this->hydrogen()->NeedsCanonicalization();
}
- uint32_t additional_index() const { return this->hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
void PrintDataTo(StringStream* stream) V8_OVERRIDE {
this->elements()->PrintTo(stream);
stream->Add("[");
this->key()->PrintTo(stream);
- if (this->hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", this->additional_index());
+ if (this->base_offset() != 0) {
+ stream->Add(" + %d] <-", this->base_offset());
} else {
stream->Add("] <- ");
}
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
- int additional_index) {
+ int base_offset) {
int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int additional_offset = additional_index << element_size_shift;
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
- additional_offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
- }
if (key_is_constant) {
int key_offset = constant_key << element_size_shift;
- return MemOperand(base, key_offset + additional_offset);
+ return MemOperand(base, key_offset + base_offset);
}
if (key_is_smi) {
__ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
- return MemOperand(scratch, additional_offset);
+ return MemOperand(scratch, base_offset);
}
- if (additional_offset == 0) {
+ if (base_offset == 0) {
return MemOperand(base, key, SXTW, element_size_shift);
}
ASSERT(!AreAliased(scratch, key));
- __ Add(scratch, base, additional_offset);
+ __ Add(scratch, base, base_offset);
return MemOperand(scratch, key, SXTW, element_size_shift);
}
PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
key_is_constant, constant_key,
elements_kind,
- instr->additional_index());
+ instr->base_offset());
if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
(elements_kind == FLOAT32_ELEMENTS)) {
bool key_is_tagged,
ElementsKind elements_kind,
Representation representation,
- int additional_index) {
+ int base_offset) {
STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
int element_size_shift = ElementsKindToShiftSize(elements_kind);
ASSERT(elements_kind == FAST_SMI_ELEMENTS);
// Read or write only the most-significant 32 bits in the case of fast smi
// arrays.
- return UntagSmiFieldMemOperand(base, additional_index);
+ return UntagSmiMemOperand(base, base_offset);
} else {
- return FieldMemOperand(base, additional_index);
+ return MemOperand(base, base_offset);
}
} else {
// Sign extend key because it could be a 32-bit negative value or contain
// Read or write only the most-significant 32 bits in the case of fast smi
// arrays.
__ Add(base, elements, Operand(key, SXTW, element_size_shift));
- return UntagSmiFieldMemOperand(base, additional_index);
+ return UntagSmiMemOperand(base, base_offset);
} else {
- __ Add(base, elements, additional_index - kHeapObjectTag);
+ __ Add(base, elements, base_offset);
return MemOperand(base, key, SXTW, element_size_shift);
}
}
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- mem_op = FieldMemOperand(elements, offset);
+ int offset = instr->base_offset() + constant_key * kDoubleSize;
+ mem_op = MemOperand(elements, offset);
} else {
Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
instr->hydrogen()->representation(),
- offset);
+ instr->base_offset());
}
__ Ldr(result, mem_op);
if (instr->key()->IsConstantOperand()) {
ASSERT(instr->temp() == NULL);
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ int offset = instr->base_offset() +
+ ToInteger32(const_operand) * kPointerSize;
if (representation.IsInteger32()) {
ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
(kSmiTag == 0));
- mem_op = UntagSmiFieldMemOperand(elements, offset);
+ mem_op = UntagSmiMemOperand(elements, offset);
} else {
- mem_op = FieldMemOperand(elements, offset);
+ mem_op = MemOperand(elements, offset);
}
} else {
Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
- representation, offset);
+ representation, instr->base_offset());
}
__ Load(result, mem_op, representation);
PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
key_is_constant, constant_key,
elements_kind,
- instr->additional_index());
+ instr->base_offset());
if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
(elements_kind == FLOAT32_ELEMENTS)) {
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- mem_op = FieldMemOperand(elements, offset);
+ int offset = instr->base_offset() + constant_key * kDoubleSize;
+ mem_op = MemOperand(elements, offset);
} else {
Register store_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
instr->hydrogen()->representation(),
- offset);
+ instr->base_offset());
}
if (instr->NeedsCanonicalization()) {
Representation representation = instr->hydrogen()->value()->representation();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ int offset = instr->base_offset() +
+ ToInteger32(const_operand) * kPointerSize;
store_base = elements;
if (representation.IsInteger32()) {
ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
(kSmiTag == 0));
- mem_op = UntagSmiFieldMemOperand(store_base, offset);
+ mem_op = UntagSmiMemOperand(store_base, offset);
} else {
- mem_op = FieldMemOperand(store_base, offset);
+ mem_op = MemOperand(store_base, offset);
}
} else {
store_base = scratch;
key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
instr->hydrogen()->elements_kind(),
- representation, offset);
+ representation, instr->base_offset());
}
__ Store(value, mem_op, representation);
bool key_is_constant,
int constant_key,
ElementsKind elements_kind,
- int additional_index);
+ int base_offset);
MemOperand PrepareKeyedArrayOperand(Register base,
Register elements,
Register key,
bool key_is_tagged,
ElementsKind elements_kind,
Representation representation,
- int additional_index);
+ int base_offset);
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
}
+int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) {
+ return IsExternalArrayElementsKind(elements_kind)
+ ? 0 : (FixedArray::kHeaderSize - kSmiTagSize);
+}
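// Rough illustration of what the default header size evaluates to (numbers
// assume a 32-bit build where FixedArray::kHeaderSize is 8 and the tag size
// is 1; treat them as a sketch rather than normative values):
//   GetDefaultHeaderSizeForElementsKind(EXTERNAL_INT32_ELEMENTS) == 0
//       // external arrays point straight at their backing store
//   GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) == 8 - 1 == 7
//       // on-heap arrays: skip the header and strip the object tag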
+
+
const char* ElementsKindToString(ElementsKind kind) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
return accessor->name();
FAST_HOLEY_SMI_ELEMENTS - FAST_SMI_ELEMENTS;
int ElementsKindToShiftSize(ElementsKind elements_kind);
+int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
int32_t value = constant->Integer32Value() * sign;
- // We limit offset values to 30 bits because we want to avoid the risk of
- // overflows when the offset is added to the object header size.
+ // We limit the offset to what the instruction can encode (its
+ // MaxBaseOffsetBits()) so that, once scaled by the element size and folded
+ // into the base offset, it cannot overflow.
- if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return;
+ if (value >= 1 << array_operation->MaxBaseOffsetBits() || value < 0) return;
array_operation->SetKey(subexpression);
if (binary_operation->HasNoUses()) {
binary_operation->DeleteAndReplaceWith(NULL);
}
- array_operation->SetIndexOffset(static_cast<uint32_t>(value));
+ value <<= ElementsKindToShiftSize(array_operation->elements_kind());
+ array_operation->IncreaseBaseOffset(static_cast<uint32_t>(value));
array_operation->SetDehoisted(true);
}
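// Rough standalone illustration of the scaling above (a sketch, not V8 code):
// for a dehoisted access such as a[i + 4] on FAST_DOUBLE_ELEMENTS, whose
// shift size is 3, the constant 4 is scaled to 4 << 3 == 32 bytes before it
// is folded into a base offset that already holds the header size.
static unsigned ScaledDehoistOffset(unsigned constant_key, int elements_shift) {
  return constant_key << elements_shift;
}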
stream->Add("[");
key()->PrintNameTo(stream);
if (IsDehoisted()) {
- stream->Add(" + %d]", index_offset());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
stream->Add("[");
key()->PrintNameTo(stream);
if (IsDehoisted()) {
- stream->Add(" + %d] = ", index_offset());
+ stream->Add(" + %d] = ", base_offset());
} else {
stream->Add("] = ");
}
return new(zone) I(p1, p2, p3, p4, p5); \
}
+#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
+ static I* New(Zone* zone, \
+ HValue* context, \
+ P1 p1, \
+ P2 p2, \
+ P3 p3, \
+ P4 p4, \
+ P5 p5, \
+ P6 p6) { \
+ return new(zone) I(p1, p2, p3, p4, p5, p6); \
+ }
+
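// For HLoadKeyed, the P6 factory above expands roughly to:
//   static HLoadKeyed* New(Zone* zone, HValue* context,
//                          HValue* obj, HValue* key, HValue* dep,
//                          ElementsKind kind, LoadKeyedHoleMode mode,
//                          int offset) {
//     return new(zone) HLoadKeyed(obj, key, dep, kind, mode, offset);
//   }
// The context argument is accepted but not forwarded, keeping call sites
// uniform with the WITH_CONTEXT factory variants.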
#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \
static I* New(Zone* zone, HValue* context) { \
return new(zone) I(context); \
public:
virtual HValue* GetKey() = 0;
virtual void SetKey(HValue* key) = 0;
- virtual void SetIndexOffset(uint32_t index_offset) = 0;
- virtual int MaxIndexOffsetBits() = 0;
+ virtual ElementsKind elements_kind() const = 0;
+ virtual void IncreaseBaseOffset(uint32_t base_offset) = 0;
+ virtual int MaxBaseOffsetBits() = 0;
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { }
};
+static const int kDefaultKeyedHeaderOffsetSentinel = -1;
+
enum LoadKeyedHoleMode {
NEVER_RETURN_HOLE,
ALLOW_RETURN_HOLE
ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*,
ElementsKind, LoadKeyedHoleMode);
+ DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, LoadKeyedHoleMode, int);
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
return OperandAt(2);
}
bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
- uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
- void SetIndexOffset(uint32_t index_offset) {
- bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
+ uint32_t base_offset() { return BaseOffsetField::decode(bit_field_); }
+ void IncreaseBaseOffset(uint32_t base_offset) {
+ base_offset += BaseOffsetField::decode(bit_field_);
+ bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
}
- virtual int MaxIndexOffsetBits() {
- return kBitsForIndexOffset;
+ virtual int MaxBaseOffsetBits() {
+ return kBitsForBaseOffset - ElementsKindToShiftSize(elements_kind());
}
HValue* GetKey() { return key(); }
void SetKey(HValue* key) { SetOperandAt(1, key); }
if (!other->IsLoadKeyed()) return false;
HLoadKeyed* other_load = HLoadKeyed::cast(other);
- if (IsDehoisted() && index_offset() != other_load->index_offset())
+ if (IsDehoisted() && base_offset() != other_load->base_offset())
return false;
return elements_kind() == other_load->elements_kind();
}
HValue* key,
HValue* dependency,
ElementsKind elements_kind,
- LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
+ LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
+ int offset = kDefaultKeyedHeaderOffsetSentinel)
: bit_field_(0) {
+ offset = offset == kDefaultKeyedHeaderOffsetSentinel
+ ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+ : offset;
bit_field_ = ElementsKindField::encode(elements_kind) |
- HoleModeField::encode(mode);
+ HoleModeField::encode(mode) |
+ BaseOffsetField::encode(offset);
SetOperandAt(0, obj);
SetOperandAt(1, key);
enum LoadKeyedBits {
kBitsForElementsKind = 5,
kBitsForHoleMode = 1,
- kBitsForIndexOffset = 25,
+ kBitsForBaseOffset = 25,
kBitsForIsDehoisted = 1,
kStartElementsKind = 0,
kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
- kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
- kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
+ kStartBaseOffset = kStartHoleMode + kBitsForHoleMode,
+ kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset
};
- STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
+ STATIC_ASSERT((kBitsForElementsKind + kBitsForHoleMode + kBitsForBaseOffset +
kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
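// Resulting layout of bit_field_ (32 bits, for quick reference):
//   bits  0..4   ElementsKindField (5 bits)
//   bit   5      HoleModeField     (1 bit)
//   bits  6..30  BaseOffsetField   (25 bits)
//   bit   31     IsDehoistedField  (1 bit)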
class ElementsKindField:
class HoleModeField:
public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
{}; // NOLINT
- class IndexOffsetField:
- public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
+ class BaseOffsetField:
+ public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset>
{}; // NOLINT
class IsDehoistedField:
public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
ElementsKind);
DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
ElementsKind, StoreFieldOrKeyedMode);
+ DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
+ ElementsKind, StoreFieldOrKeyedMode, int);
virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
// kind_fast: tagged[int32] = tagged
}
StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
ElementsKind elements_kind() const { return elements_kind_; }
- uint32_t index_offset() { return index_offset_; }
- void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
- virtual int MaxIndexOffsetBits() {
+ uint32_t base_offset() { return base_offset_; }
+ void IncreaseBaseOffset(uint32_t base_offset) {
+ base_offset_ += base_offset;
+ }
+ virtual int MaxBaseOffsetBits() {
return 31 - ElementsKindToShiftSize(elements_kind_);
}
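// The unscaled dehoisted key must fit in 31 - shift bits so that, once the
// dehoister multiplies it by the element size, the result is still a
// non-negative 32-bit value; unlike HLoadKeyed there is no 25-bit field to
// respect, since base_offset_ is stored as a plain uint32_t.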
HValue* GetKey() { return key(); }
private:
HStoreKeyed(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind,
- StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
+ StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
+ int offset = kDefaultKeyedHeaderOffsetSentinel)
: elements_kind_(elements_kind),
- index_offset_(0),
+ base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
+ ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+ : offset),
is_dehoisted_(false),
is_uninitialized_(false),
store_mode_(store_mode),
}
ElementsKind elements_kind_;
- uint32_t index_offset_;
+ uint32_t base_offset_;
bool is_dehoisted_ : 1;
bool is_uninitialized_ : 1;
StoreFieldOrKeyedMode store_mode_: 1;
key,
instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
+ instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(), instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
+ instr->base_offset() + sizeof(kHoleNanLower32));
__ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
XMMRegister result = ToDoubleRegister(instr->result());
__ movsd(result, double_load_operand);
}
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
+ instr->base_offset()));
// Check for the hole value.
if (instr->hydrogen()->RequiresHoleCheck()) {
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t base_offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int element_shift_size = ElementsKindToShiftSize(elements_kind);
- if (IsFixedTypedArrayElementsKind(elements_kind)) {
- offset += FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
- }
int shift_size = element_shift_size;
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ (constant_value << shift_size) + base_offset);
} else {
// Take the tag bit into account while computing the shift size.
if (key_representation.IsSmi() && (shift_size >= 1)) {
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
- offset + (additional_index << element_shift_size));
+ base_offset);
}
}
key,
instr->hydrogen()->key()->representation(),
elements_kind,
- 0,
- instr->additional_index()));
+ instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
XMMRegister xmm_scratch = double_scratch0();
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
XMMRegister value = ToDoubleRegister(instr->value());
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
if (instr->value()->IsRegister()) {
__ mov(operand, ToRegister(instr->value()));
} else {
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t base_offset);
Operand BuildSeqStringOperand(Register string,
LOperand* index,
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool key_is_smi() {
return hydrogen()->key()->representation().IsTagged();
}
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- int base_offset = instr->is_fixed_typed_array()
- ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
- : 0;
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
elements_kind,
- base_offset,
- instr->additional_index()));
+ instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
if (instr->hydrogen()->RequiresHoleCheck()) {
- int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
- sizeof(kHoleNanLower32);
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
key,
FAST_DOUBLE_ELEMENTS,
- offset,
- instr->additional_index());
+ instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr->environment());
}
instr->elements(),
key,
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
__ movsd(result, double_load_operand);
}
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
bool requires_hole_check = hinstr->RequiresHoleCheck();
- int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->representation();
+ int offset = instr->base_offset();
if (representation.IsInteger32() && SmiValuesAre32Bits() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
- offset,
- instr->additional_index()),
+ offset),
Representation::Smi());
__ AssertSmi(scratch);
}
BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
- offset,
- instr->additional_index()),
+ offset),
representation);
// Check for the hole value.
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index) {
+ uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
- ((constant_value + additional_index) << shift_size)
- + offset);
+ (constant_value << shift_size) + offset);
} else {
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
- offset + (additional_index << shift_size));
+ offset);
}
}
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- int base_offset = instr->is_fixed_typed_array()
- ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
- : 0;
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
elements_kind,
- base_offset,
- instr->additional_index()));
+ instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
instr->elements(),
key,
FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
+ instr->base_offset());
__ movsd(double_store_operand, value);
}
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
- int offset = FixedArray::kHeaderSize - kHeapObjectTag;
+ int offset = instr->base_offset();
Representation representation = hinstr->value()->representation();
if (representation.IsInteger32() && SmiValuesAre32Bits()) {
BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
- offset,
- instr->additional_index()),
+ offset),
Representation::Smi());
__ AssertSmi(scratch);
}
BuildFastArrayOperand(instr->elements(),
key,
FAST_ELEMENTS,
- offset,
- instr->additional_index());
-
+ offset);
if (instr->value()->IsRegister()) {
__ Store(operand, ToRegister(instr->value()), representation);
} else {
LOperand* elements_pointer,
LOperand* key,
ElementsKind elements_kind,
- uint32_t offset,
- uint32_t additional_index = 0);
+ uint32_t base_offset);
Operand BuildSeqStringOperand(Register string,
LOperand* index,
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
+ stream->Add(" + %d]", base_offset());
} else {
stream->Add("]");
}
stream->Add("[");
key()->PrintTo(stream);
if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
+ stream->Add(" + %d] <-", base_offset());
} else {
stream->Add("] <- ");
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
- uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ uint32_t base_offset() const { return hydrogen()->base_offset(); }
};