}
-void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
- Register elements,
- Register key,
- bool key_is_tagged,
- ElementsKind elements_kind) {
+MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
+ Register elements,
+ Register key,
+ bool key_is_tagged,
+ ElementsKind elements_kind,
+ Representation representation,
+ int additional_index) {
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
int element_size_shift = ElementsKindToShiftSize(elements_kind);
// Even though the HLoad/StoreKeyed instructions force the input
// representation for the key to be an integer, the input gets replaced
// during bounds check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (key_is_tagged) {
__ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+ if (representation.IsInteger32()) {
+ ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+ // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
+ return UntagSmiFieldMemOperand(base, additional_index);
+ } else {
+ return FieldMemOperand(base, additional_index);
+ }
} else {
// Sign extend key because it could be a 32-bit negative value or contain
// garbage in the top 32-bits. The address computation happens in 64-bit.
ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
- __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ if (representation.IsInteger32()) {
+ ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+ // Read or write only the most-significant 32 bits in the case of fast smi
+ // arrays.
+ __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+ return UntagSmiFieldMemOperand(base, additional_index);
+ } else {
+ __ Add(base, elements, additional_index - kHeapObjectTag);
+ return MemOperand(base, key, SXTW, element_size_shift);
+ }
}
}
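
As background for the UntagSmiFieldMemOperand / Operand::UntagSmiAndScale uses above: under the layout asserted at the top of the function (kSmiTag == 0, kSmiShift == 32, payload in the upper word), untagging is a plain arithmetic shift, and on a little-endian target the payload can be read as the 32-bit word at byte offset +4. A minimal standalone sketch of those two facts (illustration only, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // A 64-bit smi keeps its 32-bit payload in the upper word: payload << 32.
  int32_t payload = -42;
  uint64_t smi = static_cast<uint64_t>(static_cast<int64_t>(payload)) << 32;

  // Untagging is one arithmetic shift right, which UntagSmiAndScale folds
  // into the address computation together with the element size scaling.
  assert((static_cast<int64_t>(smi) >> 32) == payload);

  // On little-endian hardware the payload is also just the 32-bit word at
  // byte offset +4, which is what an untagging MemOperand reads directly.
  int32_t high_word;
  std::memcpy(&high_word, reinterpret_cast<const char*>(&smi) + 4,
              sizeof(high_word));
  assert(high_word == payload);
  return 0;
}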
void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
Register elements = ToRegister(instr->elements());
DoubleRegister result = ToDoubleRegister(instr->result());
- Register load_base;
- int offset = 0;
+ MemOperand mem_op;
if (instr->key()->IsConstantOperand()) {
ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
(instr->temp() == NULL));
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- load_base = elements;
+ int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ mem_op = FieldMemOperand(elements, offset);
} else {
- load_base = ToRegister(instr->temp());
+ Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ instr->hydrogen()->representation(),
+ offset);
}
- __ Ldr(result, FieldMemOperand(load_base, offset));
+
+ __ Ldr(result, mem_op);
if (instr->hydrogen()->RequiresHoleCheck()) {
Register scratch = ToRegister(instr->temp());
-
- // TODO(all): Is it faster to reload this value to an integer register, or
- // move from fp to integer?
- __ Fmov(scratch, result);
- __ Cmp(scratch, kHoleNanInt64);
- DeoptimizeIf(eq, instr->environment());
+ // Detect the hole NaN by adding one to the integer representation of the
+ // result, and checking for overflow.
+ STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
+ __ Ldr(scratch, mem_op);
+ __ Cmn(scratch, 1);
+ DeoptimizeIf(vs, instr->environment());
}
}
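
The replacement hole check avoids the fpr-to-gpr move flagged by the old TODO: it reloads the raw bits and exploits that kHoleNanInt64 (0x7fffffffffffffff, per the STATIC_ASSERT) is exactly INT64_MAX, so adding 1 sets the overflow flag for that pattern and no other. A minimal sketch of the same predicate in portable C++ (the GCC/Clang builtin stands in for Cmn's V flag):

#include <cassert>
#include <cstdint>

constexpr uint64_t kHoleNanInt64 = 0x7fffffffffffffff;

// Mirrors "Ldr scratch, mem_op; Cmn scratch, #1; DeoptimizeIf(vs, ...)":
// signed x + 1 overflows exactly when x == INT64_MAX, i.e. the hole NaN.
bool IsHoleNan(uint64_t bits) {
  int64_t sum;
  return __builtin_add_overflow(static_cast<int64_t>(bits), int64_t{1}, &sum);
}

int main() {
  assert(IsHoleNan(kHoleNanInt64));
  assert(!IsHoleNan(0x7ff8000000000000));  // an ordinary quiet NaN
  assert(!IsHoleNan(0));                   // +0.0
  return 0;
}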
void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
Register elements = ToRegister(instr->elements());
Register result = ToRegister(instr->result());
- Register load_base;
- int offset = 0;
+ MemOperand mem_op;
+ Representation representation = instr->hydrogen()->representation();
if (instr->key()->IsConstantOperand()) {
ASSERT(instr->temp() == NULL);
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- load_base = elements;
+ int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
+ mem_op = UntagSmiFieldMemOperand(elements, offset);
+ } else {
+ mem_op = FieldMemOperand(elements, offset);
+ }
} else {
- load_base = ToRegister(instr->temp());
+ Register load_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-    CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
-                               instr->hydrogen()->elements_kind());
-    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+    int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      representation, offset);
}
-  Representation representation = instr->hydrogen()->representation();
-  if (representation.IsInteger32() &&
-      instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
-    __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
-            Representation::Integer32());
-  } else {
-    __ Load(result, FieldMemOperand(load_base, offset),
-            representation);
-  }
+  __ Load(result, mem_op, representation);
+
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
DeoptimizeIfNotSmi(result, instr->environment());
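
The smi-elements branch of the hole check relies on the same tagging scheme: with kSmiTag == 0 a value is a smi iff its low bit is clear, so DeoptimizeIfNotSmi reduces to a single bit test. A tiny sketch of that predicate (the constants mirror V8's defaults and are assumptions of the sketch):

#include <cassert>
#include <cstdint>

constexpr uint64_t kSmiTag = 0;
constexpr uint64_t kSmiTagMask = 1;
constexpr uint64_t kHeapObjectTag = 1;  // heap pointers keep the low bit set

bool IsSmi(uint64_t value) { return (value & kSmiTagMask) == kSmiTag; }

int main() {
  uint64_t smi = static_cast<uint64_t>(7) << 32;  // smi 7 under kSmiShift == 32
  uint64_t heap_object = 0x100000 | kHeapObjectTag;
  assert(IsSmi(smi));
  assert(!IsSmi(heap_object));
  return 0;
}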
void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
Register elements = ToRegister(instr->elements());
DoubleRegister value = ToDoubleRegister(instr->value());
- Register store_base = no_reg;
- int offset = 0;
+ MemOperand mem_op;
if (instr->key()->IsConstantOperand()) {
int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
if (constant_key & 0xf0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
- offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
- instr->additional_index());
- store_base = elements;
+ int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+ instr->additional_index());
+ mem_op = FieldMemOperand(elements, offset);
} else {
- store_base = ToRegister(instr->temp());
+ Register store_base = ToRegister(instr->temp());
Register key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+ mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ instr->hydrogen()->representation(),
+ offset);
}
if (instr->NeedsCanonicalization()) {
__ CanonicalizeNaN(double_scratch(), value);
- __ Str(double_scratch(), FieldMemOperand(store_base, offset));
+ __ Str(double_scratch(), mem_op);
} else {
- __ Str(value, FieldMemOperand(store_base, offset));
+ __ Str(value, mem_op);
}
}
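
NeedsCanonicalization guards the double store because an uncanonicalized NaN could otherwise alias the hole pattern that the load-side check above looks for. A rough illustration of the idea, assuming the canonical form is the default quiet NaN (V8's CanonicalizeNaN picks its own fixed pattern):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Sketch only: rewrite every NaN to one fixed quiet-NaN bit pattern before
// storing, so no stored double can be mistaken for the hole marker.
double CanonicalizeNaNForStore(double v) {
  if (std::isnan(v)) {
    const uint64_t kCanonicalQuietNan = 0x7ff8000000000000;  // assumed pattern
    std::memcpy(&v, &kCanonicalQuietNan, sizeof(v));
  }
  return v;
}

int main() {
  double canon = CanonicalizeNaNForStore(std::nan(""));
  uint64_t bits;
  std::memcpy(&bits, &canon, sizeof(bits));
  assert(bits != 0x7fffffffffffffff);  // can no longer alias the hole
  return 0;
}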
Register scratch = no_reg;
Register store_base = no_reg;
Register key = no_reg;
- int offset = 0;
+ MemOperand mem_op;
if (!instr->key()->IsConstantOperand() ||
instr->hydrogen()->NeedsWriteBarrier()) {
scratch = ToRegister(instr->temp());
}
+ Representation representation = instr->hydrogen()->value()->representation();
if (instr->key()->IsConstantOperand()) {
LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
+ int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
store_base = elements;
+ if (representation.IsInteger32()) {
+ ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+ ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+ STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+ (kSmiTag == 0));
+ mem_op = UntagSmiFieldMemOperand(store_base, offset);
+ } else {
+ mem_op = FieldMemOperand(store_base, offset);
+ }
} else {
store_base = scratch;
key = ToRegister(instr->key());
bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
- CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
- instr->hydrogen()->elements_kind());
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- Representation representation = instr->hydrogen()->value()->representation();
- if (representation.IsInteger32()) {
- ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
- ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
- STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
- __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
- Representation::Integer32());
- } else {
- __ Store(value, FieldMemOperand(store_base, offset), representation);
+ int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+
+ mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+ instr->hydrogen()->elements_kind(),
+ representation, offset);
}
+ __ Store(value, mem_op, representation);
+
if (instr->hydrogen()->NeedsWriteBarrier()) {
ASSERT(representation.IsTagged());
// This assignment may cause element_addr to alias store_base.
Register element_addr = scratch;
SmiCheck check_needed =
instr->hydrogen()->value()->IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
- __ Add(element_addr, store_base, offset - kHeapObjectTag);
+ __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
__ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
}