From: m.m.capewell@googlemail.com
Date: Tue, 6 May 2014 14:28:29 +0000 (+0000)
Subject: ARM64: Sign extension on MemOperand for keyed ops
X-Git-Tag: upstream/4.7.83~9247
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ec2b42fd7b0eedcdd7948829ab9587a6be62943e;p=platform%2Fupstream%2Fv8.git

ARM64: Sign extension on MemOperand for keyed ops

SXTW extend mode is usually cheaper on loads and stores than arithmetic, so
move it to the memory accesses where possible for Keyed loads and stores.

BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/268483002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21172 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index 175977c3f..3c17153f6 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -411,6 +411,12 @@ Operand Operand::UntagSmiAndScale(Register smi, int scale) {
 }
 
 
+MemOperand::MemOperand()
+  : base_(NoReg), regoffset_(NoReg), offset_(0), addrmode_(Offset),
+    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
+}
+
+
 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
   : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
     shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index 1aa9e2a4b..a3fbc98d9 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -669,6 +669,7 @@ class Operand {
 // MemOperand represents a memory operand in a load or store instruction.
 class MemOperand {
  public:
+  inline explicit MemOperand();
   inline explicit MemOperand(Register base,
                              ptrdiff_t offset = 0,
                              AddrMode addrmode = Offset);
diff --git a/src/arm64/lithium-arm64.cc b/src/arm64/lithium-arm64.cc
index 84996d2d9..2411b7074 100644
--- a/src/arm64/lithium-arm64.cc
+++ b/src/arm64/lithium-arm64.cc
@@ -1666,10 +1666,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
   ASSERT(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* elements = UseRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
 
   if (!instr->is_typed_elements()) {
-    LOperand* key = UseRegisterOrConstantAtStart(instr->key());
-
     if (instr->representation().IsDouble()) {
       LOperand* temp = (!instr->key()->IsConstant() ||
                         instr->RequiresHoleCheck())
@@ -1697,7 +1696,6 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
         (instr->representation().IsDouble() &&
          IsDoubleOrFloatElementsKind(instr->elements_kind())));
 
-    LOperand* key = UseRegisterOrConstant(instr->key());
     LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
     LInstruction* result = DefineAsRegister(
         new(zone()) LLoadKeyedExternal(elements, key, temp));
@@ -2301,6 +2299,7 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
 
 
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  LOperand* key = UseRegisterOrConstant(instr->key());
   LOperand* temp = NULL;
   LOperand* elements = NULL;
   LOperand* val = NULL;
@@ -2327,19 +2326,16 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
             instr->elements()->representation().IsTagged()) ||
            (instr->is_external() &&
             instr->elements()->representation().IsExternal()));
-    LOperand* key = UseRegisterOrConstant(instr->key());
     return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
 
   } else if (instr->value()->representation().IsDouble()) {
     ASSERT(instr->elements()->representation().IsTagged());
-    LOperand* key = UseRegisterOrConstantAtStart(instr->key());
     return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
 
   } else {
     ASSERT(instr->elements()->representation().IsTagged());
     ASSERT(instr->value()->representation().IsSmiOrTagged() ||
            instr->value()->representation().IsInteger32());
-    LOperand* key = UseRegisterOrConstantAtStart(instr->key());
     return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
   }
 }
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index e85705a8a..85f15aef0 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -3481,11 +3481,14 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
 }
 
 
-void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
-                                          Register elements,
-                                          Register key,
-                                          bool key_is_tagged,
-                                          ElementsKind elements_kind) {
+MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
+                                              Register elements,
+                                              Register key,
+                                              bool key_is_tagged,
+                                              ElementsKind elements_kind,
+                                              Representation representation,
+                                              int additional_index) {
+  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
 
   // Even though the HLoad/StoreKeyed instructions force the input
@@ -3494,11 +3497,28 @@ void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
   // can be tagged, so that case must be handled here, too.
   if (key_is_tagged) {
     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+    if (representation.IsInteger32()) {
+      ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the most-significant 32 bits in the case of fast smi
+      // arrays.
+      return UntagSmiFieldMemOperand(base, additional_index);
+    } else {
+      return FieldMemOperand(base, additional_index);
+    }
   } else {
     // Sign extend key because it could be a 32-bit negative value or contain
     // garbage in the top 32-bits. The address computation happens in 64-bit.
     ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
-    __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+    if (representation.IsInteger32()) {
+      ASSERT(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the most-significant 32 bits in the case of fast smi
+      // arrays.
+      __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+      return UntagSmiFieldMemOperand(base, additional_index);
+    } else {
+      __ Add(base, elements, additional_index - kHeapObjectTag);
+      return MemOperand(base, key, SXTW, element_size_shift);
+    }
   }
 }
 
@@ -3506,8 +3526,7 @@ void LCodeGen::CalcKeyedArrayBaseRegister(Register base,
 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
   Register elements = ToRegister(instr->elements());
   DoubleRegister result = ToDoubleRegister(instr->result());
-  Register load_base;
-  int offset = 0;
+  MemOperand mem_op;
 
   if (instr->key()->IsConstantOperand()) {
     ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
@@ -3517,27 +3536,30 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
     if (constant_key & 0xf0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
-    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
-                                                 instr->additional_index());
-    load_base = elements;
+    int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+                                                     instr->additional_index());
+    mem_op = FieldMemOperand(elements, offset);
   } else {
-    load_base = ToRegister(instr->temp());
+    Register load_base = ToRegister(instr->temp());
     Register key = ToRegister(instr->key());
     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-    CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
-                               instr->hydrogen()->elements_kind());
-    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      instr->hydrogen()->representation(),
+                                      offset);
   }
-  __ Ldr(result, FieldMemOperand(load_base, offset));
+
+  __ Ldr(result, mem_op);
 
   if (instr->hydrogen()->RequiresHoleCheck()) {
     Register scratch = ToRegister(instr->temp());
-
-    // TODO(all): Is it faster to reload this value to an integer register, or
-    // move from fp to integer?
-    __ Fmov(scratch, result);
-    __ Cmp(scratch, kHoleNanInt64);
-    DeoptimizeIf(eq, instr->environment());
+    // Detect the hole NaN by adding one to the integer representation of the
+    // result, and checking for overflow.
+    STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
+    __ Ldr(scratch, mem_op);
+    __ Cmn(scratch, 1);
+    DeoptimizeIf(vs, instr->environment());
   }
 }
 
@@ -3545,35 +3567,35 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
   Register elements = ToRegister(instr->elements());
   Register result = ToRegister(instr->result());
-  Register load_base;
-  int offset = 0;
+  MemOperand mem_op;
+  Representation representation = instr->hydrogen()->representation();
 
   if (instr->key()->IsConstantOperand()) {
     ASSERT(instr->temp() == NULL);
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
-                                           instr->additional_index());
-    load_base = elements;
+    int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+                                               instr->additional_index());
+    if (representation.IsInteger32()) {
+      ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+                    (kSmiTag == 0));
+      mem_op = UntagSmiFieldMemOperand(elements, offset);
+    } else {
+      mem_op = FieldMemOperand(elements, offset);
+    }
   } else {
-    load_base = ToRegister(instr->temp());
+    Register load_base = ToRegister(instr->temp());
     Register key = ToRegister(instr->key());
     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-    CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged,
-                               instr->hydrogen()->elements_kind());
-    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
-  }
-  Representation representation = instr->hydrogen()->representation();
+    int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
 
-  if (representation.IsInteger32() &&
-      instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) {
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
-    __ Load(result, UntagSmiFieldMemOperand(load_base, offset),
-            Representation::Integer32());
-  } else {
-    __ Load(result, FieldMemOperand(load_base, offset),
-            representation);
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      representation, offset);
   }
 
+  __ Load(result, mem_op, representation);
+
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
       DeoptimizeIfNotSmi(result, instr->environment());
@@ -5169,31 +5191,32 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
   Register elements = ToRegister(instr->elements());
   DoubleRegister value = ToDoubleRegister(instr->value());
-  Register store_base = no_reg;
-  int offset = 0;
+  MemOperand mem_op;
 
   if (instr->key()->IsConstantOperand()) {
     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xf0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
-    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
-                                                 instr->additional_index());
-    store_base = elements;
+    int offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
+                                                     instr->additional_index());
+    mem_op = FieldMemOperand(elements, offset);
   } else {
-    store_base = ToRegister(instr->temp());
+    Register store_base = ToRegister(instr->temp());
     Register key = ToRegister(instr->key());
     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
-                               instr->hydrogen()->elements_kind());
-    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    int offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
+    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      instr->hydrogen()->representation(),
+                                      offset);
   }
 
   if (instr->NeedsCanonicalization()) {
     __ CanonicalizeNaN(double_scratch(), value);
-    __ Str(double_scratch(), FieldMemOperand(store_base, offset));
+    __ Str(double_scratch(), mem_op);
   } else {
-    __ Str(value, FieldMemOperand(store_base, offset));
+    __ Str(value, mem_op);
   }
 }
 
@@ -5204,37 +5227,41 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
   Register scratch = no_reg;
   Register store_base = no_reg;
   Register key = no_reg;
-  int offset = 0;
+  MemOperand mem_op;
 
   if (!instr->key()->IsConstantOperand() ||
       instr->hydrogen()->NeedsWriteBarrier()) {
     scratch = ToRegister(instr->temp());
   }
 
+  Representation representation = instr->hydrogen()->value()->representation();
   if (instr->key()->IsConstantOperand()) {
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
-                                           instr->additional_index());
+    int offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+                                               instr->additional_index());
     store_base = elements;
+    if (representation.IsInteger32()) {
+      ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+      ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
+                    (kSmiTag == 0));
+      mem_op = UntagSmiFieldMemOperand(store_base, offset);
+    } else {
+      mem_op = FieldMemOperand(store_base, offset);
+    }
   } else {
     store_base = scratch;
    key = ToRegister(instr->key());
     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
-    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
-                               instr->hydrogen()->elements_kind());
-    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
-  }
-  Representation representation = instr->hydrogen()->value()->representation();
-  if (representation.IsInteger32()) {
-    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-    ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
-    __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
-            Representation::Integer32());
-  } else {
-    __ Store(value, FieldMemOperand(store_base, offset), representation);
+    int offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+
+    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      representation, offset);
   }
 
+  __ Store(value, mem_op, representation);
+
  if (instr->hydrogen()->NeedsWriteBarrier()) {
     ASSERT(representation.IsTagged());
     // This assignment may cause element_addr to alias store_base.
@@ -5243,7 +5270,7 @@ void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
         instr->hydrogen()->value()->IsHeapObject()
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
-    __ Add(element_addr, store_base, offset - kHeapObjectTag);
+    __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
     __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                    kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
   }
diff --git a/src/arm64/lithium-codegen-arm64.h b/src/arm64/lithium-codegen-arm64.h
index 8cae5b049..8c25e6340 100644
--- a/src/arm64/lithium-codegen-arm64.h
+++ b/src/arm64/lithium-codegen-arm64.h
@@ -256,11 +256,13 @@ class LCodeGen: public LCodeGenBase {
                              int constant_key,
                              ElementsKind elements_kind,
                              int additional_index);
-  void CalcKeyedArrayBaseRegister(Register base,
-                                  Register elements,
-                                  Register key,
-                                  bool key_is_tagged,
-                                  ElementsKind elements_kind);
+  MemOperand PrepareKeyedArrayOperand(Register base,
+                                      Register elements,
+                                      Register key,
+                                      bool key_is_tagged,
+                                      ElementsKind elements_kind,
+                                      Representation representation,
+                                      int additional_index);
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
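
Note (illustrative sketch, not part of the patch): the heart of the change is folding the SXTW sign extension of the key into the load/store addressing mode instead of performing it with a separate arithmetic instruction. Roughly, the before/after shape of the non-constant, untagged-key path looks like the lines below, written with the MacroAssembler calls used in the diff. Here `offset` stands for the byte offset that callers compute with OffsetOfElementAt() and pass as the `additional_index` argument, and the assembly in the comments is an approximate expansion for orientation, not verified output.

  // Before: sign-extend and scale the key with an explicit Add, then access
  // memory through a plain base + immediate operand.
  //   add  base, elements, wkey, sxtw #element_size_shift
  //   ldr  result, [base, #(offset - kHeapObjectTag)]
  __ Add(base, elements, Operand(key, SXTW, element_size_shift));
  __ Ldr(result, FieldMemOperand(base, offset));

  // After: fold the constant part of the address into the base once, and let
  // the load itself perform the SXTW extension and scaling as part of its
  // addressing mode, which (per the commit message) is usually cheaper than
  // doing the extension in the Add.
  //   add  base, elements, #(offset - kHeapObjectTag)
  //   ldr  result, [base, wkey, sxtw #element_size_shift]
  __ Add(base, elements, offset - kHeapObjectTag);
  __ Ldr(result, MemOperand(base, key, SXTW, element_size_shift));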