From 6e0ccacc7acb09751fce437e4bd844b669c77bdd Mon Sep 17 00:00:00 2001
From: "haitao.feng@intel.com" <haitao.feng@intel.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Date: Thu, 7 Nov 2013 08:14:27 +0000
Subject: [PATCH] Refactor loading a pointer and loading an integer64 into a
 register instructions for X64

R=danno@chromium.org

Review URL: https://codereview.chromium.org/39543003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17540 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/x64/assembler-x64.cc                | 50 ++++++++++++---------------
 src/x64/assembler-x64.h                 |  6 ++--
 src/x64/code-stubs-x64.cc               | 10 +++----
 src/x64/codegen-x64.cc                  |  6 ++--
 src/x64/full-codegen-x64.cc             |  4 +--
 src/x64/lithium-codegen-x64.cc          | 13 ++++-----
 src/x64/lithium-gap-resolver-x64.cc     |  2 +-
 src/x64/macro-assembler-x64.cc          | 36 +++++++++++-------------
 src/x64/macro-assembler-x64.h           |  3 +-
 src/x64/stub-cache-x64.cc               |  7 ++---
 test/cctest/test-macro-assembler-x64.cc |  5 ++--
 11 files changed, 59 insertions(+), 83 deletions(-)

diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index eb1fa2b..2c266d2 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1476,31 +1476,25 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
   // This method must not be used with heap object references. The stored
   // address is not GC safe. Use the handle version instead.
   ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
-  EnsureSpace ensure_space(this);
-  emit_rex_64(dst);
-  emit(0xB8 | dst.low_bits());
-  emitp(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
-  // Non-relocatable values might not need a 64-bit representation.
-  ASSERT(RelocInfo::IsNone(rmode));
-  if (is_uint32(value)) {
-    movl(dst, Immediate(static_cast<uint32_t>(value)));
-  } else if (is_int32(value)) {
-    movq(dst, Immediate(static_cast<int32_t>(value)));
+  if (RelocInfo::IsNone(rmode)) {
+    movq(dst, reinterpret_cast<int64_t>(value));
   } else {
-    // Value cannot be represented by 32 bits, so do a full 64 bit immediate
-    // value.
     EnsureSpace ensure_space(this);
     emit_rex_64(dst);
     emit(0xB8 | dst.low_bits());
-    emitq(value);
+    emitp(value, rmode);
   }
 }
 
 
+void Assembler::movq(Register dst, int64_t value) {
+  EnsureSpace ensure_space(this);
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitq(value);
+}
+
+
 void Assembler::movq(const Operand& dst, Immediate value) {
   EnsureSpace ensure_space(this);
   emit_rex_64(dst);
@@ -1535,21 +1529,13 @@ void Assembler::movl(const Operand& dst, Label* src) {
 
 void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
   AllowDeferredHandleDereference using_raw_address;
-  // If there is no relocation info, emit the value of the handle efficiently
-  // (possibly using less that 8 bytes for the value).
-  if (RelocInfo::IsNone(mode)) {
-    // There is no possible reason to store a heap pointer without relocation
-    // info, so it must be a smi.
-    ASSERT(value->IsSmi());
-    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
-  } else {
-    EnsureSpace ensure_space(this);
-    ASSERT(value->IsHeapObject());
-    ASSERT(!isolate()->heap()->InNewSpace(*value));
-    emit_rex_64(dst);
-    emit(0xB8 | dst.low_bits());
-    emitp(value.location(), mode);
-  }
+  ASSERT(!RelocInfo::IsNone(mode));
+  EnsureSpace ensure_space(this);
+  ASSERT(value->IsHeapObject());
+  ASSERT(!isolate()->heap()->InNewSpace(*value));
+  emit_rex_64(dst);
+  emit(0xB8 | dst.low_bits());
+  emitp(value.location(), mode);
 }
 
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index dddb55b..dfa1ebc 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -721,10 +721,10 @@ class Assembler : public AssemblerBase {
 
   // Move sign extended immediate to memory location.
   void movq(const Operand& dst, Immediate value);
-  // Instructions to load a 64-bit immediate into a register.
-  // All 64-bit immediates must have a relocation mode.
+  // Loads a pointer into a register with a relocation mode.
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
-  void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+  // Loads a 64-bit immediate into a register.
+  void movq(Register dst, int64_t value);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
 
   void movsxbq(Register dst, const Operand& src);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 6df2ff4091..be8160b 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -959,7 +959,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     Label continue_sqrt, continue_rsqrt, not_plus_half;
     // Test for 0.5.
     // Load double_scratch with 0.5.
-    __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
+    __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
     __ movq(double_scratch, scratch);
     // Already ruled out NaNs for exponent.
     __ ucomisd(double_scratch, double_exponent);
@@ -969,7 +969,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
     // According to IEEE-754, double-precision -Infinity has the highest
     // 12 bits set and the lowest 52 bits cleared.
-    __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+    __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
     __ movq(double_scratch, scratch);
     __ ucomisd(double_scratch, double_base);
     // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -1001,7 +1001,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
     // According to IEEE-754, double-precision -Infinity has the highest
     // 12 bits set and the lowest 52 bits cleared.
-    __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+    __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
     __ movq(double_scratch, scratch);
     __ ucomisd(double_scratch, double_base);
     // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3030,9 +3030,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Scratch register is neither callee-save, nor an argument register on any
   // platform. It's free to use at this point.
   // Cannot use smi-register for loading yet.
-  __ movq(kScratchRegister,
-          reinterpret_cast<int64_t>(Smi::FromInt(marker)),
-          RelocInfo::NONE64);
+  __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
   __ push(kScratchRegister);  // context slot
   __ push(kScratchRegister);  // function slot
   // Save callee-saved registers (X64/Win64 calling conventions).
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index cb7ee12..afe0e3b 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -213,7 +213,7 @@ ModuloFunction CreateModuloFunction() {
   __ j(zero, &valid_result);
   __ fstp(0);  // Drop result in st(0).
   int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
-  __ movq(rcx, kNaNValue, RelocInfo::NONE64);
+  __ movq(rcx, kNaNValue);
   __ movq(Operand(rsp, kPointerSize), rcx);
   __ movsd(xmm0, Operand(rsp, kPointerSize));
   __ jmp(&return_result);
@@ -338,7 +338,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
   // r15: the-hole NaN
   __ jmp(&entry);
 
@@ -440,7 +440,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 4a99287..f69dfb9 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -315,9 +315,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
     reset_value = Smi::kMaxValue;
   }
   __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
-  __ movq(kScratchRegister,
-          reinterpret_cast<int64_t>(Smi::FromInt(reset_value)),
-          RelocInfo::NONE64);
+  __ Move(kScratchRegister, Smi::FromInt(reset_value));
   __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
 }
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index b4a14e1..14d31c5 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -157,7 +157,7 @@ bool LCodeGen::GeneratePrologue() {
 #endif
       __ push(rax);
       __ Set(rax, slots);
-      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
+      __ movq(kScratchRegister, kSlotsZapValue);
       Label loop;
       __ bind(&loop);
       __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
@@ -1123,7 +1123,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
       __ neg(reg1);
       DeoptimizeIf(zero, instr->environment());
    }
-    __ movq(reg2, multiplier, RelocInfo::NONE64);
+    __ Set(reg2, multiplier);
    // Result just fit in r64, because it's int32 * uint32.
    __ imul(reg2, reg1);
 
@@ -3481,7 +3481,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
 
   Label done, round_to_zero, below_one_half, do_not_compensate, restore;
-  __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
+  __ movq(kScratchRegister, one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_one_half);
@@ -3496,7 +3496,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ jmp(&done);
 
   __ bind(&below_one_half);
-  __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+  __ movq(kScratchRegister, minus_one_half);
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   __ j(below_equal, &round_to_zero);
@@ -3552,7 +3552,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   Label done, sqrt;
   // Check base for -Infinity. According to IEEE-754, double-precision
   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
-  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
   __ movq(xmm_scratch, kScratchRegister);
   __ ucomisd(xmm_scratch, input_reg);
   // Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3660,8 +3660,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   XMMRegister result = ToDoubleRegister(instr->result());
   XMMRegister scratch4 = double_scratch0();
-  __ movq(scratch3, V8_INT64_C(0x4130000000000000),
-          RelocInfo::NONE64);  // 1.0 x 2^20 as double
+  __ movq(scratch3, V8_INT64_C(0x4130000000000000));  // 1.0 x 2^20 as double
   __ movq(scratch4, scratch3);
   __ movd(result, random);
   __ xorps(result, scratch4);
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index 01cfb12..6059c50 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -209,7 +209,7 @@ void LGapResolver::EmitMove(int index) {
       if (int_val == 0) {
         __ xorps(dst, dst);
       } else {
-        __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+        __ Set(kScratchRegister, int_val);
         __ movq(dst, kScratchRegister);
       }
     } else {
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2cc4a87..586e496 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -164,7 +164,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
   int64_t address = reinterpret_cast<int64_t>(source.address());
   if (is_int32(address) && !Serializer::enabled()) {
     if (emit_debug_code()) {
-      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+      movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
     }
     push(Immediate(static_cast<int32_t>(address)));
     return;
@@ -289,7 +289,8 @@ void MacroAssembler::InNewSpace(Register object,
     ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+    movq(kScratchRegister, reinterpret_cast<void*>(-new_space_start),
+         RelocInfo::NONE64);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
@@ -345,8 +346,8 @@ void MacroAssembler::RecordWriteField(
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
+    movq(dst, kZapValue, RelocInfo::NONE64);
   }
 }
 
@@ -379,8 +380,8 @@ void MacroAssembler::RecordWriteArray(Register object,
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
+    movq(index, kZapValue, RelocInfo::NONE64);
   }
 }
 
@@ -445,8 +446,8 @@ void MacroAssembler::RecordWrite(Register object,
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
-    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+    movq(address, kZapValue, RelocInfo::NONE64);
+    movq(value, kZapValue, RelocInfo::NONE64);
   }
 }
 
@@ -534,10 +535,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
 #endif
 
   push(rax);
-  movq(kScratchRegister, p0, RelocInfo::NONE64);
+  movq(kScratchRegister, reinterpret_cast<void*>(p0), RelocInfo::NONE64);
   push(kScratchRegister);
-  movq(kScratchRegister,
-       reinterpret_cast<int64_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+  movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
        RelocInfo::NONE64);
   push(kScratchRegister);
 
@@ -980,7 +980,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(dst, x, RelocInfo::NONE64);
+    movq(dst, x);
   }
 }
 
@@ -1045,9 +1045,7 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
 
 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
   if (emit_debug_code()) {
-    movq(dst,
-         reinterpret_cast<int64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
-         RelocInfo::NONE64);
+    movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
     cmpq(dst, kSmiConstantRegister);
     if (allow_stub_calls()) {
       Assert(equal, kUninitializedKSmiConstantRegister);
@@ -1094,7 +1092,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
       UNREACHABLE();
       return;
     default:
-      movq(dst, reinterpret_cast<int64_t>(source), RelocInfo::NONE64);
+      movq(dst, source, RelocInfo::NONE64);
       return;
   }
   if (negative) {
@@ -3120,9 +3118,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
                                        XMMRegister input_reg) {
   Label done;
   cvttsd2siq(result_reg, input_reg);
-  movq(kScratchRegister,
-       V8_INT64_C(0x8000000000000000),
-       RelocInfo::NONE64);
+  movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
   cmpq(result_reg, kScratchRegister);
   j(not_equal, &done, Label::kNear);
 
@@ -3272,7 +3268,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   if (emit_debug_code()) {
     ASSERT(!int32_register.is(kScratchRegister));
-    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+    movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
     cmpq(kScratchRegister, int32_register);
     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
   }
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 887b6d7..3143d3d 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -384,8 +384,7 @@ class MacroAssembler: public Assembler {
   void SafePush(Smi* src);
 
   void InitializeSmiConstantRegister() {
-    movq(kSmiConstantRegister,
-         reinterpret_cast<int64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+    movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
          RelocInfo::NONE64);
   }
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 096b7a6..f5339ca 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2293,7 +2293,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
   Label already_round;
   __ bind(&conversion_failure);
   int64_t kTwoMantissaBits= V8_INT64_C(0x4330000000000000);
-  __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+  __ movq(rbx, kTwoMantissaBits);
   __ movq(xmm1, rbx);
   __ ucomisd(xmm0, xmm1);
   __ j(above_equal, &already_round);
@@ -2314,7 +2314,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
 
   // Subtract 1 if the argument was less than the tentative result.
   int64_t kOne = V8_INT64_C(0x3ff0000000000000);
-  __ movq(rbx, kOne, RelocInfo::NONE64);
+  __ movq(rbx, kOne);
   __ movq(xmm1, rbx);
   __ andpd(xmm1, xmm2);
   __ subsd(xmm0, xmm1);
@@ -2418,8 +2418,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
   Label negative_sign;
   const int sign_mask_shift =
       (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
-  __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
-          RelocInfo::NONE64);
+  __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
   __ testq(rbx, rdi);
   __ j(not_zero, &negative_sign);
   __ ret(2 * kPointerSize);
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 61914b5..abde4c0 100644
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -79,6 +79,7 @@ using v8::internal::rdx;
 using v8::internal::rsi;
 using v8::internal::rsp;
 using v8::internal::times_pointer_size;
+using v8::internal::Address;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -402,7 +403,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
   ASSERT(Smi::IsValid(result));
   __ movl(rax, Immediate(id));
   __ Move(r8, Smi::FromInt(static_cast<int>(result)));
-  __ movq(rcx, x, RelocInfo::NONE64);
+  __ movq(rcx, x);
   __ movq(r11, rcx);
   __ Integer64PlusConstantToSmi(rdx, rcx, y);
   __ cmpq(rdx, r8);
@@ -2322,7 +2323,7 @@ TEST(OperandOffset) {
   __ lea(r13, Operand(rbp, -3 * kPointerSize));
   __ lea(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
-  __ movq(r8, reinterpret_cast<intptr_t>(&data[128]), RelocInfo::NONE64);
+  __ movq(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
   __ movl(rax, Immediate(1));
 
   Operand sp0 = Operand(rsp, 0);
-- 
2.7.4
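
Note (editorial, not part of the patch): after this refactor a caller has three
distinct ways to materialize a constant in a register. The example lines below
are copied verbatim from the hunks above and grouped here only for illustration:

    // 1. Plain 64-bit immediate: the relocation-mode argument is gone, and
    //    Assembler::movq(Register, int64_t) always emits the full ten-byte
    //    REX.W B8+r encoding.
    __ movq(rcx, kNaNValue);

    // 2. Pointer with an explicit relocation mode: with RelocInfo::NONE64
    //    the new Assembler::movq(Register, void*, RelocInfo::Mode) simply
    //    forwards to the int64_t overload; any other mode is emitted via
    //    emitp() so the relocation information is recorded.
    __ movq(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);

    // 3. MacroAssembler::Set: chooses a 32-bit movl/movq(Immediate) encoding
    //    when the value fits and falls back to the full movq otherwise.
    __ Set(kScratchRegister, int_val);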