From 04da7b90c9e8d29f20a72c1fc5029709095b6d69 Mon Sep 17 00:00:00 2001
From: "lrn@chromium.org" <lrn@chromium.org>
Date: Fri, 2 Jul 2010 14:09:35 +0000
Subject: [PATCH] X64: Added register holding Smi::FromInt(1). Don't use r15
 for anything any more.

Review URL: http://codereview.chromium.org/2885018

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@5009 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/x64/builtins-x64.cc                 |   4 -
 src/x64/codegen-x64.cc                  |  69 ++++++-----
 src/x64/frames-x64.h                    |   4 +
 src/x64/macro-assembler-x64.cc          | 197 ++++++++++++++++++++++++--------
 src/x64/macro-assembler-x64.h           |  26 ++++-
 src/x64/register-allocator-x64-inl.h    |  11 +-
 src/x64/register-allocator-x64.h        |   2 +-
 test/cctest/test-macro-assembler-x64.cc | 153 +++++++++++++++++--------
 8 files changed, 331 insertions(+), 135 deletions(-)

diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index ff655c7..a38ebaf 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1238,10 +1238,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ movq(rbx, r8);
 #endif  // _WIN64
 
-  // Set up the roots register.
-  ExternalReference roots_address = ExternalReference::roots_address();
-  __ movq(kRootRegister, roots_address);
-
   // Current stack contents:
   // [rsp + 2 * kPointerSize ... ]: Internal frame
   // [rsp + kPointerSize]         : function
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index f3ee40c..3b1aeae 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -592,7 +592,6 @@ bool CodeGenerator::HasValidEntryRegisters() {
       && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
       && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
       && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
-      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
      && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
 }
 #endif
@@ -3606,17 +3605,16 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
       __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
     }
     if (is_increment) {
-      __ SmiAddConstant(kScratchRegister,
+      __ SmiAddConstant(new_value.reg(),
                         new_value.reg(),
                         Smi::FromInt(1),
                         deferred->entry_label());
     } else {
-      __ SmiSubConstant(kScratchRegister,
+      __ SmiSubConstant(new_value.reg(),
                         new_value.reg(),
                         Smi::FromInt(1),
                         deferred->entry_label());
     }
-    __ movq(new_value.reg(), kScratchRegister);
     deferred->BindExit();
 
     // Postfix count operations return their input converted to
@@ -8727,26 +8725,26 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ bind(&seq_ascii_string);
   // rax: subject string (sequential ascii)
   // rcx: RegExp data (FixedArray)
-  __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+  __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
   __ Set(rdi, 1);  // Type is ascii.
   __ jmp(&check_code);
 
   __ bind(&seq_two_byte_string);
   // rax: subject string (flat two-byte)
   // rcx: RegExp data (FixedArray)
-  __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+  __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
   __ Set(rdi, 0);  // Type is two byte.
 
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
   // the hole.
-  __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+  __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
   __ j(not_equal, &runtime);
 
   // rax: subject string
   // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
-  // r12: code
+  // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
@@ -8754,7 +8752,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // rax: subject string
   // rbx: previous index
   // rdi: encoding of subject string (1 if ascii 0 if two_byte);
-  // r12: code
+  // r11: code
   // All checks done. Now push arguments for native regexp code.
   __ IncrementCounter(&Counters::regexp_entry_native, 1);
 
@@ -8804,7 +8802,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   // rax: subject string
   // rbx: previous index
   // rdi: encoding of subject string (1 if ascii 0 if two_byte);
-  // r12: code
+  // r11: code
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
@@ -8828,8 +8826,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
   __ movq(arg1, rax);
 
   // Locate the code entry and call it.
-  __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ CallCFunction(r12, kRegExpExecuteArguments);
+  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ CallCFunction(r11, kRegExpExecuteArguments);
 
   // rsi is caller save, as it is used to pass parameter.
   __ pop(rsi);
@@ -9627,7 +9625,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   // rbp: frame pointer (restored after C call).
   // rsp: stack pointer (restored after C call).
   // r14: number of arguments including receiver (C callee-saved).
-  // r15: pointer to the first argument (C callee-saved).
+  // r12: pointer to the first argument (C callee-saved).
   //      This pointer is reused in LeaveExitFrame(), so it is stored in a
   //      callee-saved register.
 
@@ -9668,7 +9666,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
   // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
   // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
   __ movq(Operand(rsp, 4 * kPointerSize), r14);  // argc.
-  __ movq(Operand(rsp, 5 * kPointerSize), r15);  // argv.
+  __ movq(Operand(rsp, 5 * kPointerSize), r12);  // argv.
   if (result_size_ < 2) {
     // Pass a pointer to the Arguments object as the first argument.
     // Return result in single register (rax).
@@ -9684,7 +9682,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
 #else  // _WIN64
   // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
   __ movq(rdi, r14);  // argc.
-  __ movq(rsi, r15);  // argv.
+  __ movq(rsi, r12);  // argv.
 #endif
   __ call(rbx);
   // Result is in rax - do not destroy this register!
@@ -9886,7 +9884,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // rbp: frame pointer of exit frame (restored after C call).
   // rsp: stack pointer (restored after C call).
   // r14: number of arguments including receiver (C callee-saved).
-  // r15: argv pointer (C callee-saved).
+  // r12: argv pointer (C callee-saved).
 
   Label throw_normal_exception;
   Label throw_termination_exception;
@@ -9946,24 +9944,38 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   // Push the stack frame type marker twice.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ Push(Smi::FromInt(marker));  // context slot
-  __ Push(Smi::FromInt(marker));  // function slot
-  // Save callee-saved registers (X64 calling conventions).
+  // Scratch register is neither callee-save, nor an argument register on any
+  // platform. It's free to use at this point.
+  // Cannot use smi-register for loading yet.
+  __ movq(kScratchRegister,
+          reinterpret_cast<int64_t>(Smi::FromInt(marker)),
+          RelocInfo::NONE);
+  __ push(kScratchRegister);  // context slot
+  __ push(kScratchRegister);  // function slot
+  // Save callee-saved registers (X64/Win64 calling conventions).
   __ push(r12);
   __ push(r13);
   __ push(r14);
   __ push(r15);
-  __ push(rdi);
-  __ push(rsi);
+#ifdef _WIN64
+  __ push(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
+  __ push(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
   __ push(rbx);
-  // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
-  // callee-save in JS code as well.
+  // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+  // callee save as well.
 
   // Save copies of the top frame descriptor on the stack.
   ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
   __ load_rax(c_entry_fp);
   __ push(rax);
 
+  // Set up the roots and smi constant registers.
+  // Needs to be done before any further smi loads.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ movq(kRootRegister, roots_address);
+  __ InitializeSmiConstantRegister();
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   // If this is the outermost JS call, set js_entry_sp value.
   ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
@@ -10034,8 +10046,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
 
   // Restore callee-saved registers (X64 conventions).
   __ pop(rbx);
+#ifdef _WIN64
+  // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
   __ pop(rsi);
   __ pop(rdi);
+#endif
   __ pop(r15);
   __ pop(r14);
   __ pop(r13);
@@ -11269,7 +11284,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
   // Check that both strings are non-external ascii strings.
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
-                                                 &string_add_runtime);
+                                                  &string_add_runtime);
 
   // Get the two characters forming the sub string.
   __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -11279,7 +11294,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   // just allocate a new one.
   Label make_two_character_string, make_flat_ascii_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
+      masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
@@ -11371,7 +11386,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
 
   __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings. As they are short they are both flat.
-  __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
   // rcx: result string
   __ movq(rbx, rcx);
   // Locate first character of result.
@@ -11408,7 +11423,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
   __ j(not_zero, &string_add_runtime);
   // Both strings are two byte strings. As they are short they are both
   // flat.
-  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
   // rcx: result string
   __ movq(rbx, rcx);
   // Locate first character of result.
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a92b248..9991981 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -56,7 +56,11 @@ class StackHandlerConstants : public AllStatic {
 
 class EntryFrameConstants : public AllStatic {
  public:
+#ifdef _WIN64
   static const int kCallerFPOffset = -10 * kPointerSize;
+#else
+  static const int kCallerFPOffset = -8 * kPointerSize;
+#endif
   static const int kArgvOffset = 6 * kPointerSize;
 };
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 77a7198..76200d7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -467,7 +467,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
 
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
-    xor_(dst, dst);
+    xorl(dst, dst);
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
@@ -477,7 +477,6 @@ void MacroAssembler::Set(Register dst, int64_t x) {
   }
 }
 
-
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -492,6 +491,78 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
 
 static int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
+Register MacroAssembler::GetSmiConstant(Smi* source) {
+  int value = source->value();
+  if (value == 0) {
+    xorl(kScratchRegister, kScratchRegister);
+    return kScratchRegister;
+  }
+  if (value == 1) {
+    return kSmiConstantRegister;
+  }
+  LoadSmiConstant(kScratchRegister, source);
+  return kScratchRegister;
+}
+
+void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+  if (FLAG_debug_code) {
+    movq(dst,
+         reinterpret_cast<int64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+         RelocInfo::NONE);
+    cmpq(dst, kSmiConstantRegister);
+    if (allow_stub_calls()) {
+      Assert(equal, "Uninitialized kSmiConstantRegister");
+    } else {
+      Label ok;
+      j(equal, &ok);
+      int3();
+      bind(&ok);
+    }
+  }
+  if (source->value() == 0) {
+    xorl(dst, dst);
+    return;
+  }
+  int value = source->value();
+  bool negative = value < 0;
+  unsigned int uvalue = negative ? -value : value;
+
+  switch (uvalue) {
+    case 9:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+      break;
+    case 8:
+      xorl(dst, dst);
+      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+      break;
+    case 4:
+      xorl(dst, dst);
+      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+      break;
+    case 5:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+      break;
+    case 3:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+      break;
+    case 2:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+      break;
+    case 1:
+      movq(dst, kSmiConstantRegister);
+      break;
+    case 0:
+      UNREACHABLE();
+      return;
+    default:
+      movq(dst, reinterpret_cast<int64_t>(source), RelocInfo::NONE);
+      return;
+  }
+  if (negative) {
+    neg(dst);
+  }
+}
+
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
@@ -652,9 +723,10 @@ Condition MacroAssembler::CheckSmi(Register src) {
 
 Condition MacroAssembler::CheckPositiveSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
+  // Make mask 0x8000000000000001 and test that both bits are zero.
   movq(kScratchRegister, src);
   rol(kScratchRegister, Immediate(1));
-  testl(kScratchRegister, Immediate(0x03));
+  testb(kScratchRegister, Immediate(3));
   return zero;
 }
 
@@ -683,7 +755,6 @@ Condition MacroAssembler::CheckBothPositiveSmi(Register first,
 }
 
 
-
 Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
@@ -696,11 +767,10 @@ Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
 
 
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  movq(kScratchRegister, src);
-  rol(kScratchRegister, Immediate(1));
-  cmpq(kScratchRegister, Immediate(1));
-  return equal;
+  ASSERT(!src.is(kScratchRegister));
+  // If we overflow by subtracting one, it's the minimal smi value.
+  cmpq(src, kSmiConstantRegister);
+  return overflow;
 }
 
 
@@ -713,8 +783,8 @@ Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
   // An unsigned 32-bit integer value is valid as long as the high bit
   // is not set.
-  testq(src, Immediate(0x80000000));
-  return zero;
+  testl(src, src);
+  return positive;
 }
 
@@ -807,10 +877,10 @@ void MacroAssembler::SmiSub(Register dst,
     }
     Assert(no_overflow, "Smi subtraction overflow");
   } else if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    subq(kScratchRegister, src2);
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
     j(overflow, on_not_smi_result);
-    movq(src1, kScratchRegister);
+    subq(src1, kScratchRegister);
   } else {
     movq(dst, src1);
     subq(dst, src2);
@@ -883,7 +953,7 @@ void MacroAssembler::SmiTryAddConstant(Register dst,
   JumpIfNotSmi(src, on_not_smi_result);
   Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  Move(tmp, constant);
+  LoadSmiConstant(tmp, constant);
   addq(tmp, src);
   j(overflow, on_not_smi_result);
   if (dst.is(src)) {
@@ -897,14 +967,46 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
+    return;
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    addq(dst, kScratchRegister);
+    switch (constant->value()) {
+      case 1:
+        addq(dst, kSmiConstantRegister);
+        return;
+      case 2:
+        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        return;
+      case 4:
+        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        return;
+      case 8:
+        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        return;
+      default:
+        Register constant_reg = GetSmiConstant(constant);
+        addq(dst, constant_reg);
+        return;
+    }
   } else {
-    Move(dst, constant);
-    addq(dst, src);
+    switch (constant->value()) {
+      case 1:
+        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+        return;
+      case 2:
+        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        return;
+      case 4:
+        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        return;
+      case 8:
+        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        return;
+      default:
+        LoadSmiConstant(dst, constant);
+        addq(dst, src);
+        return;
+    }
   }
 }
 
@@ -927,12 +1029,12 @@ void MacroAssembler::SmiAddConstant(Register dst,
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
 
-    Move(kScratchRegister, constant);
-    addq(kScratchRegister, dst);
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
     j(overflow, on_not_smi_result);
     movq(dst, kScratchRegister);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     addq(dst, src);
     j(overflow, on_not_smi_result);
   }
@@ -946,19 +1048,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    subq(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    subq(dst, constant_reg);
   } else {
-    // Subtract by adding the negative, to do it in two operations.
     if (constant->value() == Smi::kMinValue) {
-      Move(dst, constant);
+      LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
     } else {
       // Subtract by adding the negation.
-      Move(dst, Smi::FromInt(-constant->value()));
+      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
       addq(dst, src);
     }
   }
@@ -980,11 +1080,11 @@ void MacroAssembler::SmiSubConstant(Register dst,
       // We test the non-negativeness before doing the subtraction.
       testq(src, src);
       j(not_sign, on_not_smi_result);
-      Move(kScratchRegister, constant);
+      LoadSmiConstant(kScratchRegister, constant);
       subq(dst, kScratchRegister);
     } else {
       // Subtract by adding the negation.
-      Move(kScratchRegister, Smi::FromInt(-constant->value()));
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
       addq(kScratchRegister, dst);
       j(overflow, on_not_smi_result);
       movq(dst, kScratchRegister);
@@ -995,13 +1095,13 @@ void MacroAssembler::SmiSubConstant(Register dst,
       // We test the non-negativeness before doing the subtraction.
       testq(src, src);
       j(not_sign, on_not_smi_result);
-      Move(dst, constant);
+      LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
     } else {
       // Subtract by adding the negation.
-      Move(dst, Smi::FromInt(-(constant->value())));
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
       addq(dst, src);
       j(overflow, on_not_smi_result);
     }
@@ -1155,10 +1255,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
     xor_(dst, dst);
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    and_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    and_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     and_(dst, src);
   }
 }
@@ -1175,10 +1275,10 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
 
 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    or_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    or_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     or_(dst, src);
   }
 }
@@ -1195,10 +1295,10 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
 
 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    xor_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    xor_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     xor_(dst, src);
   }
 }
@@ -1366,6 +1466,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }
 
+
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1591,8 +1692,8 @@ void MacroAssembler::Push(Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
     push(Immediate(static_cast<int32_t>(smi)));
   } else {
-    Set(kScratchRegister, smi);
-    push(kScratchRegister);
+    Register constant = GetSmiConstant(source);
+    push(constant);
   }
 }
 
@@ -2132,10 +2233,10 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
   movq(rax, rsi);
   store_rax(context_address);
 
-  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
   // so it must be retained across the C-call.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
@@ -2181,7 +2282,7 @@ void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
 
 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
   // Registers:
-  // r15 : argv
+  // r12 : argv
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
@@ -2201,7 +2302,7 @@ void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
 
   // Pop everything up to and including the arguments and the receiver
   // from the caller stack.
-  lea(rsp, Operand(r15, 1 * kPointerSize));
+  lea(rsp, Operand(r12, 1 * kPointerSize));
 
   // Restore current context from top and clear it in debug mode.
   ExternalReference context_address(Top::k_context_address);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 882ee65..a256ab8 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -47,8 +47,11 @@ enum AllocationFlags {
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
-static const Register kScratchRegister = { 10 };  // r10.
-static const Register kRootRegister = { 13 };  // r13
+static const Register kScratchRegister = { 10 };      // r10.
+static const Register kSmiConstantRegister = { 15 };  // r15 (callee save).
+static const Register kRootRegister = { 13 };         // r13 (callee save).
+// Value of smi in kSmiConstantRegister.
+static const int kSmiConstantRegisterValue = 1;
 
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
@@ -202,6 +205,12 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // Smi tagging, untagging and operations on tagged smis.
 
+  void InitializeSmiConstantRegister() {
+    movq(kSmiConstantRegister,
+         reinterpret_cast<int64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+         RelocInfo::NONE);
+  }
+
   // Conversions between tagged smi values and non-tagged integer values.
 
   // Tag an integer value. The result must be known to be a valid smi value.
@@ -469,11 +478,12 @@ class MacroAssembler: public Assembler {
   // Basic Smi operations.
   void Move(Register dst, Smi* source) {
-    Set(dst, reinterpret_cast<int64_t>(source));
+    LoadSmiConstant(dst, source);
   }
 
   void Move(const Operand& dst, Smi* source) {
-    Set(dst, reinterpret_cast<int64_t>(source));
+    Register constant = GetSmiConstant(source);
+    movq(dst, constant);
   }
 
   void Push(Smi* smi);
@@ -820,6 +830,14 @@ class MacroAssembler: public Assembler {
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
+
+  // Returns a register holding the smi value. The register MUST NOT be
+  // modified. It may be the "smi 1 constant" register.
+  Register GetSmiConstant(Smi* value);
+
+  // Moves the smi value to the destination register.
+  void LoadSmiConstant(Register dst, Smi* value);
+
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index c7c18b3..c6bea3a 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -38,7 +38,8 @@ namespace internal {
 bool RegisterAllocator::IsReserved(Register reg) {
   return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
-      reg.is(kScratchRegister) || reg.is(kRootRegister);
+      reg.is(kScratchRegister) || reg.is(kRootRegister) ||
+      reg.is(kSmiConstantRegister);
 }
 
 
@@ -58,11 +59,11 @@ int RegisterAllocator::ToNumber(Register reg) {
     5,   // r8
     6,   // r9
     -1,  // r10  Scratch register.
-    9,   // r11
-    10,  // r12
+    8,   // r11
+    9,   // r12
     -1,  // r13  Roots array.  This is callee saved.
     7,   // r14
-    8    // r15
+    -1   // r15  Smi constant register.
   };
   return kNumbers[reg.code()];
 }
@@ -71,7 +72,7 @@ int RegisterAllocator::ToNumber(Register reg) {
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
   const Register kRegisters[] =
-      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
+      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
   return kRegisters[num];
 }
 
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
index 8d666d2..a2884d9 100644
--- a/src/x64/register-allocator-x64.h
+++ b/src/x64/register-allocator-x64.h
@@ -33,7 +33,7 @@ namespace internal {
 
 class RegisterAllocatorConstants : public AllStatic {
  public:
-  static const int kNumRegisters = 11;
+  static const int kNumRegisters = 10;
   static const int kInvalidRegister = -1;
 };
 
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index dd97498..3d2b91b 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -57,10 +57,9 @@ using v8::internal::rsp;
 using v8::internal::r8;
 using v8::internal::r9;
 using v8::internal::r11;
-using v8::internal::r12;  // Remember: r12..r15 are callee save!
+using v8::internal::r12;
 using v8::internal::r13;
 using v8::internal::r14;
-using v8::internal::r15;
 using v8::internal::times_pointer_size;
 using v8::internal::FUNCTION_CAST;
 using v8::internal::CodeDesc;
@@ -92,6 +91,24 @@ typedef int (*F0)();
 
 #define __ masm->
 
+
+static void EntryCode(MacroAssembler* masm) {
+  // Smi constant register is callee save.
+  __ push(v8::internal::kSmiConstantRegister);
+  __ InitializeSmiConstantRegister();
+}
+
+
+static void ExitCode(MacroAssembler* masm) {
+  // Return -1 if kSmiConstantRegister was clobbered during the test.
+  __ Move(rdx, Smi::FromInt(1));
+  __ cmpq(rdx, v8::internal::kSmiConstantRegister);
+  __ movq(rdx, Immediate(-1));
+  __ cmovq(not_equal, rax, rdx);
+  __ pop(v8::internal::kSmiConstantRegister);
+}
+
+
 TEST(Smi) {
   // Check that C++ Smi operations work as expected.
   int64_t test_numbers[] = {
@@ -139,6 +156,7 @@ TEST(SmiMove) {
   MacroAssembler assembler(buffer, static_cast<int>(actual_size));
   MacroAssembler* masm = &assembler;  // Create a pointer for the __ macro.
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
@@ -156,6 +174,7 @@ TEST(SmiMove) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -225,6 +244,7 @@ TEST(SmiCompare) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiCompare(masm, &exit, 0x10, 0, 0);
@@ -249,6 +269,7 @@ TEST(SmiCompare) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -272,6 +293,7 @@ TEST(Integer32ToSmi) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   __ movq(rax, Immediate(1));  // Test number.
@@ -349,6 +371,7 @@ TEST(Integer32ToSmi) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -397,6 +420,7 @@ TEST(Integer64PlusConstantToSmi) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
@@ -416,6 +440,7 @@ TEST(Integer64PlusConstantToSmi) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -438,6 +463,7 @@ TEST(SmiCheck) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   Condition cond;
@@ -613,6 +639,7 @@ TEST(SmiCheck) {
 
   __ xor_(rax, rax);
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -683,6 +710,7 @@ TEST(SmiNeg) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiNeg(masm, &exit, 0x10, 0);
@@ -696,6 +724,7 @@ TEST(SmiNeg) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -768,6 +797,7 @@ TEST(SmiAdd) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   // No-overflow tests.
@@ -782,6 +812,7 @@ TEST(SmiAdd) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -955,6 +986,7 @@ TEST(SmiSub) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   SmiSubTest(masm, &exit, 0x10, 1, 2);
@@ -977,6 +1009,7 @@ TEST(SmiSub) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1042,6 +1075,7 @@ TEST(SmiMul) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiMul(masm, &exit, 0x10, 0, 0);
@@ -1061,6 +1095,7 @@ TEST(SmiMul) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1081,51 +1116,51 @@ void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
 #endif
   bool fraction = !division_by_zero && !overflow && (x % y != 0);
   __ Move(r11, Smi::FromInt(x));
-  __ Move(r12, Smi::FromInt(y));
+  __ Move(r14, Smi::FromInt(y));
   if (!fraction && !overflow && !negative_zero && !division_by_zero) {
     // Division succeeds
     __ movq(rcx, r11);
-    __ movq(r15, Immediate(id));
+    __ movq(r12, Immediate(id));
     int result = x / y;
     __ Move(r8, Smi::FromInt(result));
-    __ SmiDiv(r9, rcx, r12, exit);
-    // Might have destroyed rcx and r12.
+    __ SmiDiv(r9, rcx, r14, exit);
+    // Might have destroyed rcx and r14.
+    __ incq(r12);
     __ SmiCompare(r9, r8);
     __ j(not_equal, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ movq(rcx, r11);
-    __ Move(r12, Smi::FromInt(y));
+    __ Move(r14, Smi::FromInt(y));
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiDiv(rcx, rcx, r12, exit);
+    __ incq(r12);
+    __ SmiDiv(rcx, rcx, r14, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r8);
     __ j(not_equal, exit);
   } else {
     // Division fails.
-    __ movq(r15, Immediate(id + 8));
+    __ movq(r12, Immediate(id + 8));
 
     Label fail_ok, fail_ok2;
     __ movq(rcx, r11);
-    __ SmiDiv(r9, rcx, r12, &fail_ok);
+    __ SmiDiv(r9, rcx, r14, &fail_ok);
     __ jmp(exit);
     __ bind(&fail_ok);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiDiv(rcx, rcx, r12, &fail_ok2);
+    __ incq(r12);
+    __ SmiDiv(rcx, rcx, r14, &fail_ok2);
     __ jmp(exit);
     __ bind(&fail_ok2);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
   }
@@ -1145,10 +1180,11 @@ TEST(SmiDiv) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
+  __ push(r14);
   __ push(r12);
-  __ push(r15);
   TestSmiDiv(masm, &exit, 0x10, 1, 1);
   TestSmiDiv(masm, &exit, 0x20, 1, 0);
   TestSmiDiv(masm, &exit, 0x30, -1, 0);
@@ -1170,11 +1206,12 @@ TEST(SmiDiv) {
   TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
   TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
 
-  __ xor_(r15, r15);  // Success.
+  __ xor_(r12, r12);  // Success.
   __ bind(&exit);
-  __ movq(rax, r15);
-  __ pop(r15);
+  __ movq(rax, r12);
   __ pop(r12);
+  __ pop(r14);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1192,47 +1229,47 @@ void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
   bool negative_zero = (!fraction && x < 0);
   __ Move(rcx, Smi::FromInt(x));
   __ movq(r11, rcx);
-  __ Move(r12, Smi::FromInt(y));
+  __ Move(r14, Smi::FromInt(y));
   if (!division_overflow && !negative_zero && !division_by_zero) {
     // Modulo succeeds
-    __ movq(r15, Immediate(id));
+    __ movq(r12, Immediate(id));
     int result = x % y;
     __ Move(r8, Smi::FromInt(result));
-    __ SmiMod(r9, rcx, r12, exit);
+    __ SmiMod(r9, rcx, r14, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(r9, r8);
     __ j(not_equal, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiMod(rcx, rcx, r12, exit);
+    __ incq(r12);
+    __ SmiMod(rcx, rcx, r14, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r8);
     __ j(not_equal, exit);
   } else {
     // Modulo fails.
-    __ movq(r15, Immediate(id + 8));
+    __ movq(r12, Immediate(id + 8));
 
     Label fail_ok, fail_ok2;
-    __ SmiMod(r9, rcx, r12, &fail_ok);
+    __ SmiMod(r9, rcx, r14, &fail_ok);
     __ jmp(exit);
     __ bind(&fail_ok);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiMod(rcx, rcx, r12, &fail_ok2);
+    __ incq(r12);
+    __ SmiMod(rcx, rcx, r14, &fail_ok2);
     __ jmp(exit);
     __ bind(&fail_ok2);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
   }
@@ -1252,10 +1289,11 @@ TEST(SmiMod) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
+  __ push(r14);
   __ push(r12);
-  __ push(r15);
   TestSmiMod(masm, &exit, 0x10, 1, 1);
   TestSmiMod(masm, &exit, 0x20, 1, 0);
   TestSmiMod(masm, &exit, 0x30, -1, 0);
@@ -1277,11 +1315,12 @@ TEST(SmiMod) {
   TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
   TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
 
-  __ xor_(r15, r15);  // Success.
+  __ xor_(r12, r12);  // Success.
   __ bind(&exit);
-  __ movq(rax, r15);
-  __ pop(r15);
+  __ movq(rax, r12);
   __ pop(r12);
+  __ pop(r14);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1336,7 +1375,7 @@ TEST(SmiIndex) {
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
                                       &actual_size,
                                       true));
   CHECK(buffer);
@@ -1345,6 +1384,7 @@ TEST(SmiIndex) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiIndex(masm, &exit, 0x10, 0);
@@ -1355,6 +1395,7 @@ TEST(SmiIndex) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1411,6 +1452,7 @@ TEST(SmiSelectNonSmi) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);  // Avoid inline checks.
+  EntryCode(masm);
   Label exit;
 
   TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
@@ -1425,6 +1467,7 @@ TEST(SmiSelectNonSmi) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1487,6 +1530,7 @@ TEST(SmiAnd) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiAnd(masm, &exit, 0x10, 0, 0);
@@ -1503,6 +1547,7 @@ TEST(SmiAnd) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1565,6 +1610,7 @@ TEST(SmiOr) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiOr(masm, &exit, 0x10, 0, 0);
@@ -1583,6 +1629,7 @@ TEST(SmiOr) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1645,6 +1692,7 @@ TEST(SmiXor) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiXor(masm, &exit, 0x10, 0, 0);
@@ -1663,6 +1711,7 @@ TEST(SmiXor) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1709,6 +1758,7 @@ TEST(SmiNot) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiNot(masm, &exit, 0x10, 0);
@@ -1722,6 +1772,7 @@ TEST(SmiNot) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1793,7 +1844,7 @@ TEST(SmiShiftLeft) {
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
                                       &actual_size,
                                       true));
   CHECK(buffer);
@@ -1802,6 +1853,7 @@ TEST(SmiShiftLeft) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiShiftLeft(masm, &exit, 0x10, 0);
@@ -1814,6 +1866,7 @@ TEST(SmiShiftLeft) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1896,7 +1949,7 @@ TEST(SmiShiftLogicalRight) {
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
                                       &actual_size,
                                       true));
   CHECK(buffer);
@@ -1905,6 +1958,7 @@ TEST(SmiShiftLogicalRight) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
@@ -1917,6 +1971,7 @@ TEST(SmiShiftLogicalRight) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1971,6 +2026,7 @@ TEST(SmiShiftArithmeticRight) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
@@ -1983,6 +2039,7 @@ TEST(SmiShiftArithmeticRight) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -2032,6 +2089,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
@@ -2046,6 +2104,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -2074,8 +2133,9 @@ TEST(OperandOffset) {
   masm->set_allow_stub_calls(false);
   Label exit;
 
-  __ push(r12);
+  EntryCode(masm);
   __ push(r13);
+  __ push(r14);
   __ push(rbx);
   __ push(rbp);
   __ push(Immediate(0x100));  // <-- rbp
@@ -2093,7 +2153,7 @@ TEST(OperandOffset) {
   // r12 = rsp[3]
   // rbx = rsp[5]
   // r13 = rsp[7]
-  __ lea(r12, Operand(rsp, 3 * kPointerSize));
+  __ lea(r14, Operand(rsp, 3 * kPointerSize));
   __ lea(r13, Operand(rbp, -3 * kPointerSize));
   __ lea(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
@@ -2396,8 +2456,9 @@ TEST(OperandOffset) {
   __ lea(rsp, Operand(rbp, kPointerSize));
   __ pop(rbp);
   __ pop(rbx);
+  __ pop(r14);
   __ pop(r13);
-  __ pop(r12);
+  ExitCode(masm);
   __ ret(0);
-- 
2.7.4
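
Note on the constant-derivation trick (a standalone editorial sketch, not part of the patch): LoadSmiConstant in macro-assembler-x64.cc materializes the smi constants 2-5, 8 and 9 from kSmiConstantRegister with a single lea, plus an xorl or neg where needed. This works because the x64 smi encoding of this era is linear: Smi::FromInt(n) is n shifted left by kSmiShift (kSmiTagSize 1 + kSmiShiftSize 31 = 32 bits), so sums and small scalings of tagged values are themselves correctly tagged. The self-contained C++ program below checks that arithmetic; the 32-bit shift encoding is its only assumption.

#include <cassert>
#include <cstdint>

// Assumed encoding (x64 V8 at this revision): a smi keeps its 32-bit
// integer payload in the upper half of the word, i.e. smi(n) == n << 32.
constexpr int kSmiShift = 32;  // kSmiTagSize (1) + kSmiShiftSize (31).

// Multiply instead of shifting so negative payloads are well-defined too.
constexpr int64_t SmiFromInt(int64_t n) {
  return n * (int64_t{1} << kSmiShift);
}

int main() {
  const int64_t one = SmiFromInt(1);  // The value kept in kSmiConstantRegister.

  // lea dst, [r15 + r15*4]  ->  Smi::FromInt(5).
  assert(one + one * 4 == SmiFromInt(5));
  // lea dst, [r15 + r15*8]  ->  Smi::FromInt(9).
  assert(one + one * 8 == SmiFromInt(9));
  // xorl dst, dst; lea dst, [dst + r15*8]  ->  Smi::FromInt(8).
  assert(0 + one * 8 == SmiFromInt(8));
  // neg dst negates the tagged value and hence the payload: smi(3) -> smi(-3).
  assert(-SmiFromInt(3) == SmiFromInt(-3));
  return 0;
}

Values with no single-lea decomposition fall through to the switch's default case, which loads the full 64-bit tagged constant with a movq, exactly as the old Move(dst, constant) path did.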