From c867df5181fa7b9a6c63a89d2d1d0bd2d6d0bdae Mon Sep 17 00:00:00 2001 From: "haitao.feng@intel.com" Date: Fri, 21 Mar 2014 02:42:10 +0000 Subject: [PATCH] Introduce addp, idivp, imulp and subp for x64 port R=verwaest@chromium.org Review URL: https://codereview.chromium.org/196893003 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20140 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- src/x64/assembler-x64.cc | 72 ++-------- src/x64/assembler-x64.h | 238 ++++++++++++++++++++-------------- src/x64/builtins-x64.cc | 20 +-- src/x64/code-stubs-x64.cc | 76 +++++------ src/x64/codegen-x64.cc | 2 +- src/x64/debug-x64.cc | 2 +- src/x64/deoptimizer-x64.cc | 14 +- src/x64/full-codegen-x64.cc | 26 ++-- src/x64/ic-x64.cc | 4 +- src/x64/lithium-codegen-x64.cc | 46 +++---- src/x64/macro-assembler-x64.cc | 125 +++++++++--------- src/x64/macro-assembler-x64.h | 6 +- src/x64/regexp-macro-assembler-x64.cc | 54 ++++---- src/x64/stub-cache-x64.cc | 2 +- test/cctest/test-assembler-x64.cc | 2 +- test/cctest/test-disasm-x64.cc | 12 +- 16 files changed, 347 insertions(+), 354 deletions(-) diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc index 133a74d..36928f7 100644 --- a/src/x64/assembler-x64.cc +++ b/src/x64/assembler-x64.cc @@ -110,7 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { #endif // Patch the code. - patcher.masm()->movp(kScratchRegister, target, Assembler::RelocInfoNone()); + patcher.masm()->movp(kScratchRegister, reinterpret_cast(target), + Assembler::RelocInfoNone()); patcher.masm()->call(kScratchRegister); // Check that the size of the code generated is as expected. @@ -1008,92 +1009,43 @@ void Assembler::hlt() { } -void Assembler::idivq(Register src) { +void Assembler::emit_idiv(Register src, int size) { EnsureSpace ensure_space(this); - emit_rex_64(src); + emit_rex(src, size); emit(0xF7); emit_modrm(0x7, src); } -void Assembler::idivl(Register src) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(src); - emit(0xF7); - emit_modrm(0x7, src); -} - - -void Assembler::imul(Register src) { - EnsureSpace ensure_space(this); - emit_rex_64(src); - emit(0xF7); - emit_modrm(0x5, src); -} - - -void Assembler::imul(Register dst, Register src) { - EnsureSpace ensure_space(this); - emit_rex_64(dst, src); - emit(0x0F); - emit(0xAF); - emit_modrm(dst, src); -} - - -void Assembler::imul(Register dst, const Operand& src) { - EnsureSpace ensure_space(this); - emit_rex_64(dst, src); - emit(0x0F); - emit(0xAF); - emit_operand(dst, src); -} - - -void Assembler::imul(Register dst, Register src, Immediate imm) { - EnsureSpace ensure_space(this); - emit_rex_64(dst, src); - if (is_int8(imm.value_)) { - emit(0x6B); - emit_modrm(dst, src); - emit(imm.value_); - } else { - emit(0x69); - emit_modrm(dst, src); - emitl(imm.value_); - } -} - - -void Assembler::imull(Register src) { +void Assembler::emit_imul(Register src, int size) { EnsureSpace ensure_space(this); - emit_optional_rex_32(src); + emit_rex(src, size); emit(0xF7); emit_modrm(0x5, src); } -void Assembler::imull(Register dst, Register src) { +void Assembler::emit_imul(Register dst, Register src, int size) { EnsureSpace ensure_space(this); - emit_optional_rex_32(dst, src); + emit_rex(dst, src, size); emit(0x0F); emit(0xAF); emit_modrm(dst, src); } -void Assembler::imull(Register dst, const Operand& src) { +void Assembler::emit_imul(Register dst, const Operand& src, int size) { EnsureSpace ensure_space(this); - emit_optional_rex_32(dst, src); + emit_rex(dst, src, size); emit(0x0F); emit(0xAF); 
emit_operand(dst, src); } -void Assembler::imull(Register dst, Register src, Immediate imm) { +void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) { EnsureSpace ensure_space(this); - emit_optional_rex_32(dst, src); + emit_rex(dst, src, size); if (is_int8(imm.value_)) { emit(0x6B); emit_modrm(dst, src); diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h index a2b7282..c4f80c0 100644 --- a/src/x64/assembler-x64.h +++ b/src/x64/assembler-x64.h @@ -509,8 +509,12 @@ class CpuFeatures : public AllStatic { }; -#define ASSEMBLER_INSTRUCTION_LIST(V) \ - V(mov) +#define ASSEMBLER_INSTRUCTION_LIST(V) \ + V(add) \ + V(idiv) \ + V(imul) \ + V(mov) \ + V(sub) class Assembler : public AssemblerBase { @@ -664,6 +668,21 @@ class Assembler : public AssemblerBase { // Naming conflicts with C++ keywords are resolved by adding a trailing '_'. #define DECLARE_INSTRUCTION(instruction) \ + template \ + void instruction##p(P1 p1) { \ + emit_##instruction(p1, kPointerSize); \ + } \ + \ + template \ + void instruction##l(P1 p1) { \ + emit_##instruction(p1, kInt32Size); \ + } \ + \ + template \ + void instruction##q(P1 p1) { \ + emit_##instruction(p1, kInt64Size); \ + } \ + \ template \ void instruction##p(P1 p1, P2 p2) { \ emit_##instruction(p1, p2, kPointerSize); \ @@ -677,6 +696,21 @@ class Assembler : public AssemblerBase { template \ void instruction##q(P1 p1, P2 p2) { \ emit_##instruction(p1, p2, kInt64Size); \ + } \ + \ + template \ + void instruction##p(P1 p1, P2 p2, P3 p3) { \ + emit_##instruction(p1, p2, p3, kPointerSize); \ + } \ + \ + template \ + void instruction##l(P1 p1, P2 p2, P3 p3) { \ + emit_##instruction(p1, p2, p3, kInt32Size); \ + } \ + \ + template \ + void instruction##q(P1 p1, P2 p2, P3 p3) { \ + emit_##instruction(p1, p2, p3, kInt64Size); \ } ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION) #undef DECLARE_INSTRUCTION @@ -760,47 +794,6 @@ class Assembler : public AssemblerBase { void xchgq(Register dst, Register src); void xchgl(Register dst, Register src); - // Arithmetics - void addl(Register dst, Register src) { - arithmetic_op_32(0x03, dst, src); - } - - void addl(Register dst, Immediate src) { - immediate_arithmetic_op_32(0x0, dst, src); - } - - void addl(Register dst, const Operand& src) { - arithmetic_op_32(0x03, dst, src); - } - - void addl(const Operand& dst, Immediate src) { - immediate_arithmetic_op_32(0x0, dst, src); - } - - void addl(const Operand& dst, Register src) { - arithmetic_op_32(0x01, src, dst); - } - - void addq(Register dst, Register src) { - arithmetic_op(0x03, dst, src); - } - - void addq(Register dst, const Operand& src) { - arithmetic_op(0x03, dst, src); - } - - void addq(const Operand& dst, Register src) { - arithmetic_op(0x01, src, dst); - } - - void addq(Register dst, Immediate src) { - immediate_arithmetic_op(0x0, dst, src); - } - - void addq(const Operand& dst, Immediate src) { - immediate_arithmetic_op(0x0, dst, src); - } - void sbbl(Register dst, Register src) { arithmetic_op_32(0x1b, dst, src); } @@ -939,22 +932,6 @@ class Assembler : public AssemblerBase { // Sign-extends eax into edx:eax. void cdq(); - // Divide rdx:rax by src. Quotient in rax, remainder in rdx. - void idivq(Register src); - // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx. - void idivl(Register src); - - // Signed multiply instructions. - void imul(Register src); // rdx:rax = rax * src. - void imul(Register dst, Register src); // dst = dst * src. - void imul(Register dst, const Operand& src); // dst = dst * src. 
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm. - // Signed 32-bit multiply instructions. - void imull(Register src); // edx:eax = eax * src. - void imull(Register dst, Register src); // dst = dst * src. - void imull(Register dst, const Operand& src); // dst = dst * src. - void imull(Register dst, Register src, Immediate imm); // dst = src * imm. - void incq(Register dst); void incq(const Operand& dst); void incl(Register dst); @@ -1105,46 +1082,6 @@ class Assembler : public AssemblerBase { void store_rax(void* dst, RelocInfo::Mode mode); void store_rax(ExternalReference ref); - void subq(Register dst, Register src) { - arithmetic_op(0x2B, dst, src); - } - - void subq(Register dst, const Operand& src) { - arithmetic_op(0x2B, dst, src); - } - - void subq(const Operand& dst, Register src) { - arithmetic_op(0x29, src, dst); - } - - void subq(Register dst, Immediate src) { - immediate_arithmetic_op(0x5, dst, src); - } - - void subq(const Operand& dst, Immediate src) { - immediate_arithmetic_op(0x5, dst, src); - } - - void subl(Register dst, Register src) { - arithmetic_op_32(0x2B, dst, src); - } - - void subl(Register dst, const Operand& src) { - arithmetic_op_32(0x2B, dst, src); - } - - void subl(const Operand& dst, Register src) { - arithmetic_op_32(0x29, src, dst); - } - - void subl(const Operand& dst, Immediate src) { - immediate_arithmetic_op_32(0x5, dst, src); - } - - void subl(Register dst, Immediate src) { - immediate_arithmetic_op_32(0x5, dst, src); - } - void subb(Register dst, Immediate src) { immediate_arithmetic_op_8(0x5, dst, src); } @@ -1712,6 +1649,109 @@ class Assembler : public AssemblerBase { // record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); + // Arithmetics + void emit_add(Register dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x03, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x03, dst, src); + } + } + + void emit_add(Register dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x0, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x0, dst, src); + } + } + + void emit_add(Register dst, const Operand& src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x03, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x03, dst, src); + } + } + + void emit_add(const Operand& dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x1, src, dst); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x1, src, dst); + } + } + + void emit_add(const Operand& dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x0, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x0, dst, src); + } + } + + // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64. + // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx + // when size is 32. + void emit_idiv(Register src, int size); + + // Signed multiply instructions. + // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32. 
+ void emit_imul(Register src, int size); + void emit_imul(Register dst, Register src, int size); + void emit_imul(Register dst, const Operand& src, int size); + void emit_imul(Register dst, Register src, Immediate imm, int size); + + void emit_sub(Register dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x2B, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x2B, dst, src); + } + } + + void emit_sub(Register dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x5, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x5, dst, src); + } + } + + void emit_sub(Register dst, const Operand& src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x2B, dst, src); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x2B, dst, src); + } + } + + void emit_sub(const Operand& dst, Register src, int size) { + if (size == kInt64Size) { + arithmetic_op(0x29, src, dst); + } else { + ASSERT(size == kInt32Size); + arithmetic_op_32(0x29, src, dst); + } + } + + void emit_sub(const Operand& dst, Immediate src, int size) { + if (size == kInt64Size) { + immediate_arithmetic_op(0x5, dst, src); + } else { + ASSERT(size == kInt32Size); + immediate_arithmetic_op_32(0x5, dst, src); + } + } + void emit_mov(Register dst, const Operand& src, int size); void emit_mov(Register dst, Register src, int size); void emit_mov(const Operand& dst, Register src, int size); diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index a816030..c9be9b7 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, // JumpToExternalReference expects rax to contain the number of arguments // including the receiver and the extra arguments. - __ addq(rax, Immediate(num_extra_args + 1)); + __ addp(rax, Immediate(num_extra_args + 1)); __ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1); } @@ -289,10 +289,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); - __ addq(rdx, rcx); + __ addp(rdx, rcx); // Calculate unused properties past the end of the in-object properties. __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); - __ subq(rdx, rcx); + __ subp(rdx, rcx); // Done if no extra properties are to be allocated. __ j(zero, &allocated); __ Assert(positive, kPropertyAllocationCountFailed); @@ -332,7 +332,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ jmp(&entry); __ bind(&loop); __ movp(Operand(rcx, 0), rdx); - __ addq(rcx, Immediate(kPointerSize)); + __ addp(rcx, Immediate(kPointerSize)); __ bind(&entry); __ cmpq(rcx, rax); __ j(below, &loop); @@ -590,7 +590,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ bind(&loop); __ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0)); __ Push(Operand(kScratchRegister, 0)); // dereference handle - __ addq(rcx, Immediate(1)); + __ addp(rcx, Immediate(1)); __ bind(&entry); __ cmpq(rcx, rax); __ j(not_equal, &loop); @@ -670,7 +670,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // Re-execute the code that was patched back to the young age when // the stub returns. 
- __ subq(Operand(rsp, 0), Immediate(5)); + __ subp(Operand(rsp, 0), Immediate(5)); __ Pushad(); __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate())); __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize)); @@ -706,7 +706,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { __ Pushad(); __ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate())); __ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize)); - __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); + __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); { // NOLINT FrameScope scope(masm, StackFrame::MANUAL); __ PrepareCallCFunction(2); @@ -1007,7 +1007,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ movp(rcx, rsp); // Make rcx the space we have left. The stack might already be overflowed // here which will cause rcx to become negative. - __ subq(rcx, kScratchRegister); + __ subp(rcx, kScratchRegister); // Make rdx the space we need for the array when it is unrolled onto the // stack. __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2); @@ -1388,7 +1388,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(©); __ incq(r8); __ Push(Operand(rax, 0)); - __ subq(rax, Immediate(kPointerSize)); + __ subp(rax, Immediate(kPointerSize)); __ cmpq(r8, rbx); __ j(less, ©); __ jmp(&invoke); @@ -1407,7 +1407,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { __ bind(©); __ incq(r8); __ Push(Operand(rdi, 0)); - __ subq(rdi, Immediate(kPointerSize)); + __ subp(rdi, Immediate(kPointerSize)); __ cmpq(r8, rax); __ j(less, ©); diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index c34d1c8..ce820b1 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -603,7 +603,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { // Restore registers __ bind(&done); if (stash_exponent_copy) { - __ addq(rsp, Immediate(kDoubleSize)); + __ addp(rsp, Immediate(kDoubleSize)); } if (!final_result_reg.is(result_reg)) { ASSERT(final_result_reg.is(rcx)); @@ -787,7 +787,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ bind(&fast_power); __ fnclex(); // Clear flags to catch exceptions later. // Transfer (B)ase and (E)xponent onto the FPU register stack. - __ subq(rsp, Immediate(kDoubleSize)); + __ subp(rsp, Immediate(kDoubleSize)); __ movsd(Operand(rsp, 0), double_exponent); __ fld_d(Operand(rsp, 0)); // E __ movsd(Operand(rsp, 0), double_base); @@ -814,12 +814,12 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ j(not_zero, &fast_power_failed, Label::kNear); __ fstp_d(Operand(rsp, 0)); __ movsd(double_result, Operand(rsp, 0)); - __ addq(rsp, Immediate(kDoubleSize)); + __ addp(rsp, Immediate(kDoubleSize)); __ jmp(&done); __ bind(&fast_power_failed); __ fninit(); - __ addq(rsp, Immediate(kDoubleSize)); + __ addp(rsp, Immediate(kDoubleSize)); __ jmp(&call_runtime); } @@ -1050,7 +1050,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize)); // 3. Arguments object. - __ addq(r8, Immediate(Heap::kSloppyArgumentsObjectSize)); + __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize)); // Do the allocation of all three objects in one go. 
__ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT); @@ -1136,8 +1136,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { // Load tagged parameter count into r9. __ Integer32ToSmi(r9, rbx); __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS)); - __ addq(r8, args.GetArgumentOperand(2)); - __ subq(r8, r9); + __ addp(r8, args.GetArgumentOperand(2)); + __ subp(r8, r9); __ Move(r11, factory->the_hole_value()); __ movp(rdx, rdi); __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); @@ -1179,17 +1179,17 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { // Untag rcx for the loop below. __ SmiToInteger64(rcx, rcx); __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0)); - __ subq(rdx, kScratchRegister); + __ subp(rdx, kScratchRegister); __ jmp(&arguments_test, Label::kNear); __ bind(&arguments_loop); - __ subq(rdx, Immediate(kPointerSize)); + __ subp(rdx, Immediate(kPointerSize)); __ movp(r9, Operand(rdx, 0)); __ movp(FieldOperand(rdi, r8, times_pointer_size, FixedArray::kHeaderSize), r9); - __ addq(r8, Immediate(1)); + __ addp(r8, Immediate(1)); __ bind(&arguments_test); __ cmpq(r8, rcx); @@ -1270,7 +1270,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ j(zero, &add_arguments_object, Label::kNear); __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); __ bind(&add_arguments_object); - __ addq(rcx, Immediate(Heap::kStrictArgumentsObjectSize)); + __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize)); // Do the allocation of both objects in one go. __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); @@ -1320,8 +1320,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ bind(&loop); __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver. __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx); - __ addq(rdi, Immediate(kPointerSize)); - __ subq(rdx, Immediate(kPointerSize)); + __ addp(rdi, Immediate(kPointerSize)); + __ subp(rdx, Immediate(kPointerSize)); __ decq(rcx); __ j(not_zero, &loop); @@ -1541,7 +1541,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ Move(kScratchRegister, address_of_regexp_stack_memory_address); __ movp(r9, Operand(kScratchRegister, 0)); __ Move(kScratchRegister, address_of_regexp_stack_memory_size); - __ addq(r9, Operand(kScratchRegister, 0)); + __ addp(r9, Operand(kScratchRegister, 0)); __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9); // Argument 6: Set the number of capture registers to zero to force global @@ -1577,9 +1577,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Label setup_two_byte, setup_rest, got_length, length_not_from_slice; // Prepare start and end index of the input. // Load the length from the original sliced string if that is the case. - __ addq(rbx, r14); + __ addp(rbx, r14); __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset)); - __ addq(r14, arg_reg_3); // Using arg3 as scratch. + __ addp(r14, arg_reg_3); // Using arg3 as scratch. // rbx: start index of the input // r14: end index of the input @@ -1606,7 +1606,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ movp(arg_reg_1, r15); // Locate the code entry and call it. 
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag)); __ call(r11); __ LeaveApiExitFrame(true); @@ -1691,7 +1691,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Capture register counter starts from number of capture registers and // counts down until wraping after zero. __ bind(&next_capture); - __ subq(rdx, Immediate(1)); + __ subp(rdx, Immediate(1)); __ j(negative, &done, Label::kNear); // Read the value from the static offsets vector buffer and make it a smi. __ movl(rdi, Operand(rcx, rdx, times_int_size, 0)); @@ -1755,7 +1755,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); - __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); STATIC_ASSERT(kTwoByteStringTag == 0); // (8a) Is the external string one byte? If yes, go to (6). __ testb(rbx, Immediate(kStringEncodingMask)); @@ -1837,7 +1837,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // Compare two smis. Label non_smi, smi_done; __ JumpIfNotBothSmi(rax, rdx, &non_smi); - __ subq(rdx, rax); + __ subp(rdx, rax); __ j(no_overflow, &smi_done); __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. __ bind(&smi_done); @@ -1971,7 +1971,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // Return a result of -1, 0, or 1, based on EFLAGS. __ setcc(above, rax); __ setcc(below, rcx); - __ subq(rax, rcx); + __ subp(rax, rcx); __ ret(0); // If one of the numbers was NaN, then the result is always false. @@ -2685,7 +2685,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { #ifdef _WIN64 // On Win64 XMM6-XMM15 are callee-save - __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); + __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6); __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7); __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8); @@ -2798,7 +2798,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7)); __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8)); __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9)); - __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); + __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); #endif __ popq(rbx); @@ -2811,7 +2811,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ popq(r14); __ popq(r13); __ popq(r12); - __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers + __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers // Restore frame pointer and return. __ popq(rbp); @@ -2900,7 +2900,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } else { // Get return address and delta to inlined map check. 
__ movq(kScratchRegister, StackOperandForReturnAddress(0)); - __ subq(kScratchRegister, args.GetArgumentOperand(2)); + __ subp(kScratchRegister, args.GetArgumentOperand(2)); if (FLAG_debug_code) { __ movl(rdi, Immediate(kWordBeforeMapCheckValue)); __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi); @@ -2941,7 +2941,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { ASSERT(true_offset >= 0 && true_offset < 0x100); __ movl(rax, Immediate(true_offset)); __ movq(kScratchRegister, StackOperandForReturnAddress(0)); - __ subq(kScratchRegister, args.GetArgumentOperand(2)); + __ subp(kScratchRegister, args.GetArgumentOperand(2)); __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); if (FLAG_debug_code) { __ movl(rax, Immediate(kWordBeforeResultValue)); @@ -2964,7 +2964,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { ASSERT(false_offset >= 0 && false_offset < 0x100); __ movl(rax, Immediate(false_offset)); __ movq(kScratchRegister, StackOperandForReturnAddress(0)); - __ subq(kScratchRegister, args.GetArgumentOperand(2)); + __ subp(kScratchRegister, args.GetArgumentOperand(2)); __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); if (FLAG_debug_code) { __ movl(rax, Immediate(kWordBeforeResultValue)); @@ -3322,7 +3322,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&sliced_string); // Sliced string. Fetch parent and correct start index by offset. - __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset)); + __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset)); __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset)); // Update instance type. __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset)); @@ -3393,7 +3393,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); - __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); __ bind(&sequential_string); STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); @@ -3641,7 +3641,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ IncrementCounter(counters->string_compare_native(), 1); // Drop arguments from the stack __ PopReturnAddressTo(rcx); - __ addq(rsp, Immediate(2 * kPointerSize)); + __ addp(rsp, Immediate(2 * kPointerSize)); __ PushReturnAddressFrom(rcx); GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8); @@ -3804,7 +3804,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) { FixedArray::kHeaderSize - argc * kPointerSize)); __ cmpq(rdx, rcx); __ j(not_equal, &call_builtin); - __ addq(rcx, Immediate(kAllocationDelta * kPointerSize)); + __ addp(rcx, Immediate(kAllocationDelta * kPointerSize)); Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit); __ cmpq(rcx, limit_operand); __ j(above, &call_builtin); @@ -3884,10 +3884,10 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { if (GetCondition() == equal) { // For equality we do not care about the sign of the result. - __ subq(rax, rdx); + __ subp(rax, rdx); } else { Label done; - __ subq(rdx, rax); + __ subp(rdx, rax); __ j(no_overflow, &done, Label::kNear); // Correct sign of result in case of overflow. 
__ not_(rdx); @@ -4163,7 +4163,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { __ j(not_equal, &miss, Label::kNear); ASSERT(GetCondition() == equal); - __ subq(rax, rdx); + __ subp(rax, rdx); __ ret(0); __ bind(&miss); @@ -4183,7 +4183,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { __ Cmp(rbx, known_map_); __ j(not_equal, &miss, Label::kNear); - __ subq(rax, rdx); + __ subp(rax, rdx); __ ret(0); __ bind(&miss); @@ -4550,7 +4550,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( __ movp(regs_.scratch1(), Operand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset)); - __ subq(regs_.scratch1(), Immediate(1)); + __ subp(regs_.scratch1(), Immediate(1)); __ movp(Operand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset), regs_.scratch1()); @@ -4745,7 +4745,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { // Calculate the function address to the first arg. __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize)); - __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); + __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); // Save the remainder of the volatile registers. masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2); @@ -5157,7 +5157,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { // FunctionCallbackInfo::implicit_args_. __ movp(StackSpaceOperand(0), scratch); - __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize)); + __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize)); __ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_. __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_. // FunctionCallbackInfo::is_construct_call_. diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index f338668..b72f12f 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -496,7 +496,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, // Handle slices. Label indirect_string_loaded; __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset)); - __ addq(index, result); + __ addp(index, result); __ movp(string, FieldOperand(string, SlicedString::kParentOffset)); __ jmp(&indirect_string_loaded, Label::kNear); diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc index d4126d9..dcf2341 100644 --- a/src/x64/debug-x64.cc +++ b/src/x64/debug-x64.cc @@ -164,7 +164,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, // If this call did not replace a call but patched other code then there will // be an unwanted return address left on the stack. Here we get rid of that. 
if (convert_call_to_jmp) { - __ addq(rsp, Immediate(kPCOnStackSize)); + __ addp(rsp, Immediate(kPCOnStackSize)); } // Now that the break point has been handled, resume normal execution by diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc index 04f6feb..11934a9 100644 --- a/src/x64/deoptimizer-x64.cc +++ b/src/x64/deoptimizer-x64.cc @@ -167,7 +167,7 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * XMMRegister::NumAllocatableRegisters(); - __ subq(rsp, Immediate(kDoubleRegsSize)); + __ subp(rsp, Immediate(kDoubleRegsSize)); for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); @@ -199,7 +199,7 @@ void Deoptimizer::EntryGenerator::Generate() { __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize + kPCOnStackSize)); - __ subq(arg5, rbp); + __ subp(arg5, rbp); __ neg(arg5); // Allocate a new deoptimizer object. @@ -241,12 +241,12 @@ void Deoptimizer::EntryGenerator::Generate() { } // Remove the bailout id and return address from the stack. - __ addq(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize)); + __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize)); // Compute a pointer to the unwinding limit in register rcx; that is // the first stack slot not part of the input frame. __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset())); - __ addq(rcx, rsp); + __ addp(rcx, rsp); // Unwind the stack down to - but not including - the unwinding // limit and copy the contents of the activation frame to the input @@ -257,7 +257,7 @@ void Deoptimizer::EntryGenerator::Generate() { Label pop_loop; __ bind(&pop_loop); __ Pop(Operand(rdx, 0)); - __ addq(rdx, Immediate(sizeof(intptr_t))); + __ addp(rdx, Immediate(sizeof(intptr_t))); __ bind(&pop_loop_header); __ cmpq(rcx, rsp); __ j(not_equal, &pop_loop); @@ -289,12 +289,12 @@ void Deoptimizer::EntryGenerator::Generate() { __ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset())); __ jmp(&inner_loop_header); __ bind(&inner_push_loop); - __ subq(rcx, Immediate(sizeof(intptr_t))); + __ subp(rcx, Immediate(sizeof(intptr_t))); __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); __ bind(&inner_loop_header); __ testq(rcx, rcx); __ j(not_zero, &inner_push_loop); - __ addq(rax, Immediate(kPointerSize)); + __ addp(rax, Immediate(kPointerSize)); __ bind(&outer_loop_header); __ cmpq(rax, rdx); __ j(below, &outer_push_loop); diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index 846b713..137622e 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -1120,7 +1120,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ jmp(&loop); __ bind(&no_descriptors); - __ addq(rsp, Immediate(kPointerSize)); + __ addp(rsp, Immediate(kPointerSize)); __ jmp(&exit); // We got a fixed array in register rax. Iterate through that. @@ -1212,7 +1212,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // Remove the pointers stored on the stack. __ bind(loop_statement.break_label()); - __ addq(rsp, Immediate(5 * kPointerSize)); + __ addp(rsp, Immediate(5 * kPointerSize)); // Exit and decrement the loop depth. 
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); @@ -1834,7 +1834,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } if (result_saved) { - __ addq(rsp, Immediate(kPointerSize)); // literal index + __ addp(rsp, Immediate(kPointerSize)); // literal index context()->PlugTOS(); } else { context()->Plug(rax); @@ -2138,7 +2138,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); Label push_argument_holes, push_frame; __ bind(&push_argument_holes); - __ subq(rdx, Immediate(1)); + __ subp(rdx, Immediate(1)); __ j(carry, &push_frame); __ Push(rcx); __ jmp(&push_argument_holes); @@ -2169,7 +2169,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); __ SmiToInteger64(rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset)); - __ addq(rdx, rcx); + __ addp(rdx, rcx); __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset), Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)); __ jmp(rdx); @@ -2180,7 +2180,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, // up the stack and the handlers. Label push_operand_holes, call_resume; __ bind(&push_operand_holes); - __ subq(rdx, Immediate(1)); + __ subp(rdx, Immediate(1)); __ j(carry, &call_resume); __ Push(rcx); __ jmp(&push_operand_holes); @@ -3002,13 +3002,13 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( // rbx: descriptor array. // rcx: valid entries in the descriptor array. // Calculate the end of the descriptor array. - __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize)); + __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize)); SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2); __ lea(rcx, Operand( r8, index.reg, index.scale, DescriptorArray::kFirstOffset)); // Calculate location of the first key name. - __ addq(r8, Immediate(DescriptorArray::kFirstOffset)); + __ addp(r8, Immediate(DescriptorArray::kFirstOffset)); // Loop through all the keys in the descriptor array. If one of these is the // internalized string "valueOf" the result is false. __ jmp(&entry); @@ -3016,7 +3016,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ movp(rdx, FieldOperand(r8, 0)); __ Cmp(rdx, isolate()->factory()->value_of_string()); __ j(equal, if_false); - __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize)); + __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize)); __ bind(&entry); __ cmpq(r8, rcx); __ j(not_equal, &loop); @@ -3858,7 +3858,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Separator operand is already pushed. Make room for the two // other stack fields, and clear the direction flag in anticipation // of calling CopyBytes. - __ subq(rsp, Immediate(2 * kPointerSize)); + __ subp(rsp, Immediate(2 * kPointerSize)); __ cld(); // Check that the array is a JSArray __ JumpIfSmi(array, &bailout); @@ -4106,7 +4106,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ bind(&return_result); // Drop temp values from the stack, and restore context register. 
- __ addq(rsp, Immediate(3 * kPointerSize)); + __ addp(rsp, Immediate(3 * kPointerSize)); __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); context()->Plug(rax); } @@ -4737,7 +4737,7 @@ void FullCodeGenerator::EnterFinallyBlock() { // Cook return address on top of stack (smi encoded Code* delta) __ PopReturnAddressTo(rdx); __ Move(rcx, masm_->CodeObject()); - __ subq(rdx, rcx); + __ subp(rdx, rcx); __ Integer32ToSmi(rdx, rdx); __ Push(rdx); @@ -4790,7 +4790,7 @@ void FullCodeGenerator::ExitFinallyBlock() { __ Pop(rdx); __ SmiToInteger32(rdx, rdx); __ Move(rcx, masm_->CodeObject()); - __ addq(rdx, rcx); + __ addp(rdx, rcx); __ jmp(rdx); } diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc index f6af561..2c927b5 100644 --- a/src/x64/ic-x64.cc +++ b/src/x64/ic-x64.cc @@ -468,7 +468,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ LoadAddress(kScratchRegister, cache_field_offsets); __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); - __ subq(rdi, rcx); + __ subp(rdi, rcx); __ j(above_equal, &property_array_property); if (i != 0) { __ jmp(&load_in_object_property); @@ -478,7 +478,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load in-object property. __ bind(&load_in_object_property); __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset)); - __ addq(rcx, rdi); + __ addp(rcx, rdi); __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index d1c893d..bd8a6ca 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -187,7 +187,7 @@ bool LCodeGen::GeneratePrologue() { int slots = GetStackSlotCount(); if (slots > 0) { if (FLAG_debug_code) { - __ subq(rsp, Immediate(slots * kPointerSize)); + __ subp(rsp, Immediate(slots * kPointerSize)); #ifdef _MSC_VER MakeSureStackPagesMapped(slots * kPointerSize); #endif @@ -202,7 +202,7 @@ bool LCodeGen::GeneratePrologue() { __ j(not_zero, &loop); __ Pop(rax); } else { - __ subq(rsp, Immediate(slots * kPointerSize)); + __ subp(rsp, Immediate(slots * kPointerSize)); #ifdef _MSC_VER MakeSureStackPagesMapped(slots * kPointerSize); #endif @@ -269,7 +269,7 @@ void LCodeGen::GenerateOsrPrologue() { // optimized frame. 
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); ASSERT(slots >= 0); - __ subq(rsp, Immediate(slots * kPointerSize)); + __ subp(rsp, Immediate(slots * kPointerSize)); } @@ -1371,14 +1371,14 @@ void LCodeGen::DoMulI(LMulI* instr) { } else if (right->IsStackSlot()) { if (instr->hydrogen_value()->representation().IsSmi()) { __ SmiToInteger64(left, left); - __ imul(left, ToOperand(right)); + __ imulp(left, ToOperand(right)); } else { __ imull(left, ToOperand(right)); } } else { if (instr->hydrogen_value()->representation().IsSmi()) { __ SmiToInteger64(left, left); - __ imul(left, ToRegister(right)); + __ imulp(left, ToRegister(right)); } else { __ imull(left, ToRegister(right)); } @@ -1566,13 +1566,13 @@ void LCodeGen::DoSubI(LSubI* instr) { Immediate(ToInteger32(LConstantOperand::cast(right)))); } else if (right->IsRegister()) { if (instr->hydrogen_value()->representation().IsSmi()) { - __ subq(ToRegister(left), ToRegister(right)); + __ subp(ToRegister(left), ToRegister(right)); } else { __ subl(ToRegister(left), ToRegister(right)); } } else { if (instr->hydrogen_value()->representation().IsSmi()) { - __ subq(ToRegister(left), ToOperand(right)); + __ subp(ToRegister(left), ToOperand(right)); } else { __ subl(ToRegister(left), ToOperand(right)); } @@ -1754,12 +1754,12 @@ void LCodeGen::DoAddI(LAddI* instr) { LOperand* right = instr->right(); Representation target_rep = instr->hydrogen()->representation(); - bool is_q = target_rep.IsSmi() || target_rep.IsExternal(); + bool is_p = target_rep.IsSmi() || target_rep.IsExternal(); if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) { if (right->IsConstantOperand()) { int32_t offset = ToInteger32(LConstantOperand::cast(right)); - if (is_q) { + if (is_p) { __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); } else { @@ -1768,7 +1768,7 @@ void LCodeGen::DoAddI(LAddI* instr) { } } else { Operand address(ToRegister(left), ToRegister(right), times_1, 0); - if (is_q) { + if (is_p) { __ lea(ToRegister(instr->result()), address); } else { __ leal(ToRegister(instr->result()), address); @@ -1776,22 +1776,22 @@ void LCodeGen::DoAddI(LAddI* instr) { } } else { if (right->IsConstantOperand()) { - if (is_q) { - __ addq(ToRegister(left), + if (is_p) { + __ addp(ToRegister(left), Immediate(ToInteger32(LConstantOperand::cast(right)))); } else { __ addl(ToRegister(left), Immediate(ToInteger32(LConstantOperand::cast(right)))); } } else if (right->IsRegister()) { - if (is_q) { - __ addq(ToRegister(left), ToRegister(right)); + if (is_p) { + __ addp(ToRegister(left), ToRegister(right)); } else { __ addl(ToRegister(left), ToRegister(right)); } } else { - if (is_q) { - __ addq(ToRegister(left), ToOperand(right)); + if (is_p) { + __ addp(ToRegister(left), ToOperand(right)); } else { __ addl(ToRegister(left), ToOperand(right)); } @@ -2230,9 +2230,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { __ ucomisd(input_reg, input_reg); EmitFalseBranch(instr, parity_odd); - __ subq(rsp, Immediate(kDoubleSize)); + __ subp(rsp, Immediate(kDoubleSize)); __ movsd(MemOperand(rsp, 0), input_reg); - __ addq(rsp, Immediate(kDoubleSize)); + __ addp(rsp, Immediate(kDoubleSize)); int offset = sizeof(kHoleNanUpper32); __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32)); @@ -2459,7 +2459,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, // actual type and do a signed compare with the width of the type range. 
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset)); __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset)); - __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ j(above, is_false); @@ -2690,7 +2690,7 @@ void LCodeGen::DoReturn(LReturn* instr) { Register return_addr_reg = reg.is(rcx) ? rbx : rcx; __ PopReturnAddressTo(return_addr_reg); __ shl(reg, Immediate(kPointerSizeLog2)); - __ addq(rsp, reg); + __ addp(rsp, reg); __ jmp(return_addr_reg); } if (no_frame_start != -1) { @@ -3414,7 +3414,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { ASSERT(instr->target()->IsRegister()); Register target = ToRegister(instr->target()); generator.BeforeCall(__ CallSize(target)); - __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag)); __ call(target); } generator.AfterCall(); @@ -3786,13 +3786,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) { __ jmp(&done, Label::kNear); __ bind(&positive); __ fldln2(); - __ subq(rsp, Immediate(kDoubleSize)); + __ subp(rsp, Immediate(kDoubleSize)); __ movsd(Operand(rsp, 0), input_reg); __ fld_d(Operand(rsp, 0)); __ fyl2x(); __ fstp_d(Operand(rsp, 0)); __ movsd(input_reg, Operand(rsp, 0)); - __ addq(rsp, Immediate(kDoubleSize)); + __ addp(rsp, Immediate(kDoubleSize)); __ bind(&done); } diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index 48dc16a..e5b4160 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -236,7 +236,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. // Store pointer to buffer. movp(Operand(scratch, 0), addr); // Increment buffer top. - addq(scratch, Immediate(kPointerSize)); + addp(scratch, Immediate(kPointerSize)); // Write back new top of buffer. StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); // Call stub on end of buffer. @@ -291,7 +291,7 @@ void MacroAssembler::InNewSpace(Register object, Move(kScratchRegister, reinterpret_cast
(-new_space_start), Assembler::RelocInfoNone()); if (scratch.is(object)) { - addq(scratch, kScratchRegister); + addp(scratch, kScratchRegister); } else { lea(scratch, Operand(object, kScratchRegister, times_1, 0)); } @@ -560,7 +560,7 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { - addq(rsp, Immediate(num_arguments * kPointerSize)); + addp(rsp, Immediate(num_arguments * kPointerSize)); } LoadRoot(rax, Heap::kUndefinedValueRootIndex); } @@ -886,7 +886,7 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, } // R12 to r15 are callee save on all platforms. if (fp_mode == kSaveFPRegs) { - subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); + subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movsd(Operand(rsp, i * kDoubleSize), reg); @@ -904,7 +904,7 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, XMMRegister reg = XMMRegister::from_code(i); movsd(reg, Operand(rsp, i * kDoubleSize)); } - addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); + addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); } for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { Register reg = saved_regs[i]; @@ -1449,7 +1449,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { ASSERT(!dst.is(kScratchRegister)); switch (constant->value()) { case 1: - addq(dst, kSmiConstantRegister); + addp(dst, kSmiConstantRegister); return; case 2: lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); @@ -1462,7 +1462,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { return; default: Register constant_reg = GetSmiConstant(constant); - addq(dst, constant_reg); + addp(dst, constant_reg); return; } } else { @@ -1481,7 +1481,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { return; default: LoadSmiConstant(dst, constant); - addq(dst, src); + addp(dst, src); return; } } @@ -1508,16 +1508,16 @@ void MacroAssembler::SmiAddConstant(Register dst, } else if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); LoadSmiConstant(kScratchRegister, constant); - addq(dst, kScratchRegister); + addp(dst, kScratchRegister); if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { j(no_overflow, bailout_label, near_jump); ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); - subq(dst, kScratchRegister); + subp(dst, kScratchRegister); } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { Label done; j(no_overflow, &done, Label::kNear); - subq(dst, kScratchRegister); + subp(dst, kScratchRegister); jmp(bailout_label, near_jump); bind(&done); } else { @@ -1531,7 +1531,7 @@ void MacroAssembler::SmiAddConstant(Register dst, ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); LoadSmiConstant(dst, constant); - addq(dst, src); + addp(dst, src); j(overflow, bailout_label, near_jump); } } @@ -1545,17 +1545,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { } else if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); Register constant_reg = GetSmiConstant(constant); - subq(dst, constant_reg); + subp(dst, constant_reg); } else { if (constant->value() == Smi::kMinValue) { LoadSmiConstant(dst, constant); // Adding and subtracting the min-value gives the same result, it only // differs on the 
overflow bit, which we don't check here. - addq(dst, src); + addp(dst, src); } else { // Subtract by adding the negation. LoadSmiConstant(dst, Smi::FromInt(-constant->value())); - addq(dst, src); + addp(dst, src); } } } @@ -1574,16 +1574,16 @@ void MacroAssembler::SmiSubConstant(Register dst, } else if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); LoadSmiConstant(kScratchRegister, constant); - subq(dst, kScratchRegister); + subp(dst, kScratchRegister); if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { j(no_overflow, bailout_label, near_jump); ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); - addq(dst, kScratchRegister); + addp(dst, kScratchRegister); } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { Label done; j(no_overflow, &done, Label::kNear); - addq(dst, kScratchRegister); + addp(dst, kScratchRegister); jmp(bailout_label, near_jump); bind(&done); } else { @@ -1600,12 +1600,12 @@ void MacroAssembler::SmiSubConstant(Register dst, ASSERT(!dst.is(kScratchRegister)); movp(dst, src); LoadSmiConstant(kScratchRegister, constant); - subq(dst, kScratchRegister); + subp(dst, kScratchRegister); j(overflow, bailout_label, near_jump); } else { // Subtract by adding the negation. LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); - addq(dst, src); + addp(dst, src); j(overflow, bailout_label, near_jump); } } @@ -1643,15 +1643,15 @@ static void SmiAddHelper(MacroAssembler* masm, Label::Distance near_jump) { if (dst.is(src1)) { Label done; - masm->addq(dst, src2); + masm->addp(dst, src2); masm->j(no_overflow, &done, Label::kNear); // Restore src1. - masm->subq(dst, src2); + masm->subp(dst, src2); masm->jmp(on_not_smi_result, near_jump); masm->bind(&done); } else { masm->movp(dst, src1); - masm->addq(dst, src2); + masm->addp(dst, src2); masm->j(overflow, on_not_smi_result, near_jump); } } @@ -1687,12 +1687,12 @@ void MacroAssembler::SmiAdd(Register dst, if (!dst.is(src1)) { if (emit_debug_code()) { movp(kScratchRegister, src1); - addq(kScratchRegister, src2); + addp(kScratchRegister, src2); Check(no_overflow, kSmiAdditionOverflow); } lea(dst, Operand(src1, src2, times_1, 0)); } else { - addq(dst, src2); + addp(dst, src2); Assert(no_overflow, kSmiAdditionOverflow); } } @@ -1707,15 +1707,15 @@ static void SmiSubHelper(MacroAssembler* masm, Label::Distance near_jump) { if (dst.is(src1)) { Label done; - masm->subq(dst, src2); + masm->subp(dst, src2); masm->j(no_overflow, &done, Label::kNear); // Restore src1. - masm->addq(dst, src2); + masm->addp(dst, src2); masm->jmp(on_not_smi_result, near_jump); masm->bind(&done); } else { masm->movp(dst, src1); - masm->subq(dst, src2); + masm->subp(dst, src2); masm->j(overflow, on_not_smi_result, near_jump); } } @@ -1753,7 +1753,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm, if (!dst.is(src1)) { masm->movp(dst, src1); } - masm->subq(dst, src2); + masm->subp(dst, src2); masm->Assert(no_overflow, kSmiSubtractionOverflow); } @@ -1785,7 +1785,7 @@ void MacroAssembler::SmiMul(Register dst, Label failure, zero_correct_result; movp(kScratchRegister, src1); // Create backup for later testing. SmiToInteger64(dst, src1); - imul(dst, src2); + imulp(dst, src2); j(overflow, &failure, Label::kNear); // Check for negative zero result. If product is zero, and one @@ -1809,7 +1809,7 @@ void MacroAssembler::SmiMul(Register dst, bind(&correct_result); } else { SmiToInteger64(dst, src1); - imul(dst, src2); + imulp(dst, src2); j(overflow, on_not_smi_result, near_jump); // Check for negative zero result. 
If product is zero, and one // argument is negative, go to slow case. @@ -2176,7 +2176,7 @@ void MacroAssembler::SelectNonSmi(Register dst, // Exactly one operand is a smi. ASSERT_EQ(1, static_cast(kSmiTagMask)); // kScratchRegister still holds src1 & kSmiTag, which is either zero or one. - subq(kScratchRegister, Immediate(1)); + subp(kScratchRegister, Immediate(1)); // If src1 is a smi, then scratch register all 1s, else it is all 0s. movp(dst, src1); xor_(dst, src2); @@ -2289,7 +2289,7 @@ void MacroAssembler::LookupNumberStringCache(Register object, SmiToInteger32( mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); shrl(mask, Immediate(1)); - subq(mask, Immediate(1)); // Make mask. + subp(mask, Immediate(1)); // Make mask. // Calculate the entry in the number string cache. The hash value in the // number string cache for smis is just the smi value, and the hash for @@ -2567,7 +2567,7 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle cell) { void MacroAssembler::Drop(int stack_elements) { if (stack_elements > 0) { - addq(rsp, Immediate(stack_elements * kPointerSize)); + addp(rsp, Immediate(stack_elements * kPointerSize)); } } @@ -2644,7 +2644,8 @@ void MacroAssembler::Pop(const Operand& dst) { leal(rsp, Operand(rsp, 4)); if (scratch.is(kSmiConstantRegister)) { // Restore kSmiConstantRegister. - movp(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue), + movp(kSmiConstantRegister, + reinterpret_cast(Smi::FromInt(kSmiConstantRegisterValue)), Assembler::RelocInfoNone()); } } @@ -2788,7 +2789,7 @@ void MacroAssembler::Popad() { void MacroAssembler::Dropad() { - addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize)); + addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize)); } @@ -2879,7 +2880,7 @@ void MacroAssembler::PopTryHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); ExternalReference handler_address(Isolate::kHandlerAddress, isolate()); Pop(ExternalOperand(handler_address)); - addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); + addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize)); } @@ -2993,7 +2994,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { ret(bytes_dropped); } else { PopReturnAddressTo(scratch); - addq(rsp, Immediate(bytes_dropped)); + addp(rsp, Immediate(bytes_dropped)); PushReturnAddressFrom(scratch); ret(0); } @@ -3199,10 +3200,10 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg, // Slow case. 
if (input_reg.is(result_reg)) { - subq(rsp, Immediate(kDoubleSize)); + subp(rsp, Immediate(kDoubleSize)); movsd(MemOperand(rsp, 0), xmm0); SlowTruncateToI(result_reg, rsp, 0); - addq(rsp, Immediate(kDoubleSize)); + addp(rsp, Immediate(kDoubleSize)); } else { SlowTruncateToI(result_reg, input_reg); } @@ -3219,10 +3220,10 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg, cmpq(result_reg, kScratchRegister); j(not_equal, &done, Label::kNear); - subq(rsp, Immediate(kDoubleSize)); + subp(rsp, Immediate(kDoubleSize)); movsd(MemOperand(rsp, 0), input_reg); SlowTruncateToI(result_reg, rsp, 0); - addq(rsp, Immediate(kDoubleSize)); + addp(rsp, Immediate(kDoubleSize)); bind(&done); } @@ -3716,7 +3717,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Handle adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); if (!code_constant.is_null()) { Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); - addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); + addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); } else if (!code_register.is(rdx)) { movp(rdx, code_register); } @@ -3824,14 +3825,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, if (save_doubles) { int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize + arg_stack_space * kRegisterSize; - subq(rsp, Immediate(space)); + subp(rsp, Immediate(space)); int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { XMMRegister reg = XMMRegister::FromAllocationIndex(i); movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); } } else if (arg_stack_space > 0) { - subq(rsp, Immediate(arg_stack_space * kRegisterSize)); + subp(rsp, Immediate(arg_stack_space * kRegisterSize)); } // Get the required frame alignment for the OS. @@ -4193,7 +4194,7 @@ void MacroAssembler::Allocate(int object_size, if (!top_reg.is(result)) { movp(top_reg, result); } - addq(top_reg, Immediate(object_size)); + addp(top_reg, Immediate(object_size)); j(carry, gc_required); Operand limit_operand = ExternalOperand(allocation_limit); cmpq(top_reg, limit_operand); @@ -4205,9 +4206,9 @@ void MacroAssembler::Allocate(int object_size, bool tag_result = (flags & TAG_OBJECT) != 0; if (top_reg.is(result)) { if (tag_result) { - subq(result, Immediate(object_size - kHeapObjectTag)); + subp(result, Immediate(object_size - kHeapObjectTag)); } else { - subq(result, Immediate(object_size)); + subp(result, Immediate(object_size)); } } else if (tag_result) { // Tag the result if requested. @@ -4269,7 +4270,7 @@ void MacroAssembler::Allocate(Register object_size, if (!object_size.is(result_end)) { movp(result_end, object_size); } - addq(result_end, result); + addp(result_end, result); j(carry, gc_required); Operand limit_operand = ExternalOperand(allocation_limit); cmpq(result_end, limit_operand); @@ -4280,7 +4281,7 @@ void MacroAssembler::Allocate(Register object_size, // Tag the result if requested. if ((flags & TAG_OBJECT) != 0) { - addq(result, Immediate(kHeapObjectTag)); + addp(result, Immediate(kHeapObjectTag)); } } @@ -4328,7 +4329,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, kHeaderAlignment)); and_(scratch1, Immediate(~kObjectAlignmentMask)); if (kHeaderAlignment > 0) { - subq(scratch1, Immediate(kHeaderAlignment)); + subp(scratch1, Immediate(kHeaderAlignment)); } // Allocate two byte string in new space. 
@@ -4363,10 +4364,10 @@ void MacroAssembler::AllocateAsciiString(Register result, kObjectAlignmentMask; movl(scratch1, length); ASSERT(kCharSize == 1); - addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); + addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); and_(scratch1, Immediate(~kObjectAlignmentMask)); if (kHeaderAlignment > 0) { - subq(scratch1, Immediate(kHeaderAlignment)); + subp(scratch1, Immediate(kHeaderAlignment)); } // Allocate ASCII string in new space. @@ -4517,7 +4518,7 @@ void MacroAssembler::CopyBytes(Register destination, andl(scratch, Immediate(kPointerSize - 1)); movp(length, Operand(source, scratch, times_1, -kPointerSize)); movp(Operand(destination, scratch, times_1, -kPointerSize), length); - addq(destination, scratch); + addp(destination, scratch); if (min_length <= kLongStringLimit) { jmp(&done, Label::kNear); @@ -4533,7 +4534,7 @@ void MacroAssembler::CopyBytes(Register destination, // Move remaining bytes of length. movp(scratch, Operand(source, length, times_1, -kPointerSize)); movp(Operand(destination, length, times_1, -kPointerSize), scratch); - addq(destination, length); + addp(destination, length); jmp(&done, Label::kNear); bind(&short_string); @@ -4562,7 +4563,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, jmp(&entry); bind(&loop); movp(Operand(start_offset, 0), filler); - addq(start_offset, Immediate(kPointerSize)); + addp(start_offset, Immediate(kPointerSize)); bind(&entry); cmpq(start_offset, end_offset); j(less, &loop); @@ -4716,7 +4717,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) { ASSERT(IsPowerOf2(frame_alignment)); int argument_slots_on_stack = ArgumentStackSlotsForCFunctionCall(num_arguments); - subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); + subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); and_(rsp, Immediate(-frame_alignment)); movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister); } @@ -4874,7 +4875,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Immediate((Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1))); - addq(bitmap_reg, rcx); + addp(bitmap_reg, rcx); movp(rcx, addr_reg); shrl(rcx, Immediate(kPointerSizeLog2)); and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); @@ -4910,7 +4911,7 @@ void MacroAssembler::EnsureNotWhite( Label ok; Push(mask_scratch); // shl. May overflow making the check conservative. - addq(mask_scratch, mask_scratch); + addp(mask_scratch, mask_scratch); testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); j(zero, &ok, Label::kNear); int3(); @@ -4960,11 +4961,11 @@ void MacroAssembler::EnsureNotWhite( ASSERT(kOneByteStringTag == 0x04); and_(length, Immediate(kStringEncodingMask)); xor_(length, Immediate(kStringEncodingMask)); - addq(length, Immediate(0x04)); + addp(length, Immediate(0x04)); // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. 
-  imul(length, FieldOperand(value, String::kLengthOffset));
+  imulp(length, FieldOperand(value, String::kLengthOffset));
   shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
-  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
   and_(length, Immediate(~kObjectAlignmentMask));
 
   bind(&is_data_object);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index fe709c9..6c9c19f 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -336,7 +336,7 @@ class MacroAssembler: public Assembler {
     ExternalReference roots_array_start =
         ExternalReference::roots_array_start(isolate());
     Move(kRootRegister, roots_array_start);
-    addq(kRootRegister, Immediate(kRootRegisterBias));
+    addp(kRootRegister, Immediate(kRootRegisterBias));
   }
 
   // ---------------------------------------------------------------------------
@@ -846,7 +846,7 @@ class MacroAssembler: public Assembler {
   void PushReturnAddressFrom(Register src) { pushq(src); }
   void PopReturnAddressTo(Register dst) { popq(dst); }
   void Move(Register dst, ExternalReference ext) {
-    movp(dst, reinterpret_cast<Address>(ext.address()),
+    movp(dst, reinterpret_cast<void*>(ext.address()),
         RelocInfo::EXTERNAL_REFERENCE);
   }
 
@@ -863,7 +863,7 @@ class MacroAssembler: public Assembler {
     ASSERT(!RelocInfo::IsNone(rmode));
     ASSERT(value->IsHeapObject());
     ASSERT(!isolate()->heap()->InNewSpace(*value));
-    movp(dst, value.location(), rmode);
+    movp(dst, reinterpret_cast<void*>(value.location()), rmode);
   }
 
   // Control Flow
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 208bb60..3d5d85b 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -166,7 +166,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
   ASSERT(reg >= 0);
   ASSERT(reg < num_registers_);
   if (by != 0) {
-    __ addq(register_location(reg), Immediate(by));
+    __ addp(register_location(reg), Immediate(by));
   }
 }
 
@@ -175,7 +175,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
   CheckPreemption();
   // Pop Code* offset from backtrack stack, add Code* and jump to location.
   Pop(rbx);
-  __ addq(rbx, code_object_pointer());
+  __ addp(rbx, code_object_pointer());
   __ jmp(rbx);
 }
 
@@ -243,7 +243,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
   Label fallthrough;
   __ movq(rdx, register_location(start_reg));  // Offset of start of capture
   __ movq(rbx, register_location(start_reg + 1));  // Offset of end of capture
-  __ subq(rbx, rdx);  // Length of capture.
+  __ subp(rbx, rdx);  // Length of capture.
 
   // -----------------------
   // rdx  = Start offset of capture.
@@ -275,7 +275,7 @@
     __ lea(r9, Operand(rsi, rdx, times_1, 0));
     __ lea(r11, Operand(rsi, rdi, times_1, 0));
-    __ addq(rbx, r9);  // End of capture
+    __ addp(rbx, r9);  // End of capture
     // ---------------------
     // r11 - current input character address
     // r9 - current capture character address
@@ -308,8 +308,8 @@
     __ j(equal, on_no_match);
     __ bind(&loop_increment);
     // Increment pointers into match and capture strings.
-    __ addq(r11, Immediate(1));
-    __ addq(r9, Immediate(1));
+    __ addp(r11, Immediate(1));
+    __ addp(r9, Immediate(1));
     // Compare to end of capture, and loop if not done.
     __ cmpq(r9, rbx);
     __ j(below, &loop);
@@ -392,7 +392,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
   // Find length of back-referenced capture.
   __ movq(rdx, register_location(start_reg));
   __ movq(rax, register_location(start_reg + 1));
-  __ subq(rax, rdx);  // Length to check.
+  __ subp(rax, rdx);  // Length to check.
 
   // Fail on partial or illegal capture (start of capture after end of capture).
   // This must not happen (no back-reference can reference a capture that wasn't
@@ -413,7 +413,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
   // Compute pointers to match string and capture string
   __ lea(rbx, Operand(rsi, rdi, times_1, 0));  // Start of match.
-  __ addq(rdx, rsi);  // Start of capture.
+  __ addp(rdx, rsi);  // Start of capture.
   __ lea(r9, Operand(rdx, rax, times_1, 0));  // End of capture
 
   // -----------------------
@@ -433,8 +433,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
   }
   BranchOrBacktrack(not_equal, on_no_match);
   // Increment pointers into capture and match string.
-  __ addq(rbx, Immediate(char_size()));
-  __ addq(rdx, Immediate(char_size()));
+  __ addp(rbx, Immediate(char_size()));
+  __ addp(rdx, Immediate(char_size()));
   // Check if we have reached end of match area.
   __ cmpq(rdx, r9);
   __ j(below, &loop);
@@ -719,7 +719,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
       ExternalReference::address_of_stack_limit(isolate());
   __ movp(rcx, rsp);
   __ Move(kScratchRegister, stack_limit);
-  __ subq(rcx, Operand(kScratchRegister, 0));
+  __ subp(rcx, Operand(kScratchRegister, 0));
   // Handle it if the stack pointer is already below the stack limit.
   __ j(below_equal, &stack_limit_hit);
   // Check if there is room for the variable number of registers above
@@ -741,13 +741,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   __ bind(&stack_ok);
 
   // Allocate space on stack for registers.
-  __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+  __ subp(rsp, Immediate(num_registers_ * kPointerSize));
   // Load string length.
   __ movp(rsi, Operand(rbp, kInputEnd));
   // Load input position.
   __ movp(rdi, Operand(rbp, kInputStart));
   // Set up rdi to be negative offset from string end.
-  __ subq(rdi, rsi);
+  __ subp(rdi, rsi);
   // Set rax to address of char before start of the string
   // (effectively string position -1).
   __ movp(rbx, Operand(rbp, kStartIndex));
@@ -824,11 +824,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     __ movp(rdx, Operand(rbp, kStartIndex));
     __ movp(rbx, Operand(rbp, kRegisterOutput));
     __ movp(rcx, Operand(rbp, kInputEnd));
-    __ subq(rcx, Operand(rbp, kInputStart));
+    __ subp(rcx, Operand(rbp, kInputStart));
     if (mode_ == UC16) {
       __ lea(rcx, Operand(rcx, rdx, times_2, 0));
     } else {
-      __ addq(rcx, rdx);
+      __ addp(rcx, rdx);
     }
     for (int i = 0; i < num_saved_registers_; i++) {
       __ movq(rax, register_location(i));
        // Keep capture start in rdx for the zero-length check later.
        __ movp(rdx, rax);
      }
-      __ addq(rax, rcx);  // Convert to index from start, not end.
+      __ addp(rax, rcx);  // Convert to index from start, not end.
      if (mode_ == UC16) {
        __ sar(rax, Immediate(1));  // Convert byte index to character index.
      }
@@ -851,14 +851,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     // Capture results have been stored, so the number of remaining global
     // output registers is reduced by the number of stored captures.
     __ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
-    __ subq(rcx, Immediate(num_saved_registers_));
+    __ subp(rcx, Immediate(num_saved_registers_));
     // Check whether we have enough room for another set of capture results.
     __ cmpq(rcx, Immediate(num_saved_registers_));
     __ j(less, &exit_label_);
     __ movp(Operand(rbp, kNumOutputRegisters), rcx);
     // Advance the location for output.
-    __ addq(Operand(rbp, kRegisterOutput),
+    __ addp(Operand(rbp, kRegisterOutput),
             Immediate(num_saved_registers_ * kIntSize));
 
     // Prepare rax to initialize registers with its value in the next run.
@@ -1091,7 +1091,7 @@ void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) { void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) { __ movq(backtrack_stackpointer(), register_location(reg)); - __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd)); + __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd)); } @@ -1142,7 +1142,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) { void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) { __ movp(rax, backtrack_stackpointer()); - __ subq(rax, Operand(rbp, kStackHighEnd)); + __ subp(rax, Operand(rbp, kStackHighEnd)); __ movp(register_location(reg), rax); } @@ -1323,12 +1323,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) { void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) { __ bind(label); - __ subq(Operand(rsp, 0), code_object_pointer()); + __ subp(Operand(rsp, 0), code_object_pointer()); } void RegExpMacroAssemblerX64::SafeReturn() { - __ addq(Operand(rsp, 0), code_object_pointer()); + __ addp(Operand(rsp, 0), code_object_pointer()); __ ret(0); } @@ -1336,14 +1336,14 @@ void RegExpMacroAssemblerX64::SafeReturn() { void RegExpMacroAssemblerX64::Push(Register source) { ASSERT(!source.is(backtrack_stackpointer())); // Notice: This updates flags, unlike normal Push. - __ subq(backtrack_stackpointer(), Immediate(kIntSize)); + __ subp(backtrack_stackpointer(), Immediate(kIntSize)); __ movl(Operand(backtrack_stackpointer(), 0), source); } void RegExpMacroAssemblerX64::Push(Immediate value) { // Notice: This updates flags, unlike normal Push. - __ subq(backtrack_stackpointer(), Immediate(kIntSize)); + __ subp(backtrack_stackpointer(), Immediate(kIntSize)); __ movl(Operand(backtrack_stackpointer(), 0), value); } @@ -1367,7 +1367,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() { void RegExpMacroAssemblerX64::Push(Label* backtrack_target) { - __ subq(backtrack_stackpointer(), Immediate(kIntSize)); + __ subp(backtrack_stackpointer(), Immediate(kIntSize)); __ movl(Operand(backtrack_stackpointer(), 0), backtrack_target); MarkPositionForCodeRelativeFixup(); } @@ -1377,12 +1377,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) { ASSERT(!target.is(backtrack_stackpointer())); __ movsxlq(target, Operand(backtrack_stackpointer(), 0)); // Notice: This updates flags, unlike normal Pop. - __ addq(backtrack_stackpointer(), Immediate(kIntSize)); + __ addp(backtrack_stackpointer(), Immediate(kIntSize)); } void RegExpMacroAssemblerX64::Drop() { - __ addq(backtrack_stackpointer(), Immediate(kIntSize)); + __ addp(backtrack_stackpointer(), Immediate(kIntSize)); } diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc index f783637..4f127ef 100644 --- a/src/x64/stub-cache-x64.cc +++ b/src/x64/stub-cache-x64.cc @@ -102,7 +102,7 @@ static void ProbeTable(Isolate* isolate, #endif // Jump to the first instruction in the code stub. - __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); __ jmp(kScratchRegister); __ bind(&miss); diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc index a46770d..9c65b58 100644 --- a/test/cctest/test-assembler-x64.cc +++ b/test/cctest/test-assembler-x64.cc @@ -153,7 +153,7 @@ TEST(AssemblerX64ImulOperation) { // Assemble a simple function that multiplies arguments returning the high // word. 
__ movq(rax, arg2); - __ imul(arg1); + __ imulq(arg1); __ movq(rax, rdx); __ ret(0); diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc index ca843c6..0e708ab 100644 --- a/test/cctest/test-disasm-x64.cc +++ b/test/cctest/test-disasm-x64.cc @@ -112,7 +112,7 @@ TEST(DisasmX64) { __ movzxwq(rdx, Operand(rcx, 0)); __ nop(); - __ imul(rdx, rcx); + __ imulq(rdx, rcx); __ shld(rdx, rcx); __ shrd(rdx, rcx); __ bts(Operand(rdx, 0), rcx); @@ -162,9 +162,9 @@ TEST(DisasmX64) { __ not_(rdx); __ testq(Operand(rbx, rcx, times_4, 10000), rdx); - __ imul(rdx, Operand(rbx, rcx, times_4, 10000)); - __ imul(rdx, rcx, Immediate(12)); - __ imul(rdx, rcx, Immediate(1000)); + __ imulq(rdx, Operand(rbx, rcx, times_4, 10000)); + __ imulq(rdx, rcx, Immediate(12)); + __ imulq(rdx, rcx, Immediate(1000)); __ incq(rdx); __ incq(Operand(rbx, rcx, times_4, 10000)); @@ -216,8 +216,8 @@ TEST(DisasmX64) { __ xor_(rbx, Immediate(12345)); - __ imul(rdx, rcx, Immediate(12)); - __ imul(rdx, rcx, Immediate(1000)); + __ imulq(rdx, rcx, Immediate(12)); + __ imulq(rdx, rcx, Immediate(1000)); __ cld(); -- 2.7.4
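
Usage sketch (illustrative only, not part of the patch): after this change, call sites select the operand width through the mnemonic suffix -- addp/subp/movp for pointer-sized values such as rsp adjustments, and addq/imulq where a 64-bit operation is meant explicitly, as in the updated tests. A minimal hypothetical helper in the MacroAssembler style used above (the helper name itself is made up for illustration):

  // Reserve one pointer-sized stack slot, spill kScratchRegister into it,
  // and release the slot again. Mirrors the subp/addp pattern this patch
  // applies to rsp adjustments throughout the x64 port.
  void SpillScratchToStack(MacroAssembler* masm) {
    masm->subp(rsp, Immediate(kPointerSize));       // pointer-width stack adjust
    masm->movp(Operand(rsp, 0), kScratchRegister);  // pointer-width store
    // ... code that clobbers kScratchRegister ...
    masm->movp(kScratchRegister, Operand(rsp, 0));  // pointer-width reload
    masm->addp(rsp, Immediate(kPointerSize));       // release the slot
  }

On x64 proper the p and q forms emit the same 64-bit instructions; the distinction matters for the x32-style configuration this series prepares for, where pointers are narrower than 64 bits.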