From: haitao.feng@intel.com Date: Wed, 26 Mar 2014 11:17:53 +0000 (+0000) Subject: Introduce leap, movzxbp, movzxwp, repmovsp and xchgp for x64 port X-Git-Tag: upstream/4.7.83~9993 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=687e524983d2e0234d1207b61aff14a514a6abf2;p=platform%2Fupstream%2Fv8.git Introduce leap, movzxbp, movzxwp, repmovsp and xchgp for x64 port R=verwaest@chromium.org Review URL: https://codereview.chromium.org/211413008 git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@20273 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 --- diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc index 925b6c2..efdd472 100644 --- a/src/x64/assembler-x64.cc +++ b/src/x64/assembler-x64.cc @@ -1224,17 +1224,9 @@ void Assembler::jmp(const Operand& src) { } -void Assembler::lea(Register dst, const Operand& src) { +void Assembler::emit_lea(Register dst, const Operand& src, int size) { EnsureSpace ensure_space(this); - emit_rex_64(dst, src); - emit(0x8D); - emit_operand(dst, src); -} - - -void Assembler::leal(Register dst, const Operand& src) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst, src); + emit_rex(dst, src, size); emit(0x8D); emit_operand(dst, src); } @@ -1473,7 +1465,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) { } -void Assembler::movzxbq(Register dst, const Operand& src) { +void Assembler::emit_movzxb(Register dst, const Operand& src, int size) { EnsureSpace ensure_space(this); // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore // there is no need to make this a 64 bit operation. @@ -1484,26 +1476,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) { } -void Assembler::movzxbl(Register dst, const Operand& src) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst, src); - emit(0x0F); - emit(0xB6); - emit_operand(dst, src); -} - - -void Assembler::movzxwq(Register dst, const Operand& src) { - EnsureSpace ensure_space(this); - emit_optional_rex_32(dst, src); - emit(0x0F); - emit(0xB7); - emit_operand(dst, src); -} - - -void Assembler::movzxwl(Register dst, const Operand& src) { +void Assembler::emit_movzxw(Register dst, const Operand& src, int size) { EnsureSpace ensure_space(this); + // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore + // there is no need to make this a 64 bit operation. emit_optional_rex_32(dst, src); emit(0x0F); emit(0xB7); @@ -1511,8 +1487,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) { } -void Assembler::movzxwl(Register dst, Register src) { +void Assembler::emit_movzxw(Register dst, Register src, int size) { EnsureSpace ensure_space(this); + // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore + // there is no need to make this a 64 bit operation. emit_optional_rex_32(dst, src); emit(0x0F); emit(0xB7); @@ -1535,17 +1513,10 @@ void Assembler::repmovsw() { } -void Assembler::repmovsl() { - EnsureSpace ensure_space(this); - emit(0xF3); - emit(0xA5); -} - - -void Assembler::repmovsq() { +void Assembler::emit_repmovs(int size) { EnsureSpace ensure_space(this); emit(0xF3); - emit_rex_64(); + emit_rex(size); emit(0xA5); } @@ -1789,36 +1760,18 @@ void Assembler::shrd(Register dst, Register src) { } -void Assembler::xchgq(Register dst, Register src) { +void Assembler::emit_xchg(Register dst, Register src, int size) { EnsureSpace ensure_space(this); if (src.is(rax) || dst.is(rax)) { // Single-byte encoding Register other = src.is(rax) ? 
dst : src; - emit_rex_64(other); + emit_rex(other, size); emit(0x90 | other.low_bits()); } else if (dst.low_bits() == 4) { - emit_rex_64(dst, src); - emit(0x87); - emit_modrm(dst, src); - } else { - emit_rex_64(src, dst); - emit(0x87); - emit_modrm(src, dst); - } -} - - -void Assembler::xchgl(Register dst, Register src) { - EnsureSpace ensure_space(this); - if (src.is(rax) || dst.is(rax)) { // Single-byte encoding - Register other = src.is(rax) ? dst : src; - emit_optional_rex_32(other); - emit(0x90 | other.low_bits()); - } else if (dst.low_bits() == 4) { - emit_optional_rex_32(dst, src); + emit_rex(dst, src, size); emit(0x87); emit_modrm(dst, src); } else { - emit_optional_rex_32(src, dst); + emit_rex(src, dst, size); emit(0x87); emit_modrm(src, dst); } diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h index 8364179..e23b72b 100644 --- a/src/x64/assembler-x64.h +++ b/src/x64/assembler-x64.h @@ -516,11 +516,16 @@ class CpuFeatures : public AllStatic { V(idiv) \ V(imul) \ V(inc) \ + V(lea) \ V(mov) \ + V(movzxb) \ + V(movzxw) \ V(neg) \ + V(repmovs) \ V(sbb) \ V(sub) \ - V(test) + V(test) \ + V(xchg) class Assembler : public AssemblerBase { @@ -773,18 +778,14 @@ class Assembler : public AssemblerBase { void movsxwq(Register dst, const Operand& src); void movsxlq(Register dst, Register src); void movsxlq(Register dst, const Operand& src); - void movzxbq(Register dst, const Operand& src); - void movzxbl(Register dst, const Operand& src); - void movzxwq(Register dst, const Operand& src); - void movzxwl(Register dst, const Operand& src); - void movzxwl(Register dst, Register src); // Repeated moves. void repmovsb(); void repmovsw(); - void repmovsl(); - void repmovsq(); + void repmovsp() { emit_repmovs(kPointerSize); } + void repmovsl() { emit_repmovs(kInt32Size); } + void repmovsq() { emit_repmovs(kInt64Size); } // Instruction to load from an immediate 64-bit pointer into RAX. void load_rax(void* ptr, RelocInfo::Mode rmode); @@ -796,10 +797,6 @@ class Assembler : public AssemblerBase { void cmovl(Condition cc, Register dst, Register src); void cmovl(Condition cc, Register dst, const Operand& src); - // Exchange two registers - void xchgq(Register dst, Register src); - void xchgl(Register dst, Register src); - void cmpb(Register dst, Immediate src) { immediate_arithmetic_op_8(0x7, dst, src); } @@ -886,9 +883,6 @@ class Assembler : public AssemblerBase { // Sign-extends eax into edx:eax. void cdq(); - void lea(Register dst, const Operand& src); - void leal(Register dst, const Operand& src); - // Multiply rax by src, put the result in rdx:rax. void mul(Register src); @@ -1483,6 +1477,14 @@ class Assembler : public AssemblerBase { // numbers have a high bit set. 
inline void emit_optional_rex_32(const Operand& op); + void emit_rex(int size) { + if (size == kInt64Size) { + emit_rex_64(); + } else { + ASSERT(size == kInt32Size); + } + } + template<class P1> void emit_rex(P1 p1, int size) { if (size == kInt64Size) { @@ -1696,15 +1698,23 @@ class Assembler : public AssemblerBase { void emit_inc(Register dst, int size); void emit_inc(const Operand& dst, int size); + void emit_lea(Register dst, const Operand& src, int size); + void emit_mov(Register dst, const Operand& src, int size); void emit_mov(Register dst, Register src, int size); void emit_mov(const Operand& dst, Register src, int size); void emit_mov(Register dst, Immediate value, int size); void emit_mov(const Operand& dst, Immediate value, int size); + void emit_movzxb(Register dst, const Operand& src, int size); + void emit_movzxw(Register dst, const Operand& src, int size); + void emit_movzxw(Register dst, Register src, int size); + void emit_neg(Register dst, int size); void emit_neg(const Operand& dst, int size); + void emit_repmovs(int size); + void emit_sbb(Register dst, Register src, int size) { if (size == kInt64Size) { arithmetic_op(0x1b, dst, src); @@ -1764,6 +1774,9 @@ class Assembler : public AssemblerBase { void emit_test(const Operand& op, Register reg, int size); void emit_test(const Operand& op, Immediate mask, int size); + // Exchange two registers + void emit_xchg(Register dst, Register src, int size); + friend class CodePatcher; friend class EnsureSpace; friend class RegExpMacroAssemblerX64; diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc index fdab3f3..d5577f3 100644 --- a/src/x64/builtins-x64.cc +++ b/src/x64/builtins-x64.cc @@ -93,13 +93,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) { FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movp(kScratchRegister, FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset)); - __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize)); + __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize)); __ jmp(kScratchRegister); } static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { - __ lea(rax, FieldOperand(rax, Code::kHeaderSize)); + __ leap(rax, FieldOperand(rax, Code::kHeaderSize)); __ jmp(rax); } @@ -213,7 +213,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, } // Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); + __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset)); __ shl(rdi, Immediate(kPointerSizeLog2)); if (create_memento) { __ addp(rdi, Immediate(AllocationMemento::kSize)); @@ -238,12 +238,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // rax: initial map // rbx: JSObject // rdi: start of next object (including memento if create_memento) - __ lea(rcx, Operand(rbx, JSObject::kHeaderSize)); + __ leap(rcx, Operand(rbx, JSObject::kHeaderSize)); __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); if (count_constructions) { - __ movzxbq(rsi, + __ movzxbp(rsi, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); - __ lea(rsi, + __ leap(rsi, Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize)); // rsi: offset of first field after pre-allocated fields if (FLAG_debug_code) { @@ -255,7 +255,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex); __ InitializeFieldsWithFiller(rcx, rdi, rdx); } else if (create_memento) { - __ lea(rsi, Operand(rdi, -AllocationMemento::kSize)); + __ leap(rsi, Operand(rdi, -AllocationMemento::kSize)); __ InitializeFieldsWithFiller(rcx, rsi, rdx); // Fill in memento fields if necessary. @@ -286,12 +286,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // rbx: JSObject // rdi: start of next object // Calculate total properties described map. - __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); - __ movzxbq(rcx, + __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset)); + __ movzxbp(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset)); __ addp(rdx, rcx); // Calculate unused properties past the end of the in-object properties. - __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); + __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset)); __ subp(rdx, rcx); // Done if no extra properties are to be allocated. __ j(zero, &allocated); @@ -328,7 +328,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // rdx: number of elements { Label loop, entry; __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); - __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize)); + __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize)); __ jmp(&entry); __ bind(&loop); __ movp(Operand(rcx, 0), rdx); @@ -417,7 +417,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ Push(rbx); // Set up pointer to last argument. - __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); + __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset)); // Copy arguments and receiver to the expression stack. Label loop, entry; @@ -476,7 +476,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // Remove caller arguments from the stack and return. 
__ PopReturnAddressTo(rcx); SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2); - __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize)); + __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize)); __ PushReturnAddressFrom(rcx); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->constructed_objects(), 1); @@ -1222,7 +1222,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ j(zero, &no_arguments); __ movp(rbx, args.GetArgumentOperand(1)); __ PopReturnAddressTo(rcx); - __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize)); + __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize)); __ PushReturnAddressFrom(rcx); __ movp(rax, rbx); @@ -1306,7 +1306,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ bind(&no_arguments); __ LoadRoot(rbx, Heap::kempty_stringRootIndex); __ PopReturnAddressTo(rcx); - __ lea(rsp, Operand(rsp, kPointerSize)); + __ leap(rsp, Operand(rsp, kPointerSize)); __ PushReturnAddressFrom(rcx); __ jmp(&argument_is_string); @@ -1352,7 +1352,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { // Remove caller arguments from the stack. __ PopReturnAddressTo(rcx); SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2); - __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize)); + __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize)); __ PushReturnAddressFrom(rcx); } @@ -1381,7 +1381,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Copy receiver and all expected arguments. const int offset = StandardFrameConstants::kCallerSPOffset; - __ lea(rax, Operand(rbp, rax, times_pointer_size, offset)); + __ leap(rax, Operand(rbp, rax, times_pointer_size, offset)); __ Set(r8, -1); // account for receiver Label copy; @@ -1400,7 +1400,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { // Copy receiver and all actual arguments. const int offset = StandardFrameConstants::kCallerSPOffset; - __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset)); + __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset)); __ Set(r8, -1); // account for receiver Label copy; @@ -1469,7 +1469,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); // Compute the target address = code_obj + header_size + osr_offset - __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag)); + __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag)); // Overwrite the return address on the stack. 
__ movq(StackOperandForReturnAddress(0), rax); diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc index ebf4c31..fe333e9 100644 --- a/src/x64/code-stubs-x64.cc +++ b/src/x64/code-stubs-x64.cc @@ -1022,7 +1022,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ SmiToInteger64(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); - __ lea(rdx, Operand(rdx, rcx, times_pointer_size, + __ leap(rdx, Operand(rdx, rcx, times_pointer_size, StandardFrameConstants::kCallerSPOffset)); __ movp(args.GetArgumentOperand(1), rdx); @@ -1043,11 +1043,11 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ xor_(r8, r8); __ testp(rbx, rbx); __ j(zero, &no_parameter_map, Label::kNear); - __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize)); + __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize)); __ bind(&no_parameter_map); // 2. Backing store. - __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize)); + __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize)); // 3. Arguments object. __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize)); @@ -1101,7 +1101,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { // Set up the elements pointer in the allocated arguments object. // If we allocated a parameter map, edi will point there, otherwise to the // backing store. - __ lea(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize)); + __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize)); __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi); // rax = address of new object (tagged) @@ -1120,7 +1120,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ Integer64PlusConstantToSmi(r9, rbx, 2); __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9); __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi); - __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); + __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9); // Copy the parameter slots and the holes in the arguments. @@ -1140,7 +1140,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ subp(r8, r9); __ Move(r11, factory->the_hole_value()); __ movp(rdx, rdi); - __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); + __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); // r9 = loop variable (tagged) // r8 = mapping index (tagged) // r11 = the hole value @@ -1178,7 +1178,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { __ movp(rdx, args.GetArgumentOperand(1)); // Untag rcx for the loop below. 
__ SmiToInteger64(rcx, rcx); - __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0)); + __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0)); __ subp(rdx, kScratchRegister); __ jmp(&arguments_test, Label::kNear); @@ -1225,7 +1225,7 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ movp(args.GetArgumentOperand(2), rcx); __ SmiToInteger64(rcx, rcx); - __ lea(rdx, Operand(rdx, rcx, times_pointer_size, + __ leap(rdx, Operand(rdx, rcx, times_pointer_size, StandardFrameConstants::kCallerSPOffset)); __ movp(args.GetArgumentOperand(1), rdx); @@ -1258,7 +1258,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ movp(args.GetArgumentOperand(2), rcx); __ SmiToInteger64(rcx, rcx); - __ lea(rdx, Operand(rdx, rcx, times_pointer_size, + __ leap(rdx, Operand(rdx, rcx, times_pointer_size, StandardFrameConstants::kCallerSPOffset)); __ movp(args.GetArgumentOperand(1), rdx); @@ -1268,7 +1268,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { __ bind(&try_allocate); __ testp(rcx, rcx); __ j(zero, &add_arguments_object, Label::kNear); - __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); + __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); __ bind(&add_arguments_object); __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize)); @@ -1305,7 +1305,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Set up the elements pointer in the allocated arguments object and // initialize the header in the elements fixed array. - __ lea(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize)); + __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize)); __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi); __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); @@ -1586,15 +1586,15 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // r15: original subject string __ testb(rcx, rcx); // Last use of rcx as encoding of subject string. __ j(zero, &setup_two_byte, Label::kNear); - __ lea(arg_reg_4, + __ leap(arg_reg_4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize)); - __ lea(arg_reg_3, + __ leap(arg_reg_3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize)); __ jmp(&setup_rest, Label::kNear); __ bind(&setup_two_byte); - __ lea(arg_reg_4, + __ leap(arg_reg_4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize)); - __ lea(arg_reg_3, + __ leap(arg_reg_3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize)); __ bind(&setup_rest); @@ -1817,7 +1817,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm, Register scratch) { __ JumpIfSmi(object, label); __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset)); - __ movzxbq(scratch, + __ movzxbp(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); @@ -2039,7 +2039,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { // a heap object has the low bit clear. 
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagMask == 1); - __ lea(rcx, Operand(rax, rdx, times_1, 0)); + __ leap(rcx, Operand(rax, rdx, times_1, 0)); __ testb(rcx, Immediate(kSmiTagMask)); __ j(not_zero, &not_both_objects, Label::kNear); __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx); @@ -2355,7 +2355,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movp(jmp_reg, FieldOperand(jmp_reg, SharedFunctionInfo::kConstructStubOffset)); - __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); + __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); __ jmp(jmp_reg); // rdi: called object @@ -2463,7 +2463,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } else { ASSERT_EQ(2, result_size_); // Pass a pointer to the result location as the first argument. - __ lea(rcx, StackSpaceOperand(2)); + __ leap(rcx, StackSpaceOperand(2)); // Pass a pointer to the Arguments object as the second argument. __ movp(rdx, r14); // argc. __ movp(r8, r15); // argv. @@ -2498,7 +2498,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ movq(rdx, Operand(rsp, 7 * kRegisterSize)); } #endif - __ lea(rcx, Operand(rax, 1)); + __ leap(rcx, Operand(rax, 1)); // Lower 2 bits of rcx are 0 iff rax has failure tag. __ testl(rcx, Immediate(kFailureTagMask)); __ j(zero, &failure_returned); @@ -2723,7 +2723,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); __ Load(rax, entry); } - __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); + __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize)); __ call(kScratchRegister); // Unlink this frame from the handler chain. @@ -3115,7 +3115,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, // Copy from edi to esi using rep movs instruction. __ movl(kScratchRegister, count); __ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy. - __ repmovsq(); + __ repmovsp(); // Find number of bytes left. __ movl(count, kScratchRegister); @@ -3366,11 +3366,11 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ movp(r14, rsi); // esi used by following code. { // Locate character of sub string start. SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1); - __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, + __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, SeqOneByteString::kHeaderSize - kHeapObjectTag)); } // Locate first character of result. - __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize)); + __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize)); // rax: result string // rcx: result length @@ -3391,11 +3391,11 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ movp(r14, rsi); // esi used by following code. { // Locate character of sub string start. SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2); - __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, + __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, SeqOneByteString::kHeaderSize - kHeapObjectTag)); } // Locate first character of result. - __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize)); + __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize)); // rax: result string // rcx: result length @@ -3551,9 +3551,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // start.
This means that loop ends when index reaches zero, which // doesn't need an additional compare. __ SmiToInteger32(length, length); - __ lea(left, + __ leap(left, FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize)); - __ lea(right, + __ leap(right, FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize)); __ negq(length); Register index = length; // index = -length; @@ -3718,7 +3718,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) { __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); // Store the value. - __ lea(rdx, FieldOperand(rdi, + __ leap(rdx, FieldOperand(rdi, rax, times_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); __ movp(Operand(rdx, 0), rcx); @@ -3757,7 +3757,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) { __ Load(rcx, new_space_allocation_top); // Check if it's the end of elements. - __ lea(rdx, FieldOperand(rdi, + __ leap(rdx, FieldOperand(rdi, rax, times_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); __ cmpp(rdx, rcx); @@ -3954,8 +3954,8 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { // Check that both operands are internalized strings. __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset)); __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset)); - __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); - __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); __ or_(tmp1, tmp2); __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask)); @@ -3998,8 +3998,8 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { // types loaded in tmp1 and tmp2. __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset)); __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset)); - __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); - __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear); __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear); @@ -4043,8 +4043,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // types loaded in tmp1 and tmp2. __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset)); __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset)); - __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); - __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); __ movp(tmp3, tmp1); STATIC_ASSERT(kNotStringTag != 0); __ or_(tmp3, tmp2); @@ -4164,7 +4164,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { __ CallExternalReference(miss, 3); // Compute the entry point of the rewritten stub. - __ lea(rdi, FieldOperand(rax, Code::kHeaderSize)); + __ leap(rdi, FieldOperand(rax, Code::kHeaderSize)); __ Pop(rax); __ Pop(rdx); } @@ -4198,7 +4198,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, // Scale the index by multiplying by the entry size. ASSERT(NameDictionary::kEntrySize == 3); - __ lea(index, Operand(index, index, times_2, 0)); // index *= 3. + __ leap(index, Operand(index, index, times_2, 0)); // index *= 3. 
Register entity_name = r0; // Having undefined at this place means the name is not contained. @@ -4268,7 +4268,7 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, // Scale the index by multiplying by the entry size. ASSERT(NameDictionary::kEntrySize == 3); - __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 + __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 // Check if the key is identical to the name. __ cmpp(name, Operand(elements, r1, times_pointer_size, @@ -4329,7 +4329,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // Scale the index by multiplying by the entry size. ASSERT(NameDictionary::kEntrySize == 3); - __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. + __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. // Having undefined at this place means the name is not contained. __ movp(scratch, Operand(dictionary_, @@ -4633,7 +4633,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { __ bind(&fast_elements); __ SmiToInteger32(kScratchRegister, rcx); __ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); - __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size, + __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size, FixedArrayBase::kHeaderSize)); __ movp(Operand(rcx, 0), rax); // Update the write barrier for the array store. @@ -4677,7 +4677,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE ? kPointerSize : 0; - __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset)); + __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset)); __ jmp(rcx); // Return to IC Miss stub, continuation still on stack. } @@ -4698,7 +4698,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { __ pushq(arg_reg_2); // Calculate the original stack pointer and store it in the second arg. - __ lea(arg_reg_2, + __ leap(arg_reg_2, Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize)); // Calculate the function address to the first arg. @@ -5014,7 +5014,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { // Load the map's "bit field 2" into |result|. We only need the first byte, // but the following masking takes care of that anyway. - __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset)); + __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset)); // Retrieve elements_kind from bit field 2. __ and_(rcx, Immediate(Map::kElementsKindMask)); __ shr(rcx, Immediate(Map::kElementsKindShift)); @@ -5134,7 +5134,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { ASSERT(!api_function_address.is(arguments_arg)); // v8::InvocationCallback's argument. - __ lea(arguments_arg, StackSpaceOperand(0)); + __ leap(arguments_arg, StackSpaceOperand(0)); Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); @@ -5183,17 +5183,17 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { // Allocate v8::AccessorInfo in non-GCed stack space. const int kArgStackSpace = 1; - __ lea(name_arg, Operand(rsp, kPCOnStackSize)); + __ leap(name_arg, Operand(rsp, kPCOnStackSize)); __ PrepareCallApiFunction(kArgStackSpace); - __ lea(scratch, Operand(name_arg, 1 * kPointerSize)); + __ leap(scratch, Operand(name_arg, 1 * kPointerSize)); // v8::PropertyAccessorInfo::args_. 
__ movp(StackSpaceOperand(0), scratch); // The context register (rsi) has been saved in PrepareCallApiFunction and // could be used to pass arguments. - __ lea(accessor_info_arg, StackSpaceOperand(0)); + __ leap(accessor_info_arg, StackSpaceOperand(0)); Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc index 4306af9..333889d 100644 --- a/src/x64/codegen-x64.cc +++ b/src/x64/codegen-x64.cc @@ -300,7 +300,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Allocate new backing store. __ bind(&new_backing_store); - __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize)); + __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize)); __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT); // Set backing store's map __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex); @@ -387,7 +387,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset)); // r8 : source FixedDoubleArray // r9 : number of elements - __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize)); + __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize)); __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT); // r11: destination FixedArray __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex); @@ -606,7 +606,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, __ movq(temp2, double_scratch); __ subsd(double_scratch, result); __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); - __ lea(temp1, Operand(temp2, 0x1ff800)); + __ leaq(temp1, Operand(temp2, 0x1ff800)); __ and_(temp2, Immediate(0x7ff)); __ shr(temp1, Immediate(11)); __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc index dcf2341..36d5df6 100644 --- a/src/x64/debug-x64.cc +++ b/src/x64/debug-x64.cc @@ -156,7 +156,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, // Read current padding counter and skip corresponding number of words. __ Pop(kScratchRegister); __ SmiToInteger32(kScratchRegister, kScratchRegister); - __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0)); + __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0)); // Get rid of the internal frame. } @@ -327,7 +327,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { __ movp(Operand(rax, 0), Immediate(0)); // We do not know our frame height, but set rsp based on rbp. - __ lea(rsp, Operand(rbp, -1 * kPointerSize)); + __ leap(rsp, Operand(rbp, -1 * kPointerSize)); __ Pop(rdi); // Function. __ popq(rbp); @@ -338,7 +338,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) { // Get function code. __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset)); - __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize)); + __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize)); // Re-run JSFunction, rdi is function, rsi is context. __ jmp(rdx); diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc index dd2717b..4bc644d 100644 --- a/src/x64/deoptimizer-x64.cc +++ b/src/x64/deoptimizer-x64.cc @@ -196,7 +196,7 @@ void Deoptimizer::EntryGenerator::Generate() { // Get the address of the location in the code object // and compute the fp-to-sp delta in register arg5. 
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize)); - __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize + + __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize + kPCOnStackSize)); __ subp(arg5, rbp); @@ -251,7 +251,7 @@ void Deoptimizer::EntryGenerator::Generate() { // Unwind the stack down to - but not including - the unwinding // limit and copy the contents of the activation frame to the input // frame description. - __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); + __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset())); Label pop_loop_header; __ jmp(&pop_loop_header); Label pop_loop; @@ -281,7 +281,7 @@ void Deoptimizer::EntryGenerator::Generate() { // last FrameDescription**. __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); __ movp(rax, Operand(rax, Deoptimizer::output_offset())); - __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0)); + __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0)); __ jmp(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: rbx = current FrameDescription*, rcx = loop index. diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc index 8181b3d..e779c6d 100644 --- a/src/x64/full-codegen-x64.cc +++ b/src/x64/full-codegen-x64.cc @@ -271,7 +271,7 @@ void FullCodeGenerator::Generate() { // The receiver is just before the parameters on the caller's stack. int num_parameters = info->scope()->num_parameters(); int offset = num_parameters * kPointerSize; - __ lea(rdx, + __ leap(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset)); __ Push(rdx); __ Push(Smi::FromInt(num_parameters)); @@ -2012,7 +2012,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) { __ movp(rcx, rsi); __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx, kDontSaveFPRegs); - __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset)); + __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset)); __ cmpp(rsp, rbx); __ j(equal, &post_runtime); __ Push(rax); // generator object @@ -2934,7 +2934,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { __ testb(FieldOperand(rbx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); __ j(not_zero, if_false); - __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); + __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); __ j(below, if_false); __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); @@ -3036,7 +3036,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( // Calculate the end of the descriptor array. __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize)); SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2); - __ lea(rcx, + __ leap(rcx, Operand( r8, index.reg, index.scale, DescriptorArray::kFirstOffset)); // Calculate location of the first key name. 
@@ -4004,7 +4004,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ AllocateAsciiString(result_pos, string_length, scratch, index, string, &bailout); __ movp(result_operand, result_pos); - __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize)); + __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize)); __ movp(string, separator_operand); __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset), @@ -4032,7 +4032,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { FixedArray::kHeaderSize)); __ SmiToInteger32(string_length, FieldOperand(string, String::kLengthOffset)); - __ lea(string, + __ leap(string, FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(result_pos, string, string_length); __ incl(index); @@ -4076,7 +4076,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { FixedArray::kHeaderSize)); __ SmiToInteger32(string_length, FieldOperand(string, String::kLengthOffset)); - __ lea(string, + __ leap(string, FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(result_pos, string, string_length); __ incl(index); @@ -4092,7 +4092,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // count from -array_length to zero, so we don't need to maintain // a loop limit. __ movl(index, array_length_operand); - __ lea(elements, FieldOperand(elements, index, times_pointer_size, + __ leap(elements, FieldOperand(elements, index, times_pointer_size, FixedArray::kHeaderSize)); __ negq(index); @@ -4101,7 +4101,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ movp(string, separator_operand); __ SmiToInteger32(scratch, FieldOperand(string, String::kLengthOffset)); - __ lea(string, + __ leap(string, FieldOperand(string, SeqOneByteString::kHeaderSize)); __ movp(separator_operand, string); @@ -4127,7 +4127,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ movp(string, Operand(elements, index, times_pointer_size, 0)); __ SmiToInteger32(string_length, FieldOperand(string, String::kLengthOffset)); - __ lea(string, + __ leap(string, FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(result_pos, string, string_length); __ incq(index); diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc index 075b8bf..5449095 100644 --- a/src/x64/ic-x64.cc +++ b/src/x64/ic-x64.cc @@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Store the value at the masked, scaled index. const int kValueOffset = kElementsStartOffset + kPointerSize; - __ lea(scratch1, Operand(elements, + __ leap(scratch1, Operand(elements, scratch1, times_pointer_size, kValueOffset - kHeapObjectTag)); @@ -467,7 +467,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { } __ LoadAddress(kScratchRegister, cache_field_offsets); __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0)); - __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); + __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset)); __ subp(rdi, rcx); __ j(above_equal, &property_array_property); if (i != 0) { @@ -477,7 +477,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Load in-object property. 
__ bind(&load_in_object_property); - __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset)); + __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset)); __ addp(rcx, rdi); __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); @@ -945,7 +945,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { Operand mapped_location = GenerateMappedArgumentsLookup( masm, rdx, rcx, rbx, rdi, r8, &notin, &slow); __ movp(mapped_location, rax); - __ lea(r9, mapped_location); + __ leap(r9, mapped_location); __ movp(r8, rax); __ RecordWrite(rbx, r9, @@ -959,7 +959,7 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { Operand unmapped_location = GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow); __ movp(unmapped_location, rax); - __ lea(r9, unmapped_location); + __ leap(r9, unmapped_location); __ movp(r8, rax); __ RecordWrite(rbx, r9, diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc index d788496..89c7e4c 100644 --- a/src/x64/lithium-codegen-x64.cc +++ b/src/x64/lithium-codegen-x64.cc @@ -356,7 +356,7 @@ bool LCodeGen::GenerateDeferredCode() { __ pushq(rbp); // Caller's frame pointer. __ Push(Operand(rbp, StandardFrameConstants::kContextOffset)); __ Push(Smi::FromInt(StackFrame::STUB)); - __ lea(rbp, Operand(rsp, 2 * kPointerSize)); + __ leap(rbp, Operand(rsp, 2 * kPointerSize)); Comment(";;; Deferred code"); } code->Generate(); @@ -1687,7 +1687,7 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { if (FLAG_debug_code) { __ Push(string); __ movp(string, FieldOperand(string, HeapObject::kMapOffset)); - __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset)); + __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset)); __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask)); static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; @@ -1755,8 +1755,8 @@ void LCodeGen::DoAddI(LAddI* instr) { if (right->IsConstantOperand()) { int32_t offset = ToInteger32(LConstantOperand::cast(right)); if (is_p) { - __ lea(ToRegister(instr->result()), - MemOperand(ToRegister(left), offset)); + __ leap(ToRegister(instr->result()), + MemOperand(ToRegister(left), offset)); } else { __ leal(ToRegister(instr->result()), MemOperand(ToRegister(left), offset)); @@ -1764,7 +1764,7 @@ } else { Operand address(ToRegister(left), ToRegister(right), times_1, 0); if (is_p) { - __ lea(ToRegister(instr->result()), address); + __ leap(ToRegister(instr->result()), address); } else { __ leal(ToRegister(instr->result()), address); } @@ -2978,7 +2978,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { case EXTERNAL_UINT8_CLAMPED_ELEMENTS: case UINT8_ELEMENTS: case UINT8_CLAMPED_ELEMENTS: - __ movzxbq(result, operand); + __ movzxbp(result, operand); break; case EXTERNAL_INT16_ELEMENTS: case INT16_ELEMENTS: break; case EXTERNAL_UINT16_ELEMENTS: case UINT16_ELEMENTS: - __ movzxwq(result, operand); + __ movzxwp(result, operand); break; case EXTERNAL_INT32_ELEMENTS: case INT32_ELEMENTS: break; @@ -3172,7 +3172,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { Register result = ToRegister(instr->result()); if (instr->hydrogen()->from_inlined()) { - __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize)); + __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize)); } else { //
Check for arguments adapter frame. Label done, adapted; @@ -3907,7 +3907,7 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { Register function = ToRegister(instr->function()); Register code_object = ToRegister(instr->code_object()); - __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize)); + __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize)); __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object); } @@ -3917,10 +3917,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { Register base = ToRegister(instr->base_object()); if (instr->offset()->IsConstantOperand()) { LConstantOperand* offset = LConstantOperand::cast(instr->offset()); - __ lea(result, Operand(base, ToInteger32(offset))); + __ leap(result, Operand(base, ToInteger32(offset))); } else { Register offset = ToRegister(instr->offset()); - __ lea(result, Operand(base, offset, times_1, 0)); + __ leap(result, Operand(base, offset, times_1, 0)); } } @@ -4322,7 +4322,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; // Compute address of modified element and store it into key register. Register key_reg(ToRegister(key)); - __ lea(key_reg, operand); + __ leap(key_reg, operand); __ RecordWrite(elements, key_reg, value, diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc index b5b7324..caf9b20 100644 --- a/src/x64/macro-assembler-x64.cc +++ b/src/x64/macro-assembler-x64.cc @@ -128,7 +128,7 @@ void MacroAssembler::LoadAddress(Register destination, intptr_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { Serializer::TooLateToEnableNow(); - lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); + leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); return; } } @@ -145,7 +145,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) { intptr_t delta = RootRegisterDelta(source); if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { Serializer::TooLateToEnableNow(); - // Operand is lea(scratch, Operand(kRootRegister, delta)); + // Operand is leap(scratch, Operand(kRootRegister, delta)); // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7. int size = 4; if (!is_int8(static_cast<int32_t>(delta))) { @@ -293,7 +293,7 @@ void MacroAssembler::InNewSpace(Register object, if (scratch.is(object)) { addp(scratch, kScratchRegister); } else { - lea(scratch, Operand(object, kScratchRegister, times_1, 0)); + leap(scratch, Operand(object, kScratchRegister, times_1, 0)); } and_(scratch, Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask()))); @@ -323,7 +323,7 @@ void MacroAssembler::RecordWriteField( // of the object, so the offset must be a multiple of kPointerSize. ASSERT(IsAligned(offset, kPointerSize)); - lea(dst, FieldOperand(object, offset)); + leap(dst, FieldOperand(object, offset)); if (emit_debug_code()) { Label ok; testb(dst, Immediate((1 << kPointerSizeLog2) - 1)); @@ -363,7 +363,7 @@ void MacroAssembler::RecordWriteArray(Register object, // Array access: calculate the destination address. Index is not a smi.
Register dst = index; - lea(dst, Operand(object, index, times_pointer_size, + leap(dst, Operand(object, index, times_pointer_size, FixedArray::kHeaderSize - kHeapObjectTag)); RecordWrite( @@ -1052,24 +1052,28 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { switch (uvalue) { case 9: - lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0)); + leap(dst, + Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0)); break; case 8: xorl(dst, dst); - lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0)); + leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0)); break; case 4: xorl(dst, dst); - lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0)); + leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0)); break; case 5: - lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0)); + leap(dst, + Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0)); break; case 3: - lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0)); + leap(dst, + Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0)); break; case 2: - lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0)); + leap(dst, + Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0)); break; case 1: movp(dst, kSmiConstantRegister); @@ -1452,13 +1456,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { addp(dst, kSmiConstantRegister); return; case 2: - lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_2, 0)); return; case 4: - lea(dst, Operand(src, kSmiConstantRegister, times_4, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_4, 0)); return; case 8: - lea(dst, Operand(src, kSmiConstantRegister, times_8, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_8, 0)); return; default: Register constant_reg = GetSmiConstant(constant); @@ -1468,16 +1472,16 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { } else { switch (constant->value()) { case 1: - lea(dst, Operand(src, kSmiConstantRegister, times_1, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_1, 0)); return; case 2: - lea(dst, Operand(src, kSmiConstantRegister, times_2, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_2, 0)); return; case 4: - lea(dst, Operand(src, kSmiConstantRegister, times_4, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_4, 0)); return; case 8: - lea(dst, Operand(src, kSmiConstantRegister, times_8, 0)); + leap(dst, Operand(src, kSmiConstantRegister, times_8, 0)); return; default: LoadSmiConstant(dst, constant); @@ -1690,7 +1694,7 @@ void MacroAssembler::SmiAdd(Register dst, addp(kScratchRegister, src2); Check(no_overflow, kSmiAdditionOverflow); } - lea(dst, Operand(src1, src2, times_1, 0)); + leap(dst, Operand(src1, src2, times_1, 0)); } else { addp(dst, src2); Assert(no_overflow, kSmiAdditionOverflow); @@ -1953,7 +1957,7 @@ void MacroAssembler::SmiNot(Register dst, Register src) { if (dst.is(src)) { xor_(dst, kScratchRegister); } else { - lea(dst, Operand(src, kScratchRegister, times_1, 0)); + leap(dst, Operand(src, kScratchRegister, times_1, 0)); } not_(dst); } @@ -2394,7 +2398,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings( andl(scratch2, Immediate(kFlatAsciiStringMask)); // Interleave the bits to check both scratch1 and scratch2 in one test. 
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); - lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); + leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); cmpl(scratch1, Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); j(not_equal, on_fail, near_jump); @@ -2441,7 +2445,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( andl(scratch2, Immediate(kFlatAsciiStringMask)); // Interleave the bits to check both scratch1 and scratch2 in one test. ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); - lea(scratch1, Operand(scratch1, scratch2, times_8, 0)); + leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); cmpl(scratch1, Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); j(not_equal, on_fail, near_jump); @@ -2765,7 +2769,7 @@ void MacroAssembler::Pushad() { // Use lea for symmetry with Popad. int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize; - lea(rsp, Operand(rsp, -sp_delta)); + leap(rsp, Operand(rsp, -sp_delta)); } @@ -2773,7 +2777,7 @@ void MacroAssembler::Popad() { // Popad must not change the flags, so use lea instead of addq. int sp_delta = (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize; - lea(rsp, Operand(rsp, sp_delta)); + leap(rsp, Operand(rsp, sp_delta)); Pop(r15); Pop(r14); Pop(r11); @@ -2893,7 +2897,7 @@ void MacroAssembler::JumpToHandlerEntry() { movp(rdx, FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)); SmiToInteger64(rdx, rdx); - lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)); + leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)); jmp(rdi); } @@ -3852,7 +3856,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) { // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame, // so it must be retained across the C-call. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; - lea(r15, Operand(rbp, r14, times_pointer_size, offset)); + leap(r15, Operand(rbp, r14, times_pointer_size, offset)); EnterExitFrameEpilogue(arg_stack_space, save_doubles); } @@ -3880,7 +3884,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { // Drop everything up to and including the arguments and the receiver // from the caller stack. - lea(rsp, Operand(r15, 1 * kPointerSize)); + leap(rsp, Operand(r15, 1 * kPointerSize)); PushReturnAddressFrom(rcx); @@ -4068,7 +4072,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, // Scale the index by multiplying by the entry size. ASSERT(SeededNumberDictionary::kEntrySize == 3); - lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 + leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 // Check if the key matches. cmpp(key, FieldOperand(elements, @@ -4225,7 +4229,7 @@ void MacroAssembler::Allocate(int header_size, Label* gc_required, AllocationFlags flags) { ASSERT((flags & SIZE_IN_WORDS) == 0); - lea(result_end, Operand(element_count, element_size, header_size)); + leap(result_end, Operand(element_count, element_size, header_size)); Allocate(result_end, result, result_end, scratch, gc_required, flags); } @@ -4323,7 +4327,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, kObjectAlignmentMask; ASSERT(kShortSize == 2); // scratch1 = length * 2 + kObjectAlignmentMask. 
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + + leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + kHeaderAlignment)); and_(scratch1, Immediate(~kObjectAlignmentMask)); if (kHeaderAlignment > 0) { @@ -4511,7 +4515,7 @@ void MacroAssembler::CopyBytes(Register destination, // at the end of the ranges. movp(scratch, length); shrl(length, Immediate(kPointerSizeLog2)); - repmovsq(); + repmovsp(); // Move remaining bytes of length. andl(scratch, Immediate(kPointerSize - 1)); movp(length, Operand(source, scratch, times_1, -kPointerSize)); @@ -4683,7 +4687,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Push(value); movp(value, FieldOperand(string, HeapObject::kMapOffset)); - movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset)); + movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset)); andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask)); cmpp(value, Immediate(encoding_mask)); @@ -4827,7 +4831,7 @@ void MacroAssembler::JumpIfBlack(Register object, movp(rcx, mask_scratch); // Make rcx into a mask that covers both marking bits using the operation // rcx = mask | (mask << 1). - lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); + leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); // Note that we are using a 4-byte aligned 8-byte load. and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); cmpp(mask_scratch, rcx); @@ -5032,7 +5036,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento( ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate()); - lea(scratch_reg, Operand(receiver_reg, + leap(scratch_reg, Operand(receiver_reg, JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); Move(kScratchRegister, new_space_start); cmpp(scratch_reg, kScratchRegister); diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc index baaa4ba..586c61a 100644 --- a/src/x64/regexp-macro-assembler-x64.cc +++ b/src/x64/regexp-macro-assembler-x64.cc @@ -203,7 +203,7 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) { __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); BranchOrBacktrack(not_equal, &not_at_start); // If we did, are we still at the start of the input? - __ lea(rax, Operand(rsi, rdi, times_1, 0)); + __ leap(rax, Operand(rsi, rdi, times_1, 0)); __ cmpp(rax, Operand(rbp, kInputStart)); BranchOrBacktrack(equal, on_at_start); __ bind(&not_at_start); @@ -215,7 +215,7 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) { __ cmpl(Operand(rbp, kStartIndex), Immediate(0)); BranchOrBacktrack(not_equal, on_not_at_start); // If we did, are we still at the start of the input? - __ lea(rax, Operand(rsi, rdi, times_1, 0)); + __ leap(rax, Operand(rsi, rdi, times_1, 0)); __ cmpp(rax, Operand(rbp, kInputStart)); BranchOrBacktrack(not_equal, on_not_at_start); } @@ -273,8 +273,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( on_no_match = &backtrack_label_; } - __ lea(r9, Operand(rsi, rdx, times_1, 0)); - __ lea(r11, Operand(rsi, rdi, times_1, 0)); + __ leap(r9, Operand(rsi, rdx, times_1, 0)); + __ leap(r11, Operand(rsi, rdi, times_1, 0)); __ addp(rbx, r9); // End of capture // --------------------- // r11 - current input character address @@ -337,18 +337,18 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Isolate* isolate #ifdef _WIN64 // Compute and set byte_offset1 (start of capture).
@@ -412,9 +412,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
   BranchOrBacktrack(greater, on_no_match);

   // Compute pointers to match string and capture string
-  __ lea(rbx, Operand(rsi, rdi, times_1, 0));  // Start of match.
+  __ leap(rbx, Operand(rsi, rdi, times_1, 0));  // Start of match.
   __ addp(rdx, rsi);  // Start of capture.
-  __ lea(r9, Operand(rdx, rax, times_1, 0));  // End of capture
+  __ leap(r9, Operand(rdx, rax, times_1, 0));  // End of capture

   // -----------------------
   // rbx - current capture character address.
@@ -489,7 +489,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
     uc16 mask,
     Label* on_not_equal) {
   ASSERT(minus < String::kMaxUtf16CodeUnit);
-  __ lea(rax, Operand(current_character(), -minus));
+  __ leap(rax, Operand(current_character(), -minus));
   __ and_(rax, Immediate(mask));
   __ cmpl(rax, Immediate(c));
   BranchOrBacktrack(not_equal, on_not_equal);
@@ -536,7 +536,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
                                                          Label* on_no_match) {
   // Range checks (c in min..max) are generally implemented by an unsigned
   // (c - min) <= (max - min) check, using the sequence:
-  //   lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+  //   leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
   //   cmp(rax, Immediate(max - min))
   switch (type) {
   case 's':
@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
       __ cmpl(current_character(), Immediate(' '));
       __ j(equal, &success, Label::kNear);
       // Check range 0x09..0x0d
-      __ lea(rax, Operand(current_character(), -'\t'));
+      __ leap(rax, Operand(current_character(), -'\t'));
       __ cmpl(rax, Immediate('\r' - '\t'));
       __ j(below_equal, &success, Label::kNear);
       // \u00a0 (NBSP).
@@ -562,13 +562,13 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
     return false;
   case 'd':
     // Match ASCII digits ('0'..'9')
-    __ lea(rax, Operand(current_character(), -'0'));
+    __ leap(rax, Operand(current_character(), -'0'));
     __ cmpl(rax, Immediate('9' - '0'));
     BranchOrBacktrack(above, on_no_match);
     return true;
   case 'D':
     // Match non ASCII-digits
-    __ lea(rax, Operand(current_character(), -'0'));
+    __ leap(rax, Operand(current_character(), -'0'));
     __ cmpl(rax, Immediate('9' - '0'));
     BranchOrBacktrack(below_equal, on_no_match);
     return true;
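The comment in CheckSpecialCharacterClass above describes the standard unsigned range-check idiom: min <= c <= max holds exactly when the unsigned value c - min is at most max - min, because the subtraction wraps around for c < min. A runnable standalone sketch of what the leap/cmpl pairs compute (illustrative only; InRange is not a V8 helper):

    #include <cassert>
    #include <cstdint>

    // Returns true when min <= c <= max using one unsigned comparison:
    // if c < min, c - min wraps to a large value and the <= test fails.
    static bool InRange(uint16_t c, uint16_t min, uint16_t max) {
      return static_cast<uint16_t>(c - min) <= static_cast<uint16_t>(max - min);
    }

    int main() {
      // Mirrors the 'd' case above: leap(rax, Operand(current_character(), -'0'))
      // followed by cmpl(rax, Immediate('9' - '0')).
      assert(InRange('7', '0', '9'));
      assert(!InRange('a', '0', '9'));
      // Mirrors the whitespace range check 0x09..0x0d.
      assert(InRange('\n', '\t', '\r'));
      return 0;
    }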
@@ -753,9 +753,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     __ movp(rbx, Operand(rbp, kStartIndex));
     __ negq(rbx);
     if (mode_ == UC16) {
-      __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+      __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
     } else {
-      __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+      __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
     }
     // Store this value in a local variable, for use when clearing
     // position registers.
@@ -826,7 +826,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
     __ movp(rcx, Operand(rbp, kInputEnd));
     __ subp(rcx, Operand(rbp, kInputStart));
     if (mode_ == UC16) {
-      __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+      __ leap(rcx, Operand(rcx, rdx, times_2, 0));
     } else {
       __ addp(rcx, rdx);
     }
@@ -896,7 +896,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
   __ bind(&return_rax);
 #ifdef _WIN64
   // Restore callee save registers.
-  __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
+  __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
   __ popq(rbx);
   __ popq(rdi);
   __ popq(rsi);
@@ -960,12 +960,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
 #ifdef _WIN64
     // Microsoft passes parameters in rcx, rdx, r8.
     // First argument, backtrack stackpointer, is already in rcx.
-    __ lea(rdx, Operand(rbp, kStackHighEnd));  // Second argument
+    __ leap(rdx, Operand(rbp, kStackHighEnd));  // Second argument
     __ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
 #else
     // AMD64 ABI passes parameters in rdi, rsi, rdx.
     __ movp(rdi, backtrack_stackpointer());  // First argument.
-    __ lea(rsi, Operand(rbp, kStackHighEnd));  // Second argument.
+    __ leap(rsi, Operand(rbp, kStackHighEnd));  // Second argument.
     __ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
 #endif
     ExternalReference grow_stack =
@@ -1125,7 +1125,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
   if (cp_offset == 0) {
     __ movp(register_location(reg), rdi);
   } else {
-    __ lea(rax, Operand(rdi, cp_offset * char_size()));
+    __ leap(rax, Operand(rdi, cp_offset * char_size()));
     __ movp(register_location(reg), rax);
   }
 }
@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
   __ movp(r8, rbp);
   // First argument: Next address on the stack (will be address of
   // return address).
-  __ lea(rcx, Operand(rsp, -kPointerSize));
+  __ leap(rcx, Operand(rsp, -kPointerSize));
 #else
   // Third argument: RegExp code frame pointer.
   __ movp(rdx, rbp);
@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
   __ movp(rsi, code_object_pointer());
   // First argument: Next address on the stack (will be address of
   // return address).
-  __ lea(rdi, Operand(rsp, -kPointerSize));
+  __ leap(rdi, Operand(rsp, -kRegisterSize));
 #endif
   ExternalReference stack_check =
       ExternalReference::re_check_stack_guard_state(isolate());
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 03a4ae7..0c5ee62 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -64,7 +64,7 @@ static void ProbeTable(Isolate* isolate,
   Label miss;

   // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
+  __ leap(offset, Operand(offset, offset, times_2, 0));

   __ LoadAddress(kScratchRegister, key_offset);

diff --git a/test/cctest/test-code-stubs-x64.cc b/test/cctest/test-code-stubs-x64.cc
index 05fd543..348b21a 100644
--- a/test/cctest/test-code-stubs-x64.cc
+++ b/test/cctest/test-code-stubs-x64.cc
@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
     // registers.
     int double_argument_slot =
         (Register::NumAllocatableRegisters() - 1) * kPointerSize + kDoubleSize;
-    __ lea(source_reg, MemOperand(rsp, -double_argument_slot - offset));
+    __ leaq(source_reg, MemOperand(rsp, -double_argument_slot - offset));
   }

   // Save registers make sure they don't get clobbered.
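Many of the lea -> leap rewrites above use lea for flag-free arithmetic rather than for addressing: Popad's comment notes that lea, unlike addq, leaves the flags alone; ProbeTable and LoadFromNumberDictionary multiply by 3 with a single leap; and JumpIfBothInstanceTypesAreNotSequentialAscii packs two masked instance types into one comparison. A minimal C++ sketch of what the base + index*scale + disp form computes (hypothetical names, not V8 APIs):

    #include <cstdint>

    // What lea dst, [base + index * 2^scale_log2 + disp] computes. The
    // hardware instruction does not modify the flags register.
    static inline uint64_t Lea(uint64_t base, uint64_t index, int scale_log2,
                               int32_t disp) {
      return base + (index << scale_log2) + disp;
    }

    // ProbeTable / LoadFromNumberDictionary: scale an index by 3 in one step,
    // as in leap(r2, Operand(r2, r2, times_2, 0)).
    static inline uint64_t TimesThree(uint64_t r) {
      return Lea(r, r, 1, 0);  // r + r*2 == r*3
    }

    // JumpIfBothInstanceTypesAreNotSequentialAscii: because
    // kFlatAsciiStringMask & (kFlatAsciiStringMask << 3) == 0, the two masked
    // fields occupy disjoint bits of t1 + (t2 << 3), so one compare against
    // tag + (tag << 3) checks both instance types at once.
    static inline bool BothTagsEqual(uint64_t t1, uint64_t t2, uint64_t tag) {
      return Lea(t1, t2, 3, 0) == tag + (tag << 3);
    }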
diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc
index ffc741a..0a16d22 100644
--- a/test/cctest/test-disasm-x64.cc
+++ b/test/cctest/test-disasm-x64.cc
@@ -173,7 +173,7 @@ TEST(DisasmX64) {

   // TODO(mstarzinger): The following is protected.
   // __ jmp(Operand(rbx, rcx, times_4, 10000));
-  __ lea(rdx, Operand(rbx, rcx, times_4, 10000));
+  __ leaq(rdx, Operand(rbx, rcx, times_4, 10000));
   __ or_(rdx, Immediate(12345));
   __ or_(rdx, Operand(rbx, rcx, times_4, 10000));

diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index e265760..ee23a0b 100644
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -2343,9 +2343,9 @@ TEST(OperandOffset) {
   // r15 = rsp[3]
   // rbx = rsp[5]
   // r13 = rsp[7]
-  __ lea(r14, Operand(rsp, 3 * kPointerSize));
-  __ lea(r13, Operand(rbp, -3 * kPointerSize));
-  __ lea(rbx, Operand(rbp, -5 * kPointerSize));
+  __ leaq(r14, Operand(rsp, 3 * kPointerSize));
+  __ leaq(r13, Operand(rbp, -3 * kPointerSize));
+  __ leaq(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
   __ Move(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
   __ movl(rax, Immediate(1));
@@ -2643,7 +2643,7 @@ TEST(OperandOffset) {
   __ movl(rax, Immediate(0));
   __ bind(&exit);
-  __ lea(rsp, Operand(rbp, kPointerSize));
+  __ leaq(rsp, Operand(rbp, kPointerSize));
   __ popq(rbp);
   __ popq(rbx);
   __ popq(r14);
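Finally, the CopyBytes hunk earlier in this patch (where repmovsq becomes repmovsp) copies length / kPointerSize whole words with rep movs, then finishes with a single word-sized copy that ends exactly at the last byte and may overlap bytes already copied, instead of looping over the tail. A hedged C++ sketch of that strategy (hypothetical helper; assumes length >= word size and disjoint buffers):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Word-wise copy in the style of MacroAssembler::CopyBytes: whole words
    // first (standing in for repmovsp), then one final word that ends at the
    // last byte and may re-copy up to word_size - 1 bytes, which is harmless
    // for non-overlapping src/dst.
    static void CopyBytesSketch(uint8_t* dst, const uint8_t* src,
                                size_t length) {
      const size_t kWord = sizeof(uintptr_t);
      for (size_t i = 0; i + kWord <= length; i += kWord) {
        std::memcpy(dst + i, src + i, kWord);
      }
      // Tail, like movp(length, Operand(source, scratch, times_1,
      // -kPointerSize)) plus the matching store.
      std::memcpy(dst + length - kWord, src + length - kWord, kWord);
    }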