From: kmillikin@chromium.org
Date: Mon, 3 Oct 2011 11:44:39 +0000 (+0000)
Subject: Clean up the x86 assembler API.
X-Git-Tag: upstream/4.7.83~18300
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=a4e01037081aba346a7526d8a7c22204504d9a61;p=platform%2Fupstream%2Fv8.git

Clean up the x86 assembler API.

The API is inconsistent about when a register must be coerced to an
operand and when it can be used as a register.  Simplify usage by never
requiring a register to be wrapped.

R=fschneider@chromium.org
BUG=
TEST=

Review URL: http://codereview.chromium.org/8086021

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9507 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---

diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index b4eb0658a..66a98841a 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -88,23 +88,23 @@ void CpuFeatures::Probe() {
   __ pushfd();
   __ push(ecx);
   __ push(ebx);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);

   // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
   __ pushfd();
   __ pop(eax);
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ xor_(eax, 0x200000);  // Flip bit 21.
   __ push(eax);
   __ popfd();
   __ pushfd();
   __ pop(eax);
-  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ xor_(eax, edx);  // Different if CPUID is supported.
   __ j(not_zero, &cpuid);

   // CPUID not supported. Clear the supported features in edx:eax.
-  __ xor_(eax, Operand(eax));
-  __ xor_(edx, Operand(edx));
+  __ xor_(eax, eax);
+  __ xor_(edx, edx);
   __ jmp(&done);

   // Invoke CPUID with 1 in eax to get feature information in
@@ -120,13 +120,13 @@ void CpuFeatures::Probe() {

   // Move the result from ecx:edx to edx:eax and make sure to mark the
   // CPUID feature as supported.
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
   __ or_(eax, 1 << CPUID);
-  __ mov(edx, Operand(ecx));
+  __ mov(edx, ecx);

   // Done.
   __ bind(&done);
-  __ mov(esp, Operand(ebp));
+  __ mov(esp, ebp);
   __ pop(ebx);
   __ pop(ecx);
   __ popfd();
@@ -772,19 +772,19 @@ void Assembler::cmpb(const Operand& op, int8_t imm8) {
 }


-void Assembler::cmpb(const Operand& dst, Register src) {
-  ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+  ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
-  emit_operand(src, dst);
+  emit_operand(reg, op);
 }


-void Assembler::cmpb(Register dst, const Operand& src) {
-  ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+  ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
-  emit_operand(dst, src);
+  emit_operand(reg, op);
 }


@@ -1187,10 +1187,10 @@ void Assembler::xor_(Register dst, const Operand& src) {
 }


-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x31);
-  emit_operand(dst, src);
+  emit_operand(src, dst);
 }


diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 638a6d47f..4dfde5f62 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -302,9 +302,6 @@ enum ScaleFactor {

 class Operand BASE_EMBEDDED {
  public:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));

@@ -357,11 +354,8 @@ class Operand BASE_EMBEDDED {
   Register reg() const;

  private:
-  byte buf_[6];
-  // The number of bytes in buf_.
-  unsigned int len_;
-  // Only valid if len_ > 4.
-  RelocInfo::Mode rmode_;
+  // reg
+  INLINE(explicit Operand(Register reg));

   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -371,7 +365,15 @@ class Operand BASE_EMBEDDED {
   inline void set_disp8(int8_t disp);
   inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);

+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
   friend class Assembler;
+  friend class MacroAssembler;
+  friend class LCodeGen;
 };
@@ -680,7 +682,9 @@ class Assembler : public AssemblerBase {
   void leave();

   // Moves
+  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
+  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
   void mov_b(const Operand& dst, int8_t imm8);
   void mov_b(const Operand& dst, Register src);

@@ -696,17 +700,24 @@ class Assembler : public AssemblerBase {
   void mov(const Operand& dst, Handle<Object> handle);
   void mov(const Operand& dst, Register src);

+  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
   void movsx_b(Register dst, const Operand& src);

+  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
   void movsx_w(Register dst, const Operand& src);

+  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
   void movzx_b(Register dst, const Operand& src);

+  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
   void movzx_w(Register dst, const Operand& src);

   // Conditional moves
   void cmov(Condition cc, Register dst, int32_t imm32);
   void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, Register src) {
+    cmov(cc, dst, Operand(src));
+  }
   void cmov(Condition cc, Register dst, const Operand& src);

   // Flag management.
@@ -724,25 +735,31 @@ class Assembler : public AssemblerBase {
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);

+  void add(Register dst, Register src) { add(dst, Operand(src)); }
   void add(Register dst, const Operand& src);
   void add(const Operand& dst, Register src);
+  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
   void add(const Operand& dst, const Immediate& x);

   void and_(Register dst, int32_t imm32);
   void and_(Register dst, const Immediate& x);
+  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
   void and_(Register dst, const Operand& src);
-  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);

+  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
   void cmpb(const Operand& op, int8_t imm8);
-  void cmpb(Register src, const Operand& dst);
-  void cmpb(const Operand& dst, Register src);
+  void cmpb(Register reg, const Operand& op);
+  void cmpb(const Operand& op, Register reg);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
+  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);

@@ -758,6 +775,7 @@ class Assembler : public AssemblerBase {

   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.

@@ -774,8 +792,10 @@ class Assembler : public AssemblerBase {
   void not_(Register dst);

   void or_(Register dst, int32_t imm32);
+  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
   void or_(Register dst, const Operand& src);
   void or_(const Operand& dst, Register src);
+  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
   void or_(const Operand& dst, const Immediate& x);

   void rcl(Register dst, uint8_t imm8);
@@ -786,33 +806,42 @@ class Assembler : public AssemblerBase {

   void sbb(Register dst, const Operand& src);

+  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);

   void shl(Register dst, uint8_t imm8);
   void shl_cl(Register dst);

+  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);

   void shr(Register dst, uint8_t imm8);
   void shr_cl(Register dst);

+  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);

   void test(Register reg, const Immediate& imm);
+  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
   void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
+  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
   void test_b(const Operand& op, uint8_t imm8);

   void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
   void xor_(Register dst, const Operand& src);
-  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, Register src);
+  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
   void xor_(const Operand& dst, const Immediate& x);

   // Bit operations.
   void bt(const Operand& dst, Register src);
+  void bts(Register dst, Register src) { bts(Operand(dst), src); }
   void bts(const Operand& dst, Register src);

   // Miscellaneous
@@ -843,6 +872,7 @@ class Assembler : public AssemblerBase {
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
   int CallSize(const Operand& adr);
+  void call(Register reg) { call(Operand(reg)); }
   void call(const Operand& adr);
   int CallSize(Handle<Code> code, RelocInfo::Mode mode);
   void call(Handle<Code> code,
@@ -853,6 +883,7 @@ class Assembler : public AssemblerBase {
   // unconditional jump to L
   void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(Register reg) { jmp(Operand(reg)); }
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);

@@ -937,6 +968,7 @@ class Assembler : public AssemblerBase {
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);

+  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -977,12 +1009,14 @@ class Assembler : public AssemblerBase {
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);

+  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
-  void movd(const Operand& src, XMMRegister dst);
+  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+  void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);

   void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& src, XMMRegister dst);
+  void movss(const Operand& dst, XMMRegister src);
   void movss(XMMRegister dst, XMMRegister src);

   void pand(XMMRegister dst, XMMRegister src);
@@ -995,11 +1029,17 @@ class Assembler : public AssemblerBase {
   void psrlq(XMMRegister reg, int8_t shift);
   void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(Register dst, XMMRegister src, int8_t offset) {
+    pextrd(Operand(dst), src, offset);
+  }
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+    pinsrd(dst, Operand(src), offset);
+  }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);

   // Parallel XMM operations.
-  void movntdqa(XMMRegister src, const Operand& dst);
+  void movntdqa(XMMRegister dst, const Operand& src);
   void movntdq(const Operand& dst, XMMRegister src);
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 141fccca7..53ade3a6c 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,

   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ add(Operand(eax), Immediate(num_extra_args + 1));
+  __ add(eax, Immediate(num_extra_args + 1));
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }

@@ -91,7 +91,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
   __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(Operand(ebx));
+  __ jmp(ebx);

   // edi: called object
   // eax: number of arguments
@@ -213,7 +213,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
              Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
       // esi: offset of first field after pre-allocated fields
       if (FLAG_debug_code) {
-        __ cmp(esi, Operand(edi));
+        __ cmp(esi, edi);
         __ Assert(less_equal,
                   "Unexpected number of pre-allocated property fields.");
       }
@@ -229,7 +229,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // eax: initial map
       // ebx: JSObject
       // edi: start of next object
-      __ or_(Operand(ebx), Immediate(kHeapObjectTag));
+      __ or_(ebx, Immediate(kHeapObjectTag));

      // Check if a non-empty properties array is needed.
      // Allocate and initialize a FixedArray if it is.
@@ -240,10 +240,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
      __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
      __ movzx_b(ecx,
                 FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-     __ add(edx, Operand(ecx));
+     __ add(edx, ecx);
      // Calculate unused properties past the end of the in-object properties.
      __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
-     __ sub(edx, Operand(ecx));
+     __ sub(edx, ecx);
      // Done if no extra properties are to be allocated.
      __ j(zero, &allocated);
      __ Assert(positive, "Property allocation count failed.");
@@ -282,9 +282,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
        __ jmp(&entry);
        __ bind(&loop);
        __ mov(Operand(eax, 0), edx);
-       __ add(Operand(eax), Immediate(kPointerSize));
+       __ add(eax, Immediate(kPointerSize));
        __ bind(&entry);
-       __ cmp(eax, Operand(ecx));
+       __ cmp(eax, ecx);
        __ j(below, &loop);
      }

@@ -292,7 +292,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
      // the JSObject
      // ebx: JSObject
      // edi: FixedArray
-     __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
+     __ or_(edi, Immediate(kHeapObjectTag));  // add the heap tag
      __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);


@@ -315,7 +315,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
    // edi: function (constructor)
    __ push(edi);
    __ CallRuntime(Runtime::kNewObject, 1);
-   __ mov(ebx, Operand(eax));  // store result in ebx
+   __ mov(ebx, eax);  // store result in ebx

    // New object allocated.
    // ebx: newly allocated object
@@ -338,7 +338,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,

    // Copy arguments and receiver to the expression stack.
    Label loop, entry;
-   __ mov(ecx, Operand(eax));
+   __ mov(ecx, eax);
    __ jmp(&entry);
    __ bind(&loop);
    __ push(Operand(ebx, ecx, times_4, 0));
@@ -443,9 +443,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ bind(&loop);
   __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
   __ push(Operand(edx, 0));  // dereference handle
-  __ inc(Operand(ecx));
+  __ inc(ecx);
   __ bind(&entry);
-  __ cmp(ecx, Operand(eax));
+  __ cmp(ecx, eax);
   __ j(not_equal, &loop);

   // Get the function from the stack and call it.
@@ -502,7 +502,7 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {

   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }


@@ -528,7 +528,7 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {

   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }


@@ -597,7 +597,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 1. Make sure we have at least one argument.
   { Label done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
@@ -716,11 +716,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
   //     or a function proxy via CALL_FUNCTION_PROXY.
   { Label function, non_proxy;
-    __ test(edx, Operand(edx));
+    __ test(edx, edx);
     __ j(zero, &function);
     __ Set(ebx, Immediate(0));
     __ SetCallKind(ecx, CALL_AS_METHOD);
-    __ cmp(Operand(edx), Immediate(1));
+    __ cmp(edx, Immediate(1));
     __ j(not_equal, &non_proxy);

     __ pop(edx);  // return address
@@ -747,13 +747,13 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
   __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());

   ParameterCount expected(0);
-  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
-                NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
+                CALL_AS_METHOD);
 }


@@ -777,14 +777,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     __ mov(edi, Operand::StaticVariable(real_stack_limit));
     // Make ecx the space we have left. The stack might already be overflowed
     // here which will cause ecx to become negative.
-    __ mov(ecx, Operand(esp));
-    __ sub(ecx, Operand(edi));
+    __ mov(ecx, esp);
+    __ sub(ecx, edi);
     // Make edx the space we need for the array when it is unrolled onto the
     // stack.
-    __ mov(edx, Operand(eax));
+    __ mov(edx, eax);
     __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
     // Check if the arguments will overflow the stack.
-    __ cmp(ecx, Operand(edx));
+    __ cmp(ecx, edx);
     __ j(greater, &okay);  // Signed comparison.

     // Out of stack space.
@@ -843,7 +843,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
     __ bind(&call_to_object);
     __ push(ebx);
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(ebx, Operand(eax));
+    __ mov(ebx, eax);
     __ jmp(&push_receiver);

     // Use the current global receiver object as the receiver.
@@ -879,7 +879,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {

     // Update the index on the stack and in register eax.
     __ mov(eax, Operand(ebp, kIndexOffset));
-    __ add(Operand(eax), Immediate(1 << kSmiTagSize));
+    __ add(eax, Immediate(1 << kSmiTagSize));
     __ mov(Operand(ebp, kIndexOffset), eax);

     __ bind(&entry);
@@ -1005,9 +1005,9 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
     __ jmp(&entry);
     __ bind(&loop);
     __ mov(Operand(scratch1, 0), factory->the_hole_value());
-    __ add(Operand(scratch1), Immediate(kPointerSize));
+    __ add(scratch1, Immediate(kPointerSize));
     __ bind(&entry);
-    __ cmp(scratch1, Operand(scratch2));
+    __ cmp(scratch1, scratch2);
     __ j(below, &loop);
   }
 }
@@ -1104,7 +1104,7 @@ static void AllocateJSArray(MacroAssembler* masm,
   __ bind(&loop);
   __ stos();
   __ bind(&entry);
-  __ cmp(edi, Operand(elements_array_end));
+  __ cmp(edi, elements_array_end);
   __ j(below, &loop);
   __ bind(&done);
 }
@@ -1142,7 +1142,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ push(eax);

   // Check for array construction with zero arguments.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &argc_one_or_more);

   __ bind(&empty_array);
@@ -1169,7 +1169,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ j(not_equal, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &not_empty_array);

   // The single argument passed is zero, so we jump to the code above used to
@@ -1182,7 +1182,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
     __ mov(eax, Operand(esp, i * kPointerSize));
     __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
   }
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // Drop two stack slots.
+  __ add(esp, Immediate(2 * kPointerSize));  // Drop two stack slots.
   __ push(Immediate(0));  // Treat this as a call with argc of zero.
   __ jmp(&empty_array);

@@ -1272,7 +1272,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
   __ bind(&loop);
   __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
   __ mov(Operand(edx, 0), eax);
-  __ add(Operand(edx), Immediate(kPointerSize));
+  __ add(edx, Immediate(kPointerSize));
   __ bind(&entry);
   __ dec(ecx);
   __ j(greater_equal, &loop);
@@ -1378,14 +1378,14 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {

   if (FLAG_debug_code) {
     __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
-    __ cmp(edi, Operand(ecx));
+    __ cmp(edi, ecx);
     __ Assert(equal, "Unexpected String function");
   }

   // Load the first argument into eax and get rid of the rest
   // (including the receiver).
   Label no_arguments;
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &no_arguments);
   __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
   __ pop(ecx);
@@ -1495,7 +1495,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {

 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);

   // Store the arguments adaptor context sentinel.
   __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1539,7 +1539,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);

   Label enough, too_few;
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(less, &too_few);
   __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ j(equal, &dont_adapt_arguments);
@@ -1557,8 +1557,8 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ bind(&copy);
     __ inc(edi);
     __ push(Operand(eax, 0));
-    __ sub(Operand(eax), Immediate(kPointerSize));
-    __ cmp(edi, Operand(ebx));
+    __ sub(eax, Immediate(kPointerSize));
+    __ cmp(edi, ebx);
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1571,17 +1571,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
     // ebx = expected - actual.
-    __ sub(ebx, Operand(eax));
+    __ sub(ebx, eax);
     // eax = -actual - 1
     __ neg(eax);
-    __ sub(Operand(eax), Immediate(1));
+    __ sub(eax, Immediate(1));

     Label copy;
     __ bind(&copy);
     __ inc(eax);
     __ push(Operand(edi, 0));
-    __ sub(Operand(edi), Immediate(kPointerSize));
-    __ test(eax, Operand(eax));
+    __ sub(edi, Immediate(kPointerSize));
+    __ test(eax, eax);
     __ j(not_zero, &copy);

     // Fill remaining expected arguments with undefined values.
@@ -1589,7 +1589,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
     __ bind(&fill);
     __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(eax, Operand(ebx));
+    __ cmp(eax, ebx);
     __ j(less, &fill);
   }

@@ -1597,7 +1597,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   __ bind(&invoke);
   // Restore function pointer.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ call(Operand(edx));
+  __ call(edx);

   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
@@ -1607,7 +1607,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }


@@ -1649,7 +1649,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
   Label skip;
-  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ cmp(eax, Immediate(Smi::FromInt(-1)));
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);

diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 00265dd71..1ca577c01 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -49,7 +49,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+  __ cmp(ebx, Immediate(factory->heap_number_map()));
   __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);

@@ -150,7 +150,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
   }

   // Return and remove the on-stack parameter.
-  __ mov(esi, Operand(eax));
+  __ mov(esi, eax);
   __ ret(1 * kPointerSize);

   // Need to collect. Call into runtime system.
@@ -345,7 +345,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   __ pushad();
   if (save_doubles_ == kSaveFPRegs) {
     CpuFeatures::Scope scope(SSE2);
-    __ sub(Operand(esp), Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       __ movdbl(Operand(esp, i * kDoubleSize), reg);
@@ -366,7 +366,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
       XMMRegister reg = XMMRegister::from_code(i);
       __ movdbl(reg, Operand(esp, i * kDoubleSize));
     }
-    __ add(Operand(esp), Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
   }
   __ popad();
   __ ret(0);
@@ -507,27 +507,27 @@ static void IntegerConvert(MacroAssembler* masm,
     // Check whether the exponent is too big for a 64 bit signed integer.
     static const uint32_t kTooBigExponent =
         (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+    __ cmp(scratch2, Immediate(kTooBigExponent));
     __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
     // Reserve space for 64 bit answer.
-    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
     // Do conversion, which cannot fail because we checked the exponent.
     __ fisttp_d(Operand(esp, 0));
     __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
   } else {
     // Load ecx with zero. We use this either for the final shift or
     // for the answer.
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     // Check whether the exponent matches a 32 bit signed int that cannot be
     // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
     // exponent is 30 (biased). This is the exponent that we are fastest at and
     // also the highest exponent we can handle here.
     const uint32_t non_smi_exponent =
         (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+    __ cmp(scratch2, Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
     __ j(equal, &right_exponent, Label::kNear);
@@ -540,7 +540,7 @@ static void IntegerConvert(MacroAssembler* masm,
      // >>> operator has a tendency to generate numbers with an exponent of 31.
      const uint32_t big_non_smi_exponent =
          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-     __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+     __ cmp(scratch2, Immediate(big_non_smi_exponent));
      __ j(not_equal, conversion_failure);
      // We have the big exponent, typically from >>>. This means the number is
      // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
@@ -559,9 +559,9 @@ static void IntegerConvert(MacroAssembler* masm,
      // Shift down 21 bits to get the most significant 11 bits or the low
      // mantissa word.
      __ shr(ecx, 32 - big_shift_distance);
-     __ or_(ecx, Operand(scratch2));
+     __ or_(ecx, scratch2);
      // We have the answer in ecx, but we may need to negate it.
-     __ test(scratch, Operand(scratch));
+     __ test(scratch, scratch);
      __ j(positive, &done, Label::kNear);
      __ neg(ecx);
      __ jmp(&done, Label::kNear);
@@ -575,14 +575,14 @@ static void IntegerConvert(MacroAssembler* masm,
      // it rounds to zero.
      const uint32_t zero_exponent =
          (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-     __ sub(Operand(scratch2), Immediate(zero_exponent));
+     __ sub(scratch2, Immediate(zero_exponent));
      // ecx already has a Smi zero.
      __ j(less, &done, Label::kNear);

      // We have a shifted exponent between 0 and 30 in scratch2.
      __ shr(scratch2, HeapNumber::kExponentShift);
      __ mov(ecx, Immediate(30));
-     __ sub(ecx, Operand(scratch2));
+     __ sub(ecx, scratch2);

      __ bind(&right_exponent);
      // Here ecx is the shift, scratch is the exponent word.
@@ -602,19 +602,19 @@ static void IntegerConvert(MacroAssembler* masm,
      // Shift down 22 bits to get the most significant 10 bits or the low
      // mantissa word.
      __ shr(scratch2, 32 - shift_distance);
-     __ or_(scratch2, Operand(scratch));
+     __ or_(scratch2, scratch);
      // Move down according to the exponent.
      __ shr_cl(scratch2);
      // Now the unsigned answer is in scratch2. We need to move it to ecx and
      // we may need to fix the sign.
      Label negative;
-     __ xor_(ecx, Operand(ecx));
+     __ xor_(ecx, ecx);
      __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
      __ j(greater, &negative, Label::kNear);
      __ mov(ecx, scratch2);
      __ jmp(&done, Label::kNear);
      __ bind(&negative);
-     __ sub(ecx, Operand(scratch2));
+     __ sub(ecx, scratch2);
      __ bind(&done);
    }
  }
@@ -716,13 +716,13 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
   __ JumpIfNotSmi(eax, non_smi, non_smi_near);

   // We can't handle -0 with smis, so use a type transition for that case.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, slow, slow_near);

   // Try optimistic subtraction '0 - value', saving operand in eax for undo.
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ Set(eax, Immediate(0));
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
   __ j(overflow, undo, undo_near);
   __ ret(0);
 }
@@ -743,7 +743,7 @@ void UnaryOpStub::GenerateSmiCodeBitNot(


 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
 }


@@ -797,7 +797,7 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
     __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
             Immediate(HeapNumber::kSignMask));  // Flip sign.
   } else {
-    __ mov(edx, Operand(eax));
+    __ mov(edx, eax);
     // edx: operand

     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -872,7 +872,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
   }
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ cvtsi2sd(xmm0, Operand(ecx));
+    __ cvtsi2sd(xmm0, ecx);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   } else {
     __ push(ecx);
@@ -1065,7 +1065,7 @@ void BinaryOpStub::GenerateSmiCode(
       // eax in case the result is not a smi.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      __ or_(right, left);  // Bitwise or is commutative.
       combined = right;
       break;

@@ -1077,7 +1077,7 @@ void BinaryOpStub::GenerateSmiCode(
     case Token::DIV:
     case Token::MOD:
       __ mov(combined, right);
-      __ or_(combined, Operand(left));
+      __ or_(combined, left);
       break;

     case Token::SHL:
@@ -1087,7 +1087,7 @@ void BinaryOpStub::GenerateSmiCode(
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
-     __ or_(right, Operand(left));
+     __ or_(right, left);
      combined = right;
      break;

@@ -1110,12 +1110,12 @@ void BinaryOpStub::GenerateSmiCode(

     case Token::BIT_XOR:
       ASSERT(right.is(eax));
-      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      __ xor_(right, left);  // Bitwise xor is commutative.
       break;

     case Token::BIT_AND:
       ASSERT(right.is(eax));
-      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      __ and_(right, left);  // Bitwise and is commutative.
       break;

     case Token::SHL:
@@ -1164,12 +1164,12 @@ void BinaryOpStub::GenerateSmiCode(

     case Token::ADD:
       ASSERT(right.is(eax));
-      __ add(right, Operand(left));  // Addition is commutative.
+      __ add(right, left);  // Addition is commutative.
       __ j(overflow, &use_fp_on_smis);
       break;

     case Token::SUB:
-      __ sub(left, Operand(right));
+      __ sub(left, right);
       __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
@@ -1183,7 +1183,7 @@ void BinaryOpStub::GenerateSmiCode(
       // Remove tag from one of the operands (but keep sign).
       __ SmiUntag(right);
       // Do multiplication.
-      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ imul(right, left);  // Multiplication is commutative.
       __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result. Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
@@ -1194,7 +1194,7 @@ void BinaryOpStub::GenerateSmiCode(
       // save the left operand.
       __ mov(edi, left);
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -1210,7 +1210,7 @@ void BinaryOpStub::GenerateSmiCode(
       // Check for negative zero result. Use combined = left | right.
       __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
       // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
+      __ test(edx, edx);
       __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(eax);
@@ -1218,7 +1218,7 @@ void BinaryOpStub::GenerateSmiCode(

     case Token::MOD:
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &not_smis);

       // Sign extend left into edx:eax.
@@ -1269,11 +1269,11 @@ void BinaryOpStub::GenerateSmiCode(
         break;
       case Token::ADD:
         // Revert right = right + left.
-        __ sub(right, Operand(left));
+        __ sub(right, left);
         break;
       case Token::SUB:
         // Revert left = left - right.
-        __ add(left, Operand(right));
+        __ add(left, right);
         break;
       case Token::MUL:
         // Right was clobbered but a copy is in ebx.
@@ -1311,7 +1311,7 @@ void BinaryOpStub::GenerateSmiCode(
     ASSERT_EQ(Token::SHL, op_);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatures::Scope use_sse2(SSE2);
-      __ cvtsi2sd(xmm0, Operand(left));
+      __ cvtsi2sd(xmm0, left);
       __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
     } else {
       __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1333,11 +1333,11 @@ void BinaryOpStub::GenerateSmiCode(
     switch (op_) {
       case Token::ADD:
         // Revert right = right + left.
-        __ sub(right, Operand(left));
+        __ sub(right, left);
         break;
       case Token::SUB:
         // Revert left = left - right.
-        __ add(left, Operand(right));
+        __ add(left, right);
         break;
       case Token::MUL:
         // Right was clobbered but a copy is in ebx.
@@ -1529,7 +1529,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
         // Check result type if it is currently Int32.
        if (result_type_ <= BinaryOpIC::INT32) {
          __ cvttsd2si(ecx, Operand(xmm0));
-         __ cvtsi2sd(xmm2, Operand(ecx));
+         __ cvtsi2sd(xmm2, ecx);
          __ ucomisd(xmm0, xmm2);
          __ j(not_zero, &not_int32);
          __ j(carry, &not_int32);
@@ -1591,9 +1591,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                        &not_int32);
      switch (op_) {
-       case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-       case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-       case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+       case Token::BIT_OR:  __ or_(eax, ecx); break;
+       case Token::BIT_AND: __ and_(eax, ecx); break;
+       case Token::BIT_XOR: __ xor_(eax, ecx); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
@@ -1617,7 +1617,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
-       __ mov(ebx, Operand(eax));  // ebx: result
+       __ mov(ebx, eax);  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
@@ -1637,7 +1637,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
-         __ cvtsi2sd(xmm0, Operand(ebx));
+         __ cvtsi2sd(xmm0, ebx);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1718,7 +1718,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   __ cmp(edx, factory->undefined_value());
   __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(edx, Operand(edx));
+    __ xor_(edx, edx);
   } else {
     __ mov(edx, Immediate(factory->nan_value()));
   }
@@ -1727,7 +1727,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
   __ cmp(eax, factory->undefined_value());
   __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(eax, Operand(eax));
+    __ xor_(eax, eax);
   } else {
     __ mov(eax, Immediate(factory->nan_value()));
   }
@@ -1805,9 +1805,9 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
                                           use_sse3_,
                                           &not_floats);
      switch (op_) {
-       case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-       case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-       case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+       case Token::BIT_OR:  __ or_(eax, ecx); break;
+       case Token::BIT_AND: __ and_(eax, ecx); break;
+       case Token::BIT_XOR: __ xor_(eax, ecx); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
@@ -1831,7 +1831,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
-       __ mov(ebx, Operand(eax));  // ebx: result
+       __ mov(ebx, eax);  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
@@ -1851,7 +1851,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
-         __ cvtsi2sd(xmm0, Operand(ebx));
+         __ cvtsi2sd(xmm0, ebx);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2004,9 +2004,9 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
                                           use_sse3_,
                                           &call_runtime);
      switch (op_) {
-       case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-       case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-       case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+       case Token::BIT_OR:  __ or_(eax, ecx); break;
+       case Token::BIT_AND: __ and_(eax, ecx); break;
+       case Token::BIT_XOR: __ xor_(eax, ecx); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
@@ -2030,7 +2030,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
-       __ mov(ebx, Operand(eax));  // ebx: result
+       __ mov(ebx, eax);  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
@@ -2050,7 +2050,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
-         __ cvtsi2sd(xmm0, Operand(ebx));
+         __ cvtsi2sd(xmm0, ebx);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2160,10 +2160,10 @@ void BinaryOpStub::GenerateHeapResultAllocation(
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now edx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
-     __ mov(edx, Operand(ebx));
+     __ mov(edx, ebx);
      __ bind(&skip_allocation);
      // Use object in edx as a result holder
-     __ mov(eax, Operand(edx));
+     __ mov(eax, edx);
      break;
    }
    case OVERWRITE_RIGHT:
@@ -2221,7 +2221,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
    // Then load the low and high words of the double into ebx, edx.
    STATIC_ASSERT(kSmiTagSize == 1);
    __ sar(eax, 1);
-   __ sub(Operand(esp), Immediate(2 * kPointerSize));
+   __ sub(esp, Immediate(2 * kPointerSize));
    __ mov(Operand(esp, 0), eax);
    __ fild_s(Operand(esp, 0));
    __ fst_d(Operand(esp, 0));
@@ -2232,7 +2232,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
    // Check if input is a HeapNumber.
    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
    Factory* factory = masm->isolate()->factory();
-   __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+   __ cmp(ebx, Immediate(factory->heap_number_map()));
    __ j(not_equal, &runtime_call);
    // Input is a HeapNumber. Push it on the FPU stack and load its
    // low and high words into ebx, edx.
@@ -2244,12 +2244,12 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  } else {  // UNTAGGED.
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope sse4_scope(SSE4_1);
-     __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
+     __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
    } else {
      __ pshufd(xmm0, xmm1, 0x1);
-     __ movd(Operand(edx), xmm0);
+     __ movd(edx, xmm0);
    }
-   __ movd(Operand(ebx), xmm1);
+   __ movd(ebx, xmm1);
  }

  // ST[0] or xmm1  == double value
@@ -2258,15 +2258,15 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // Compute hash (the shifts are arithmetic):
  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
  __ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
+ __ xor_(ecx, edx);
  __ mov(eax, ecx);
  __ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
+ __ xor_(ecx, eax);
  __ mov(eax, ecx);
  __ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
+ __ xor_(ecx, eax);
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(Operand(ecx),
+ __ and_(ecx,
          Immediate(TranscendentalCache::SubCache::kCacheSize - 1));

  // ST[0] or xmm1 == double value.
@@ -2281,7 +2281,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  __ mov(eax, Operand(eax, cache_array_index));
  // Eax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
  __ j(zero, &runtime_call_clear_stack);
 #ifdef DEBUG
  // Check that the layout of cache elements match expectations.
@@ -2324,10 +2324,10 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
    __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
  } else {  // UNTAGGED.
    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-   __ sub(Operand(esp), Immediate(kDoubleSize));
+   __ sub(esp, Immediate(kDoubleSize));
    __ movdbl(Operand(esp, 0), xmm1);
    __ fld_d(Operand(esp, 0));
-   __ add(Operand(esp), Immediate(kDoubleSize));
+   __ add(esp, Immediate(kDoubleSize));
  }
  GenerateOperation(masm);
  __ mov(Operand(ecx, 0), ebx);
@@ -2342,13 +2342,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {

  // Skip cache and return answer directly, only in untagged case.
  __ bind(&skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kDoubleSize));
  __ movdbl(Operand(esp, 0), xmm1);
  __ fld_d(Operand(esp, 0));
  GenerateOperation(masm);
  __ fstp_d(Operand(esp, 0));
  __ movdbl(xmm1, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ __ add(esp, Immediate(kDoubleSize));
  // We return the value in xmm1 without adding it to the cache, but
  // we cause a scavenging GC so that future allocations will succeed.
  {
@@ -2409,13 +2409,13 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
    // work. We must reduce it to the appropriate range.
    __ mov(edi, edx);
-   __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+   __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
    int supported_exponent_limit =
        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-   __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+   __ cmp(edi, Immediate(supported_exponent_limit));
    __ j(below, &in_range, Label::kNear);
    // Check for infinity and NaN. Both return NaN for sin.
-   __ cmp(Operand(edi), Immediate(0x7ff00000));
+   __ cmp(edi, Immediate(0x7ff00000));
    Label non_nan_result;
    __ j(not_equal, &non_nan_result, Label::kNear);
    // Input is +/-Infinity or NaN. Result is NaN.
@@ -2424,7 +2424,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
    __ push(Immediate(0x7ff80000));
    __ push(Immediate(0));
    __ fld_d(Operand(esp, 0));
-   __ add(Operand(esp), Immediate(2 * kPointerSize));
+   __ add(esp, Immediate(2 * kPointerSize));
    __ jmp(&done, Label::kNear);

    __ bind(&non_nan_result);
@@ -2440,7 +2440,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
      __ fwait();
      __ fnstsw_ax();
      // Clear if Illegal Operand or Zero Division exceptions are set.
-     __ test(Operand(eax), Immediate(5));
+     __ test(eax, Immediate(5));
      __ j(zero, &no_exceptions, Label::kNear);
      __ fnclex();
      __ bind(&no_exceptions);
@@ -2453,7 +2453,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
      __ fprem1();
      __ fwait();
      __ fnstsw_ax();
-     __ test(Operand(eax), Immediate(0x400 /* C2 */));
+     __ test(eax, Immediate(0x400 /* C2 */));
      // If C2 is set, computation only has partial result. Loop to
      // continue computation.
      __ j(not_zero, &partial_remainder_loop);
@@ -2586,13 +2586,13 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {

   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);

   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.

   __ bind(&done);
@@ -2616,12 +2616,12 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2637,11 +2637,11 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm0, Operand(scratch));
+  __ cvtsi2sd(xmm0, scratch);

   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm1, Operand(scratch));
+  __ cvtsi2sd(xmm1, scratch);
 }


@@ -2649,12 +2649,12 @@ void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                                     Label* non_int32,
                                                     Register scratch) {
   __ cvttsd2si(scratch, Operand(xmm0));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm0, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
   __ cvttsd2si(scratch, Operand(xmm1));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm1, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
@@ -2762,7 +2762,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {

   // Save 1 in xmm3 - we need this several times later on.
   __ mov(ecx, Immediate(1));
-  __ cvtsi2sd(xmm3, Operand(ecx));
+  __ cvtsi2sd(xmm3, ecx);

   Label exponent_nonsmi;
   Label base_nonsmi;
@@ -2773,7 +2773,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // Optimized version when both exponent and base are smis.
   Label powi;
   __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ jmp(&powi);
   // exponent is smi and base is a heapnumber.
   __ bind(&base_nonsmi);
@@ -2815,11 +2815,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {

   // base has the original value of the exponent - if the exponent is
   // negative return 1/result.
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ mov(ecx, Immediate(0x7FB00000));
-  __ movd(xmm0, Operand(ecx));
+  __ movd(xmm0, ecx);
   __ cvtss2sd(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
@@ -2842,7 +2842,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   Label handle_special_cases;
   __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
   __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ jmp(&handle_special_cases, Label::kNear);

   __ bind(&base_not_smi);
@@ -2851,7 +2851,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ j(not_equal, &call_runtime);
   __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
   __ and_(ecx, HeapNumber::kExponentMask);
-  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
   // base is NaN or +/-Infinity
   __ j(greater_equal, &call_runtime);
   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
@@ -2862,7 +2862,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ mov(ecx, Immediate(0xBF000000));
-  __ movd(xmm2, Operand(ecx));
+  __ movd(xmm2, ecx);
   __ cvtss2sd(xmm2, xmm2);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
@@ -2918,13 +2918,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor, Label::kNear);

   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(above_equal, &slow, Label::kNear);

   // Read the argument from the stack and return it.
@@ -2940,7 +2940,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // comparison to get negative check for free.
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, Operand(ecx));
+  __ cmp(edx, ecx);
   __ j(above_equal, &slow, Label::kNear);

   // Read the argument from the stack and return it.
@@ -2971,7 +2971,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
   Label runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime, Label::kNear);

   // Patch the arguments.length and the parameters pointer.
@@ -3002,7 +3002,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   Label adaptor_frame, try_allocate;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);

   // No adaptor, parameter count = argument count.
@@ -3021,7 +3021,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less_equal, &try_allocate, Label::kNear);
   __ mov(ebx, ecx);

@@ -3035,7 +3035,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
   __ bind(&no_parameter_map);
@@ -3044,7 +3044,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));

   // 3. Arguments object.
-  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));

   // Do the allocation of all three objects in one go.
   __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -3059,7 +3059,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
   __ mov(edi, Operand(edi,
          Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3114,7 +3114,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &skip_parameter_map);

   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3138,7 +3138,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ add(ebx, Operand(esp, 4 * kPointerSize));
-  __ sub(ebx, Operand(eax));
+  __ sub(ebx, eax);
   __ mov(ecx, FACTORY->the_hole_value());
   __ mov(edx, edi);
   __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3155,12 +3155,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   __ jmp(&parameters_test, Label::kNear);

   __ bind(&parameters_loop);
-  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+  __ sub(eax, Immediate(Smi::FromInt(1)));
   __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
   __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &parameters_loop, Label::kNear);
   __ pop(ecx);

@@ -3180,18 +3180,18 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
   Label arguments_loop, arguments_test;
   __ mov(ebx, Operand(esp, 1 * kPointerSize));
   __ mov(edx, Operand(esp, 4 * kPointerSize));
-  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(Operand(edx), ebx);
+  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(edx, ebx);
   __ jmp(&arguments_test, Label::kNear);

   __ bind(&arguments_loop);
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ mov(eax, Operand(edx, 0));
   __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
   __ bind(&arguments_test);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less, &arguments_loop, Label::kNear);

   // Restore.
@@ -3219,7 +3219,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   Label adaptor_frame, try_allocate, runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);

   // Get the length from the frame.
@@ -3238,11 +3238,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
+  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));

   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3269,7 +3269,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {

   // If there are no actual arguments, we're done.
   Label done;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &done, Label::kNear);

   // Get the parameters pointer from the stack.
@@ -3291,8 +3291,8 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
+ __ add(edi, Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &loop);
@@ -3339,7 +3339,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -3360,7 +3360,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
__ j(not_equal, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3370,7 +3370,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// uses the assumption that smis are 2 * their untagged value.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ __ add(edx, Immediate(2)); // edx was a smi.
// Check that the static offsets vector buffer is large enough.
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
__ j(above, &runtime);
@@ -3392,7 +3392,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
__ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(above_equal, &runtime);
// ecx: RegExp data (FixedArray)
@@ -3412,8 +3412,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
+ __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
__ j(greater, &runtime);
// Reset offset for possibly sliced string.
@@ -3430,8 +3430,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be a flat ascii string.
- __ and_(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask));
__ j(zero, &seq_ascii_string, Label::kNear);
// Check for flat cons string or sliced string.
@@ -3443,7 +3442,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label cons_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+ __ cmp(ebx, Immediate(kExternalStringTag));
__ j(less, &cons_string);
__ j(equal, &runtime);
@@ -3549,14 +3548,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
__ mov(esi, FieldOperand(esi, String::kLengthOffset));
- __ add(esi, Operand(edi)); // Calculate input end wrt offset.
+ __ add(esi, edi); // Calculate input end wrt offset.
__ SmiUntag(edi);
- __ add(ebx, Operand(edi)); // Calculate input start wrt offset.
+ __ add(ebx, edi); // Calculate input start wrt offset.
// ebx: start index of the input string
// esi: end index of the input string
Label setup_two_byte, setup_rest;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
__ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3576,8 +3575,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&setup_rest);
// Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(Operand(edx));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(edx);
// Drop arguments and come back to JS mode.
__ LeaveApiExitFrame();
@@ -3602,7 +3601,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Operand::StaticVariable(ExternalReference::the_hole_value_location(
masm->isolate())));
__ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(equal, &runtime);
// For exception, throw the exception again.
@@ -3623,7 +3622,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&failure);
// For failure to match, return null.
- __ mov(Operand(eax), factory->null_value());
+ __ mov(eax, factory->null_value());
__ ret(4 * kPointerSize);
// Load RegExp data.
@@ -3634,7 +3633,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Calculate number of capture registers (number_of_captures + 1) * 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ __ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
@@ -3675,7 +3674,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
+ __ sub(edx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer.
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3706,7 +3705,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
__ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
@@ -3766,10 +3765,10 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// ebx: Start of elements in FixedArray.
// edx: the hole.
Label loop;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ bind(&loop);
__ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(Operand(ecx), Immediate(1));
+ __ sub(ecx, Immediate(1));
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
__ jmp(&loop);
@@ -3803,7 +3802,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
+ __ sub(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -3829,7 +3828,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
// Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
Register index = scratch;
Register probe = mask;
__ mov(probe,
@@ -3855,7 +3854,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
__ bind(&smi_hash_calculated);
// Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmp(object,
@@ -3907,10 +3906,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Compare two smis if required.
if (include_smi_compare_) {
Label non_smi, smi_done;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
+ __ sub(edx, eax); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done, Label::kNear);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
__ bind(&smi_done);
@@ -3918,8 +3917,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ ret(0);
__ bind(&non_smi);
} else if (FLAG_debug_code) {
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected smi operands.");
}
@@ -3931,7 +3930,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// for NaN and undefined.
{
Label not_identical;
- __ cmp(eax, Operand(edx));
+ __ cmp(eax, edx);
__ j(not_equal, &not_identical);
if (cc_ != equal) {
@@ -3980,7 +3979,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
- __ add(edx, Operand(edx));
+ __ add(edx, edx);
__ cmp(edx, kQuietNaNHighBitsMask << 1);
if (cc_ == equal) {
STATIC_ASSERT(EQUAL != 1);
@@ -4014,19 +4013,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
+ __ and_(ecx, eax);
+ __ test(ecx, edx);
__ j(not_zero, &not_smis, Label::kNear);
// One operand is a smi.
// Check whether the non-smi is a heap number.
STATIC_ASSERT(kSmiTagMask == 1);
// ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
+ __ sub(ecx, Immediate(0x01));
__ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
+ __ xor_(ebx, eax);
+ __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, eax);
// if eax was smi, ebx is now edx, else eax.
// Check if the non-smi operand is a heap number.
@@ -4088,9 +4087,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
+ __ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
+ __ cmov(below, eax, ecx);
__ ret(0);
} else {
FloatingPointHelper::CheckFloatOperands(
@@ -4316,14 +4315,14 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(ecx, Operand(edi));
+ __ cmp(ecx, edi);
__ j(equal, &call, Label::kNear);
- __ cmp(Operand(ecx), Immediate(MegamorphicSentinel(isolate)));
+ __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
__ j(equal, &call, Label::kNear);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(Operand(ecx), Immediate(UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
__ j(equal, &initialize, Label::kNear);
// MegamorphicSentinel is a root so no write-barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
@@ -4483,7 +4482,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- __ call(Operand(ebx));
+ __ call(ebx);
// Result is in eax or edx:eax - do not destroy these registers!
if (always_allocate_scope) {
@@ -4638,7 +4637,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Setup frame.
__ push(ebp);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// Push marker in two places.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -4706,7 +4705,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
__ mov(edx, Operand(edx, 0)); // deref address
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
+ __ call(edx);
// Unlink this frame from the handler chain.
__ PopTryHandler();
@@ -4714,8 +4713,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
- __ cmp(Operand(ebx),
- Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
@@ -4729,7 +4727,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ pop(ebx);
__ pop(esi);
__ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(ebp); @@ -4845,10 +4843,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset)); Label loop, is_instance, is_not_instance; __ bind(&loop); - __ cmp(scratch, Operand(prototype)); + __ cmp(scratch, prototype); __ j(equal, &is_instance, Label::kNear); Factory* factory = masm->isolate()->factory(); - __ cmp(Operand(scratch), Immediate(factory->null_value())); + __ cmp(scratch, Immediate(factory->null_value())); __ j(equal, &is_not_instance, Label::kNear); __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset)); @@ -4946,7 +4944,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); } Label true_value, done; - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(zero, &true_value, Label::kNear); __ mov(eax, factory->false_value()); __ jmp(&done, Label::kNear); @@ -5262,7 +5260,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { Label second_not_zero_length, both_not_zero_length; __ mov(ecx, FieldOperand(edx, String::kLengthOffset)); STATIC_ASSERT(kSmiTag == 0); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, &second_not_zero_length, Label::kNear); // Second string is empty, result is first string which is already in eax. Counters* counters = masm->isolate()->counters(); @@ -5271,7 +5269,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&second_not_zero_length); __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); STATIC_ASSERT(kSmiTag == 0); - __ test(ebx, Operand(ebx)); + __ test(ebx, ebx); __ j(not_zero, &both_not_zero_length, Label::kNear); // First string is empty, result is second string which is in edx. __ mov(eax, edx); @@ -5286,13 +5284,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Look at the length of the result of adding the two strings. Label string_add_flat_result, longer_than_two; __ bind(&both_not_zero_length); - __ add(ebx, Operand(ecx)); + __ add(ebx, ecx); STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); // Handle exceptionally long strings in the runtime system. __ j(overflow, &string_add_runtime); // Use the symbol table when adding two one character strings, as it // helps later optimizations to return a symbol here. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(2))); + __ cmp(ebx, Immediate(Smi::FromInt(2))); __ j(not_equal, &longer_than_two); // Check that both strings are non-external ascii strings. @@ -5329,7 +5327,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { &string_add_runtime); // Pack both characters in ebx. __ shl(ecx, kBitsPerByte); - __ or_(ebx, Operand(ecx)); + __ or_(ebx, ecx); // Set the characters in the new string. __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); __ IncrementCounter(counters->string_add_native(), 1); @@ -5337,7 +5335,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&longer_than_two); // Check if resulting string will be flat. - __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength))); + __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength))); __ j(below, &string_add_flat_result); // If result is not supposed to be flat allocate a cons string object. 
If both @@ -5347,7 +5345,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset)); __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); - __ and_(ecx, Operand(edi)); + __ and_(ecx, edi); STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ test(ecx, Immediate(kStringEncodingMask)); @@ -5375,7 +5373,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ j(not_zero, &ascii_data); __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); - __ xor_(edi, Operand(ecx)); + __ xor_(edi, ecx); STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); @@ -5423,12 +5421,12 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Load first argument and locate first character. __ mov(edx, Operand(esp, 2 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: first character of result // edx: first char of first argument @@ -5438,7 +5436,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(edx, Operand(esp, 1 * kPointerSize)); __ mov(edi, FieldOperand(edx, String::kLengthOffset)); __ SmiUntag(edi); - __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // eax: result string // ecx: next character of result // edx: first char of second argument @@ -5462,13 +5460,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(Operand(ecx), + __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // Load first argument and locate first character. 
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), + Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
@@ -5479,7 +5477,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
@@ -5555,15 +5553,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (ascii) {
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
} else {
__ mov_w(scratch, Operand(src, 0));
__ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
+ __ add(src, Immediate(2));
+ __ add(dest, Immediate(2));
}
- __ sub(Operand(count), Immediate(1));
+ __ sub(count, Immediate(1));
__ j(not_zero, &loop);
}
@@ -5586,7 +5584,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Nothing to do for zero characters.
Label done;
- __ test(count, Operand(count));
+ __ test(count, count);
__ j(zero, &done);
// Make count the number of bytes to copy.
@@ -5611,7 +5609,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Check if there are more bytes to copy.
__ bind(&last_bytes);
- __ test(count, Operand(count));
+ __ test(count, count);
__ j(zero, &done);
// Copy remaining characters.
@@ -5619,9 +5617,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
+ __ sub(count, Immediate(1));
__ j(not_zero, &loop);
__ bind(&done);
@@ -5643,12 +5641,12 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
__ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast('0')));
- __ cmp(Operand(scratch), Immediate(static_cast('9' - '0')));
+ __ sub(scratch, Immediate(static_cast('0')));
+ __ cmp(scratch, Immediate(static_cast('9' - '0')));
__ j(above, &not_array_index, Label::kNear);
__ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast('0')));
- __ cmp(Operand(scratch), Immediate(static_cast('9' - '0')));
+ __ sub(scratch, Immediate(static_cast('0')));
+ __ cmp(scratch, Immediate(static_cast('9' - '0')));
__ j(below_equal, not_probed);
__ bind(&not_array_index);
@@ -5661,7 +5659,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Collect the two characters in a register.
Register chars = c1;
__ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
+ __ or_(chars, c2);
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
@@ -5678,7 +5676,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register mask = scratch2; __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); __ SmiUntag(mask); - __ sub(Operand(mask), Immediate(1)); + __ sub(mask, Immediate(1)); // Registers // chars: two character string, char 1 in byte 0 and char 2 in byte 1. @@ -5695,9 +5693,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Calculate entry in symbol table. __ mov(scratch, hash); if (i > 0) { - __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i))); + __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i))); } - __ and_(scratch, Operand(mask)); + __ and_(scratch, mask); // Load the entry from the symbol table. Register candidate = scratch; // Scratch register contains candidate. @@ -5734,7 +5732,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); __ and_(temp, 0x0000ffff); - __ cmp(chars, Operand(temp)); + __ cmp(chars, temp); __ j(equal, &found_in_symbol_table); __ bind(&next_probe_pop_mask[i]); __ pop(mask); @@ -5761,11 +5759,11 @@ void StringHelper::GenerateHashInit(MacroAssembler* masm, // hash = character + (character << 10); __ mov(hash, character); __ shl(hash, 10); - __ add(hash, Operand(character)); + __ add(hash, character); // hash ^= hash >> 6; __ mov(scratch, hash); __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); } @@ -5774,15 +5772,15 @@ void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, Register character, Register scratch) { // hash += character; - __ add(hash, Operand(character)); + __ add(hash, character); // hash += hash << 10; __ mov(scratch, hash); __ shl(scratch, 10); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // hash ^= hash >> 6; __ mov(scratch, hash); __ sar(scratch, 6); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); } @@ -5792,19 +5790,19 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm, // hash += hash << 3; __ mov(scratch, hash); __ shl(scratch, 3); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // hash ^= hash >> 11; __ mov(scratch, hash); __ sar(scratch, 11); - __ xor_(hash, Operand(scratch)); + __ xor_(hash, scratch); // hash += hash << 15; __ mov(scratch, hash); __ shl(scratch, 15); - __ add(hash, Operand(scratch)); + __ add(hash, scratch); // if (hash == 0) hash = 27; Label hash_not_zero; - __ test(hash, Operand(hash)); + __ test(hash, hash); __ j(not_zero, &hash_not_zero, Label::kNear); __ mov(hash, Immediate(27)); __ bind(&hash_not_zero); @@ -5836,7 +5834,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ JumpIfNotSmi(ecx, &runtime); __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index. __ JumpIfNotSmi(edx, &runtime); - __ sub(ecx, Operand(edx)); + __ sub(ecx, edx); __ cmp(ecx, FieldOperand(eax, String::kLengthOffset)); Label return_eax; __ j(equal, &return_eax); @@ -5968,13 +5966,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); - __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. 
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
__ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
+ __ add(esi, ebx);
// eax: result string
// ecx: result length
@@ -6003,18 +6001,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(Operand(edi),
+ __ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
// As from is a smi it is 2 times the value which matches the size of a two
// byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
+ __ add(esi, ebx);
// eax: result string
// ecx: result length
@@ -6054,7 +6051,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ test(length, Operand(length));
+ __ test(length, length);
__ j(not_zero, &compare_chars, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -6089,14 +6086,14 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ j(less_equal, &left_shorter, Label::kNear);
// Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
+ __ sub(scratch1, length_delta);
__ bind(&left_shorter);
Register min_length = scratch1;
// If either length is zero, just compare lengths.
Label compare_lengths;
- __ test(min_length, Operand(min_length));
+ __ test(min_length, min_length);
__ j(zero, &compare_lengths, Label::kNear);
// Compare characters.
@@ -6106,7 +6103,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
+ __ test(length_delta, length_delta);
__ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
@@ -6155,7 +6152,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ j(not_zero, &loop);
}
@@ -6172,7 +6169,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right
Label not_same;
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -6188,7 +6185,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Compare flat ascii strings.
// Drop arguments from the stack.
__ pop(ecx); - __ add(Operand(esp), Immediate(2 * kPointerSize)); + __ add(esp, Immediate(2 * kPointerSize)); __ push(ecx); GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); @@ -6202,16 +6199,16 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void ICCompareStub::GenerateSmis(MacroAssembler* masm) { ASSERT(state_ == CompareIC::SMIS); Label miss; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); __ JumpIfNotSmi(ecx, &miss, Label::kNear); if (GetCondition() == equal) { // For equality we do not care about the sign of the result. - __ sub(eax, Operand(edx)); + __ sub(eax, edx); } else { Label done; - __ sub(edx, Operand(eax)); + __ sub(edx, eax); __ j(no_overflow, &done, Label::kNear); // Correct sign of result in case of overflow. __ not_(edx); @@ -6231,8 +6228,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { Label generic_stub; Label unordered; Label miss; - __ mov(ecx, Operand(edx)); - __ and_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ and_(ecx, eax); __ JumpIfSmi(ecx, &generic_stub, Label::kNear); __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); @@ -6260,9 +6257,9 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { // Performing mov, because xor would destroy the flag register. __ mov(eax, 0); // equal __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); + __ cmov(above, eax, ecx); __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); + __ cmov(below, eax, ecx); __ ret(0); __ bind(&unordered); @@ -6289,9 +6286,9 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { // Check that both operands are heap objects. Label miss; - __ mov(tmp1, Operand(left)); + __ mov(tmp1, left); STATIC_ASSERT(kSmiTag == 0); - __ and_(tmp1, Operand(right)); + __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss, Label::kNear); // Check that both operands are symbols. @@ -6300,13 +6297,13 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSymbolTag != 0); - __ and_(tmp1, Operand(tmp2)); + __ and_(tmp1, tmp2); __ test(tmp1, Immediate(kIsSymbolMask)); __ j(zero, &miss, Label::kNear); // Symbols are compared by identity. Label done; - __ cmp(left, Operand(right)); + __ cmp(left, right); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. ASSERT(right.is(eax)); @@ -6335,9 +6332,9 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { Register tmp3 = edi; // Check that both operands are heap objects. - __ mov(tmp1, Operand(left)); + __ mov(tmp1, left); STATIC_ASSERT(kSmiTag == 0); - __ and_(tmp1, Operand(right)); + __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss); // Check that both operands are strings. This leaves the instance @@ -6348,13 +6345,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); __ mov(tmp3, tmp1); STATIC_ASSERT(kNotStringTag != 0); - __ or_(tmp3, Operand(tmp2)); + __ or_(tmp3, tmp2); __ test(tmp3, Immediate(kIsNotStringMask)); __ j(not_zero, &miss); // Fast check for identical strings. 
Label not_same;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -6368,7 +6365,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// because we already know they are not identical.
Label do_compare;
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, Operand(tmp2));
+ __ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
@@ -6401,8 +6398,8 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6411,7 +6408,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
__ ret(0);
__ bind(&miss);
@@ -6447,7 +6444,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
__ push(ecx);
// Do a tail call to the rewritten stub.
- __ jmp(Operand(edi));
+ __ jmp(edi);
}
@@ -6476,8 +6473,8 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
- __ and_(Operand(index),
- Immediate(Smi::FromInt(name->Hash() +
+ __ and_(index,
+ Immediate(Smi::FromInt(name->Hash() +
StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
@@ -6510,7 +6507,7 @@ MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
__ push(Immediate(name->Hash()));
MaybeObject* result = masm->TryCallStub(&stub);
if (result->IsFailure()) return result;
- __ test(r0, Operand(r0));
+ __ test(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
return result;
@@ -6543,9 +6540,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ mov(r0, FieldOperand(name, String::kHashFieldOffset));
__ shr(r0, String::kHashShift);
if (i > 0) {
- __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r0, Operand(r1));
+ __ and_(r0, r1);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
@@ -6569,7 +6566,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
__ push(r0);
__ CallStub(&stub);
- __ test(r1, Operand(r1));
+ __ test(r1, r1);
__ j(zero, miss);
__ jmp(done);
}
@@ -6608,8 +6605,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(scratch, Operand(esp, 2 * kPointerSize));
if (i > 0) {
- __ add(Operand(scratch),
- Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(esp, 0));
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 498fe57f3..2a7d316f4 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -631,8 +631,8 @@ class RecordWriteStub: public CodeStub { if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); if (mode == kSaveFPRegs) { CpuFeatures::Scope scope(SSE2); - masm->sub(Operand(esp), - Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); + masm->sub(esp, + Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); // Save all XMM registers except XMM0. for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); @@ -650,7 +650,7 @@ class RecordWriteStub: public CodeStub { XMMRegister reg = XMMRegister::from_code(i); masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize)); } - masm->add(Operand(esp), + masm->add(esp, Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); } if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx); diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc index 19fd8f1a8..f901b6f88 100644 --- a/src/ia32/codegen-ia32.cc +++ b/src/ia32/codegen-ia32.cc @@ -112,14 +112,14 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ mov(edx, dst); __ and_(edx, 0xF); __ neg(edx); - __ add(Operand(edx), Immediate(16)); - __ add(dst, Operand(edx)); - __ add(src, Operand(edx)); - __ sub(Operand(count), edx); + __ add(edx, Immediate(16)); + __ add(dst, edx); + __ add(src, edx); + __ sub(count, edx); // edi is now aligned. Check if esi is also aligned. Label unaligned_source; - __ test(Operand(src), Immediate(0x0F)); + __ test(src, Immediate(0x0F)); __ j(not_zero, &unaligned_source); { // Copy loop for aligned source and destination. @@ -134,11 +134,11 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ prefetch(Operand(src, 0x20), 1); __ movdqa(xmm0, Operand(src, 0x00)); __ movdqa(xmm1, Operand(src, 0x10)); - __ add(Operand(src), Immediate(0x20)); + __ add(src, Immediate(0x20)); __ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x10), xmm1); - __ add(Operand(dst), Immediate(0x20)); + __ add(dst, Immediate(0x20)); __ dec(loop_count); __ j(not_zero, &loop); @@ -146,12 +146,12 @@ OS::MemCopyFunction CreateMemCopyFunction() { // At most 31 bytes to copy. Label move_less_16; - __ test(Operand(count), Immediate(0x10)); + __ test(count, Immediate(0x10)); __ j(zero, &move_less_16); __ movdqa(xmm0, Operand(src, 0)); - __ add(Operand(src), Immediate(0x10)); + __ add(src, Immediate(0x10)); __ movdqa(Operand(dst, 0), xmm0); - __ add(Operand(dst), Immediate(0x10)); + __ add(dst, Immediate(0x10)); __ bind(&move_less_16); // At most 15 bytes to copy. Copy 16 bytes at end of string. @@ -180,11 +180,11 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ prefetch(Operand(src, 0x20), 1); __ movdqu(xmm0, Operand(src, 0x00)); __ movdqu(xmm1, Operand(src, 0x10)); - __ add(Operand(src), Immediate(0x20)); + __ add(src, Immediate(0x20)); __ movdqa(Operand(dst, 0x00), xmm0); __ movdqa(Operand(dst, 0x10), xmm1); - __ add(Operand(dst), Immediate(0x20)); + __ add(dst, Immediate(0x20)); __ dec(loop_count); __ j(not_zero, &loop); @@ -192,12 +192,12 @@ OS::MemCopyFunction CreateMemCopyFunction() { // At most 31 bytes to copy. 
Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
+ __ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
+ __ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
+ __ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -232,10 +232,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
- __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
+ __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
// edi is now aligned, ecx holds number of remaining bytes to copy.
__ mov(edx, count);
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 59c36ce5f..d7184ed20 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -157,7 +157,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -299,7 +299,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(Operand(edx));
+ __ jmp(edx);
}
const bool Debug::kFrameDropperSupported = true;
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 991b096ab..02cc4ebd3 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -666,7 +666,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumAllocatableRegisters;
- __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+ __ sub(esp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
@@ -690,7 +690,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
- __ sub(edx, Operand(ebp));
+ __ sub(edx, ebp);
__ neg(edx);
// Allocate a new deoptimizer object.
@@ -729,15 +729,15 @@ void Deoptimizer::EntryGenerator::Generate() {
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
} else {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); - __ add(ecx, Operand(esp)); + __ add(ecx, esp); // Unwind the stack down to - but not including - the unwinding // limit and copy the contents of the activation frame to the input @@ -746,16 +746,16 @@ void Deoptimizer::EntryGenerator::Generate() { Label pop_loop; __ bind(&pop_loop); __ pop(Operand(edx, 0)); - __ add(Operand(edx), Immediate(sizeof(uint32_t))); - __ cmp(ecx, Operand(esp)); + __ add(edx, Immediate(sizeof(uint32_t))); + __ cmp(ecx, esp); __ j(not_equal, &pop_loop); // If frame was dynamically aligned, pop padding. Label sentinel, sentinel_done; - __ pop(Operand(ecx)); + __ pop(ecx); __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset())); __ j(equal, &sentinel); - __ push(Operand(ecx)); + __ push(ecx); __ jmp(&sentinel_done); __ bind(&sentinel); __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()), @@ -795,12 +795,12 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(ebx, Operand(eax, 0)); __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); __ bind(&inner_push_loop); - __ sub(Operand(ecx), Immediate(sizeof(uint32_t))); + __ sub(ecx, Immediate(sizeof(uint32_t))); __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(not_zero, &inner_push_loop); - __ add(Operand(eax), Immediate(kPointerSize)); - __ cmp(eax, Operand(edx)); + __ add(eax, Immediate(kPointerSize)); + __ cmp(eax, edx); __ j(below, &outer_push_loop); // In case of OSR, we have to restore the XMM registers. diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc index c6f275bbd..d45a9cdae 100644 --- a/src/ia32/full-codegen-ia32.cc +++ b/src/ia32/full-codegen-ia32.cc @@ -138,7 +138,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { // function calls. if (info->is_strict_mode() || info->is_native()) { Label ok; - __ test(ecx, Operand(ecx)); + __ test(ecx, ecx); __ j(zero, &ok, Label::kNear); // +1 for return address. int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize; @@ -371,10 +371,10 @@ void FullCodeGenerator::EmitReturnSequence() { void FullCodeGenerator::verify_stack_height() { ASSERT(FLAG_verify_stack_height); - __ sub(Operand(ebp), Immediate(kPointerSize * stack_height())); - __ cmp(ebp, Operand(esp)); + __ sub(ebp, Immediate(kPointerSize * stack_height())); + __ cmp(ebp, esp); __ Assert(equal, "Full codegen stack height not as expected."); - __ add(Operand(ebp), Immediate(kPointerSize * stack_height())); + __ add(ebp, Immediate(kPointerSize * stack_height())); } @@ -603,7 +603,7 @@ void FullCodeGenerator::DoTest(Expression* condition, ToBooleanStub stub(result_register()); __ push(result_register()); __ CallStub(&stub, condition->test_id()); - __ test(result_register(), Operand(result_register())); + __ test(result_register(), result_register()); // The stub returns nonzero for true. Split(not_zero, if_true, if_false, fall_through); } @@ -847,10 +847,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { if (inline_smi_code) { Label slow_case; __ mov(ecx, edx); - __ or_(ecx, Operand(eax)); + __ or_(ecx, eax); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. 
__ jmp(clause->body_target()); @@ -862,7 +862,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Handle ic = CompareIC::GetUninitialized(Token::EQ_STRICT); __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_equal, &next_test); __ Drop(1); // Switch value is no longer needed. __ jmp(clause->body_target()); @@ -951,7 +951,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // For all objects but the receiver, check that the cache is empty. Label check_prototype; - __ cmp(ecx, Operand(eax)); + __ cmp(ecx, eax); __ j(equal, &check_prototype, Label::kNear); __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset)); __ cmp(edx, isolate()->factory()->empty_fixed_array()); @@ -1033,9 +1033,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ push(ecx); // Enumerable. __ push(ebx); // Current entry. __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(equal, loop_statement.continue_label()); - __ mov(ebx, Operand(eax)); + __ mov(ebx, eax); // Update the 'each' property or variable from the possibly filtered // entry in register ebx. @@ -1059,7 +1059,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { // Remove the pointers stored on the stack. __ bind(loop_statement.break_label()); - __ add(Operand(esp), Immediate(5 * kPointerSize)); + __ add(esp, Immediate(5 * kPointerSize)); decrement_stack_height(ForIn::kElementCount); // Exit and decrement the loop depth. @@ -1665,7 +1665,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ pop(edx); decrement_stack_height(); __ mov(ecx, eax); - __ or_(eax, Operand(edx)); + __ or_(eax, edx); JumpPatchSite patch_site(masm_); patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear); @@ -1715,32 +1715,32 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, break; } case Token::ADD: - __ add(eax, Operand(ecx)); + __ add(eax, ecx); __ j(overflow, &stub_call); break; case Token::SUB: - __ sub(eax, Operand(ecx)); + __ sub(eax, ecx); __ j(overflow, &stub_call); break; case Token::MUL: { __ SmiUntag(eax); - __ imul(eax, Operand(ecx)); + __ imul(eax, ecx); __ j(overflow, &stub_call); - __ test(eax, Operand(eax)); + __ test(eax, eax); __ j(not_zero, &done, Label::kNear); __ mov(ebx, edx); - __ or_(ebx, Operand(ecx)); + __ or_(ebx, ecx); __ j(negative, &stub_call); break; } case Token::BIT_OR: - __ or_(eax, Operand(ecx)); + __ or_(eax, ecx); break; case Token::BIT_AND: - __ and_(eax, Operand(ecx)); + __ and_(eax, ecx); break; case Token::BIT_XOR: - __ xor_(eax, Operand(ecx)); + __ xor_(eax, ecx); break; default: UNREACHABLE(); @@ -2485,9 +2485,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( STATIC_ASSERT(kPointerSize == 4); __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize)); // Calculate location of the first key name. - __ add(Operand(ebx), - Immediate(FixedArray::kHeaderSize + - DescriptorArray::kFirstIndex * kPointerSize)); + __ add(ebx, + Immediate(FixedArray::kHeaderSize + + DescriptorArray::kFirstIndex * kPointerSize)); // Loop through all the keys in the descriptor array. If one of these is the // symbol valueOf the result is false. 
Label entry, loop; @@ -2496,9 +2496,9 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ mov(edx, FieldOperand(ebx, 0)); __ cmp(edx, FACTORY->value_of_symbol()); __ j(equal, if_false); - __ add(Operand(ebx), Immediate(kPointerSize)); + __ add(ebx, Immediate(kPointerSize)); __ bind(&entry); - __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(not_equal, &loop); // Reload map as register ebx was used as temporary above. @@ -2638,7 +2638,7 @@ void FullCodeGenerator::EmitObjectEquals(ZoneList* args) { __ pop(ebx); decrement_stack_height(); - __ cmp(eax, Operand(ebx)); + __ cmp(eax, ebx); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(equal, if_true, if_false, fall_through); @@ -2792,8 +2792,8 @@ void FullCodeGenerator::EmitRandomHeapNumber(ZoneList* args) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatures::Scope fscope(SSE2); __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. - __ movd(xmm1, Operand(ebx)); - __ movd(xmm0, Operand(eax)); + __ movd(xmm1, ebx); + __ movd(xmm0, eax); __ cvtss2sd(xmm1, xmm1); __ xorps(xmm0, xmm1); __ subsd(xmm0, xmm1); @@ -3171,14 +3171,14 @@ void FullCodeGenerator::EmitSwapElements(ZoneList* args) { __ mov(index_1, Operand(esp, 1 * kPointerSize)); __ mov(index_2, Operand(esp, 0)); __ mov(temp, index_1); - __ or_(temp, Operand(index_2)); + __ or_(temp, index_2); __ JumpIfNotSmi(temp, &slow_case); // Check that both indices are valid. __ mov(temp, FieldOperand(object, JSArray::kLengthOffset)); - __ cmp(temp, Operand(index_1)); + __ cmp(temp, index_1); __ j(below_equal, &slow_case); - __ cmp(temp, Operand(index_2)); + __ cmp(temp, index_2); __ j(below_equal, &slow_case); // Bring addresses into index1 and index2. @@ -3219,7 +3219,7 @@ void FullCodeGenerator::EmitSwapElements(ZoneList* args) { __ bind(&no_remembered_set); // We are done. Drop elements from the stack, and return undefined. - __ add(Operand(esp), Immediate(3 * kPointerSize)); + __ add(esp, Immediate(3 * kPointerSize)); __ mov(eax, isolate()->factory()->undefined_value()); __ jmp(&done); @@ -3292,11 +3292,11 @@ void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList* args) { __ pop(left); Label done, fail, ok; - __ cmp(left, Operand(right)); + __ cmp(left, right); __ j(equal, &ok); // Fail if either is a non-HeapObject. __ mov(tmp, left); - __ and_(Operand(tmp), right); + __ and_(tmp, right); __ JumpIfSmi(tmp, &fail); __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset)); __ CmpInstanceType(tmp, JS_REGEXP_TYPE); @@ -3387,7 +3387,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { Operand separator_operand = Operand(esp, 2 * kPointerSize); Operand result_operand = Operand(esp, 1 * kPointerSize); Operand array_length_operand = Operand(esp, 0); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ cld(); // Check that the array is a JSArray __ JumpIfSmi(array, &bailout); @@ -3423,7 +3423,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { // Live loop registers: index, array_length, string, // scratch, string_length, elements. 
if (FLAG_debug_code) { - __ cmp(index, Operand(array_length)); + __ cmp(index, array_length); __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin"); } __ bind(&loop); @@ -3441,8 +3441,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { __ add(string_length, FieldOperand(string, SeqAsciiString::kLengthOffset)); __ j(overflow, &bailout); - __ add(Operand(index), Immediate(1)); - __ cmp(index, Operand(array_length)); + __ add(index, Immediate(1)); + __ cmp(index, array_length); __ j(less, &loop); // If array_length is 1, return elements[0], a string. @@ -3476,10 +3476,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { // to string_length. __ mov(scratch, separator_operand); __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset)); - __ sub(string_length, Operand(scratch)); // May be negative, temporarily. + __ sub(string_length, scratch); // May be negative, temporarily. __ imul(scratch, array_length_operand); __ j(overflow, &bailout); - __ add(string_length, Operand(scratch)); + __ add(string_length, scratch); __ j(overflow, &bailout); __ shr(string_length, 1); @@ -3520,7 +3520,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ bind(&loop_1_condition); __ cmp(index, array_length_operand); __ j(less, &loop_1); // End while (index < length). @@ -3561,7 +3561,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ cmp(index, array_length_operand); __ j(less, &loop_2); // End while (index < length). @@ -3602,7 +3602,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { __ lea(string, FieldOperand(string, SeqAsciiString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); - __ add(Operand(index), Immediate(1)); + __ add(index, Immediate(1)); __ cmp(index, array_length_operand); __ j(less, &loop_3); // End while (index < length). @@ -3614,7 +3614,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList* args) { __ bind(&done); __ mov(eax, result_operand); // Drop temp values from the stack, and restore context register. - __ add(Operand(esp), Immediate(3 * kPointerSize)); + __ add(esp, Immediate(3 * kPointerSize)); __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); decrement_stack_height(); @@ -3894,9 +3894,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (ShouldInlineSmiCase(expr->op())) { if (expr->op() == Token::INC) { - __ add(Operand(eax), Immediate(Smi::FromInt(1))); + __ add(eax, Immediate(Smi::FromInt(1))); } else { - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); } __ j(overflow, &stub_call, Label::kNear); // We could eliminate this smi check if we split the code at @@ -3906,9 +3906,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ bind(&stub_call); // Call stub. Undo operation first. 
if (expr->op() == Token::INC) { - __ sub(Operand(eax), Immediate(Smi::FromInt(1))); + __ sub(eax, Immediate(Smi::FromInt(1))); } else { - __ add(Operand(eax), Immediate(Smi::FromInt(1))); + __ add(eax, Immediate(Smi::FromInt(1))); } } @@ -4134,7 +4134,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { __ CallStub(&stub); decrement_stack_height(2); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ test(eax, Operand(eax)); + __ test(eax, eax); // The stub returns 0 for true. Split(zero, if_true, if_false, fall_through); break; @@ -4180,10 +4180,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { JumpPatchSite patch_site(masm_); if (inline_smi_code) { Label slow_case; - __ mov(ecx, Operand(edx)); - __ or_(ecx, Operand(eax)); + __ mov(ecx, edx); + __ or_(ecx, eax); patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear); - __ cmp(edx, Operand(eax)); + __ cmp(edx, eax); Split(cc, if_true, if_false, NULL); __ bind(&slow_case); } @@ -4195,7 +4195,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); - __ test(eax, Operand(eax)); + __ test(eax, eax); Split(cc, if_true, if_false, fall_through); } } @@ -4296,7 +4296,7 @@ void FullCodeGenerator::EnterFinallyBlock() { // Cook return address on top of stack (smi encoded Code* delta) ASSERT(!result_register().is(edx)); __ pop(edx); - __ sub(Operand(edx), Immediate(masm_->CodeObject())); + __ sub(edx, Immediate(masm_->CodeObject())); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); STATIC_ASSERT(kSmiTag == 0); __ SmiTag(edx); @@ -4312,8 +4312,8 @@ void FullCodeGenerator::ExitFinallyBlock() { // Uncook return address. __ pop(edx); __ SmiUntag(edx); - __ add(Operand(edx), Immediate(masm_->CodeObject())); - __ jmp(Operand(edx)); + __ add(edx, Immediate(masm_->CodeObject())); + __ jmp(edx); } diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc index 2271fef4a..f66b289d1 100644 --- a/src/ia32/ic-ia32.cc +++ b/src/ia32/ic-ia32.cc @@ -326,7 +326,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, // Fast case: Do the load. STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0)); __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize)); - __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value())); + __ cmp(scratch, Immediate(FACTORY->the_hole_value())); // In case the loaded value is the_hole we have to consult GetProperty // to ensure the prototype chain is searched. __ j(equal, out_of_range); @@ -394,8 +394,8 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm, // Check if element is in the range of mapped arguments. If not, jump // to the unmapped lookup with the parameter map in scratch1. __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset)); - __ sub(Operand(scratch2), Immediate(Smi::FromInt(2))); - __ cmp(key, Operand(scratch2)); + __ sub(scratch2, Immediate(Smi::FromInt(2))); + __ cmp(key, scratch2); __ j(greater_equal, unmapped_case); // Load element index and check whether it is the hole. 
@@ -432,7 +432,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, Handle fixed_array_map(masm->isolate()->heap()->fixed_array_map()); __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK); __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset)); - __ cmp(key, Operand(scratch)); + __ cmp(key, scratch); __ j(greater_equal, slow_case); return FieldOperand(backing_store, key, @@ -534,7 +534,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shr(ecx, KeyedLookupCache::kMapHashShift); __ mov(edi, FieldOperand(eax, String::kHashFieldOffset)); __ shr(edi, String::kHashShift); - __ xor_(ecx, Operand(edi)); + __ xor_(ecx, edi); __ and_(ecx, KeyedLookupCache::kCapacityMask); // Load the key (consisting of map and symbol) from the cache and @@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ shl(edi, kPointerSizeLog2 + 1); __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); - __ add(Operand(edi), Immediate(kPointerSize)); + __ add(edi, Immediate(kPointerSize)); __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys)); __ j(not_equal, &slow); @@ -559,12 +559,12 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ mov(edi, Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets)); __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset)); - __ sub(edi, Operand(ecx)); + __ sub(edi, ecx); __ j(above_equal, &property_array_property); // Load in-object property. __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset)); - __ add(ecx, Operand(edi)); + __ add(ecx, edi); __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0)); __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1); __ ret(0); @@ -651,8 +651,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { // Check that it has indexed interceptor and access checks // are not enabled for this object. __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset)); - __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask)); - __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor)); + __ and_(ecx, Immediate(kSlowCaseBitFieldMask)); + __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor)); __ j(not_zero, &slow); // Everything is fine, call runtime. @@ -846,7 +846,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, // Fast elements array, store the value to the elements backing store. __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax); // Update write barrier for the elements array address. - __ mov(edx, Operand(eax)); // Preserve the value which is returned. + __ mov(edx, eax); // Preserve the value which is returned. __ RecordWriteArray( ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); __ ret(0); diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc index c234980f9..3aaa22acc 100644 --- a/src/ia32/macro-assembler-ia32.cc +++ b/src/ia32/macro-assembler-ia32.cc @@ -64,7 +64,7 @@ void MacroAssembler::InNewSpace( and_(scratch, Immediate(~Page::kPageAlignmentMask)); } else { mov(scratch, Immediate(~Page::kPageAlignmentMask)); - and_(scratch, Operand(object)); + and_(scratch, object); } // Check that we can use a test_b. ASSERT(MemoryChunk::IN_FROM_SPACE < 8); @@ -98,7 +98,7 @@ void MacroAssembler::RememberedSetHelper( // Store pointer to buffer. mov(Operand(scratch, 0), addr); // Increment buffer top. 
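On the RememberedSetHelper hunk here: scratch holds the store-buffer top, the mov writes the address into the buffer, and the add just below bumps the top by one pointer. A simplified C++ rendering (types flattened; not the real StoreBuffer interface):

  void store_buffer_append(void*** top_cell, void* addr) {
    **top_cell = addr;  // mov(Operand(scratch, 0), addr)
    *top_cell += 1;     // add(scratch, Immediate(kPointerSize)), below
  }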
- add(Operand(scratch), Immediate(kPointerSize)); + add(scratch, Immediate(kPointerSize)); // Write back new top of buffer. mov(Operand::StaticVariable(store_buffer), scratch); // Call stub on end of buffer. @@ -217,7 +217,7 @@ void MacroAssembler::RecordWriteField( lea(dst, FieldOperand(object, offset)); if (emit_debug_code()) { Label ok; - test_b(Operand(dst), (1 << kPointerSizeLog2) - 1); + test_b(dst, (1 << kPointerSizeLog2) - 1); j(zero, &ok, Label::kNear); int3(); bind(&ok); @@ -311,7 +311,7 @@ void MacroAssembler::DebugBreak() { void MacroAssembler::Set(Register dst, const Immediate& x) { if (x.is_zero()) { - xor_(dst, Operand(dst)); // Shorter than mov. + xor_(dst, dst); // Shorter than mov. } else { mov(dst, x); } @@ -466,7 +466,7 @@ void MacroAssembler::StoreNumberToDoubleElements( SmiUntag(scratch1); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { CpuFeatures::Scope fscope(SSE2); - cvtsi2sd(scratch2, Operand(scratch1)); + cvtsi2sd(scratch2, scratch1); movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), scratch2); } else { @@ -530,7 +530,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch, Label* fail) { movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset)); - sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); cmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); j(above, fail); @@ -587,7 +587,7 @@ void MacroAssembler::AbortIfSmi(Register object) { void MacroAssembler::EnterFrame(StackFrame::Type type) { push(ebp); - mov(ebp, Operand(esp)); + mov(ebp, esp); push(esi); push(Immediate(Smi::FromInt(type))); push(Immediate(CodeObject())); @@ -614,7 +614,7 @@ void MacroAssembler::EnterExitFramePrologue() { ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); push(ebp); - mov(ebp, Operand(esp)); + mov(ebp, esp); // Reserve room for entry stack pointer and push the code object. ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); @@ -636,14 +636,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { if (save_doubles) { CpuFeatures::Scope scope(SSE2); int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; - sub(Operand(esp), Immediate(space)); + sub(esp, Immediate(space)); const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); } } else { - sub(Operand(esp), Immediate(argc * kPointerSize)); + sub(esp, Immediate(argc * kPointerSize)); } // Get the required frame alignment for the OS. @@ -663,7 +663,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) { // Setup argc and argv in callee-saved registers. int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; - mov(edi, Operand(eax)); + mov(edi, eax); lea(esi, Operand(ebp, eax, times_4, offset)); // Reserve space for argc, argv and isolate. 
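MacroAssembler::Set above keeps its xor special case because "xor reg, reg" encodes in two bytes while "mov reg, imm32" takes five; the cleanup only changes how the idiom reads. The same decision as a sketch (masm is an assumed MacroAssembler*):

  void set_register(MacroAssembler* masm, Register dst, const Immediate& value) {
    if (value.is_zero()) {
      masm->xor_(dst, dst);   // two-byte encoding that clears dst
    } else {
      masm->mov(dst, value);  // five-byte mov reg, imm32
    }
  }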
@@ -717,7 +717,7 @@ void MacroAssembler::LeaveExitFrameEpilogue() { void MacroAssembler::LeaveApiExitFrame() { - mov(esp, Operand(ebp)); + mov(esp, ebp); pop(ebp); LeaveExitFrameEpilogue(); @@ -765,7 +765,7 @@ void MacroAssembler::PopTryHandler() { STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress, isolate()))); - add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize)); + add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize)); } @@ -797,7 +797,7 @@ void MacroAssembler::Throw(Register value) { // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any // of them. Label skip; - cmp(Operand(edx), Immediate(StackHandler::ENTRY)); + cmp(edx, Immediate(StackHandler::ENTRY)); j(equal, &skip, Label::kNear); mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); bind(&skip); @@ -881,7 +881,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, // When generating debug code, make sure the lexical context is set. if (emit_debug_code()) { - cmp(Operand(scratch), Immediate(0)); + cmp(scratch, Immediate(0)); Check(not_equal, "we should not have an empty lexical context"); } // Load the global context of the current context. @@ -969,23 +969,23 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r1, r0); not_(r0); shl(r1, 15); - add(r0, Operand(r1)); + add(r0, r1); // hash = hash ^ (hash >> 12); mov(r1, r0); shr(r1, 12); - xor_(r0, Operand(r1)); + xor_(r0, r1); // hash = hash + (hash << 2); lea(r0, Operand(r0, r0, times_4, 0)); // hash = hash ^ (hash >> 4); mov(r1, r0); shr(r1, 4); - xor_(r0, Operand(r1)); + xor_(r0, r1); // hash = hash * 2057; imul(r0, r0, 2057); // hash = hash ^ (hash >> 16); mov(r1, r0); shr(r1, 16); - xor_(r0, Operand(r1)); + xor_(r0, r1); // Compute capacity mask. mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset)); @@ -999,9 +999,9 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, mov(r2, r0); // Compute the masked index: (hash + i + i * i) & mask. if (i > 0) { - add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i))); + add(r2, Immediate(NumberDictionary::GetProbeOffset(i))); } - and_(r2, Operand(r1)); + and_(r2, r1); // Scale the index by multiplying by the entry size. ASSERT(NumberDictionary::kEntrySize == 3); @@ -1057,7 +1057,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, if (scratch.is(no_reg)) { mov(result, Operand::StaticVariable(new_space_allocation_top)); } else { - mov(Operand(scratch), Immediate(new_space_allocation_top)); + mov(scratch, Immediate(new_space_allocation_top)); mov(result, Operand(scratch, 0)); } } @@ -1116,7 +1116,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size, if (!top_reg.is(result)) { mov(top_reg, result); } - add(Operand(top_reg), Immediate(object_size)); + add(top_reg, Immediate(object_size)); j(carry, gc_required); cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1127,12 +1127,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Tag result if requested. 
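Stepping back to LoadFromNumberDictionary above: the mov/shl/shr/xor choreography computes this integer hash, written out in C++ (r0 is the hash, r1 the scratch register):

  uint32_t number_dictionary_hash(uint32_t hash) {
    hash = ~hash + (hash << 15);  // not_ / shl / add
    hash ^= hash >> 12;
    hash += hash << 2;            // lea(r0, Operand(r0, r0, times_4, 0)) is hash * 5
    hash ^= hash >> 4;
    hash *= 2057;                 // imul(r0, r0, 2057)
    hash ^= hash >> 16;
    return hash;
  }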
if (top_reg.is(result)) { if ((flags & TAG_OBJECT) != 0) { - sub(Operand(result), Immediate(object_size - kHeapObjectTag)); + sub(result, Immediate(object_size - kHeapObjectTag)); } else { - sub(Operand(result), Immediate(object_size)); + sub(result, Immediate(object_size)); } } else if ((flags & TAG_OBJECT) != 0) { - add(Operand(result), Immediate(kHeapObjectTag)); + add(result, Immediate(kHeapObjectTag)); } } @@ -1170,7 +1170,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size, // We assume that element_count*element_size + header_size does not // overflow. lea(result_end, Operand(element_count, element_size, header_size)); - add(result_end, Operand(result)); + add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1215,7 +1215,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, if (!object_size.is(result_end)) { mov(result_end, object_size); } - add(result_end, Operand(result)); + add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); @@ -1235,7 +1235,7 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) { ExternalReference::new_space_allocation_top_address(isolate()); // Make sure the object has no tag before resetting top. - and_(Operand(object), Immediate(~kHeapObjectTagMask)); + and_(object, Immediate(~kHeapObjectTagMask)); #ifdef DEBUG cmp(object, Operand::StaticVariable(new_space_allocation_top)); Check(below, "Undo allocation of non allocated memory"); @@ -1274,7 +1274,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, ASSERT(kShortSize == 2); // scratch1 = length * 2 + kObjectAlignmentMask. lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask)); - and_(Operand(scratch1), Immediate(~kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate two byte string in new space. AllocateInNewSpace(SeqTwoByteString::kHeaderSize, @@ -1308,8 +1308,8 @@ void MacroAssembler::AllocateAsciiString(Register result, ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); mov(scratch1, length); ASSERT(kCharSize == 1); - add(Operand(scratch1), Immediate(kObjectAlignmentMask)); - and_(Operand(scratch1), Immediate(~kObjectAlignmentMask)); + add(scratch1, Immediate(kObjectAlignmentMask)); + and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate ascii string in new space. AllocateInNewSpace(SeqAsciiString::kHeaderSize, @@ -1443,7 +1443,7 @@ void MacroAssembler::CopyBytes(Register source, Register scratch) { Label loop, done, short_string, short_loop; // Experimentation shows that the short string loop is faster if length < 10. 
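Before CopyBytes continues, a note on the string allocators above: their add/and pairs are the standard round-up-to-alignment idiom. In C++ (kObjectAlignmentMask is V8's constant, assumed in scope):

  int object_aligned_size(int size_in_bytes) {
    return (size_in_bytes + kObjectAlignmentMask) & ~kObjectAlignmentMask;
  }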
- cmp(Operand(length), Immediate(10)); + cmp(length, Immediate(10)); j(less_equal, &short_string); ASSERT(source.is(esi)); @@ -1458,12 +1458,12 @@ void MacroAssembler::CopyBytes(Register source, mov(scratch, ecx); shr(ecx, 2); rep_movs(); - and_(Operand(scratch), Immediate(0x3)); - add(destination, Operand(scratch)); + and_(scratch, Immediate(0x3)); + add(destination, scratch); jmp(&done); bind(&short_string); - test(length, Operand(length)); + test(length, length); j(zero, &done); bind(&short_loop); @@ -1485,9 +1485,9 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, jmp(&entry); bind(&loop); mov(Operand(start_offset, 0), filler); - add(Operand(start_offset), Immediate(kPointerSize)); + add(start_offset, Immediate(kPointerSize)); bind(&entry); - cmp(start_offset, Operand(end_offset)); + cmp(start_offset, end_offset); j(less, &loop); } @@ -1496,9 +1496,9 @@ void MacroAssembler::NegativeZeroTest(Register result, Register op, Label* then_label) { Label ok; - test(result, Operand(result)); + test(result, result); j(not_zero, &ok); - test(op, Operand(op)); + test(op, op); j(sign, then_label); bind(&ok); } @@ -1510,10 +1510,10 @@ void MacroAssembler::NegativeZeroTest(Register result, Register scratch, Label* then_label) { Label ok; - test(result, Operand(result)); + test(result, result); j(not_zero, &ok); - mov(scratch, Operand(op1)); - or_(scratch, Operand(op2)); + mov(scratch, op1); + or_(scratch, op2); j(sign, then_label); bind(&ok); } @@ -1543,7 +1543,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, // If the prototype or initial map is the hole, don't return it and // simply miss the cache instead. This will allow us to allocate a // prototype object on-demand in the runtime system. - cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value())); + cmp(result, Immediate(isolate()->factory()->the_hole_value())); j(equal, miss); // If the function does not have an initial map, we're done. @@ -1612,7 +1612,7 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { void MacroAssembler::IllegalOperation(int num_arguments) { if (num_arguments > 0) { - add(Operand(esp), Immediate(num_arguments * kPointerSize)); + add(esp, Immediate(num_arguments * kPointerSize)); } mov(eax, Immediate(isolate()->factory()->undefined_value())); } @@ -1826,7 +1826,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function, Label leave_exit_frame; // Check if the result handle holds 0. - test(eax, Operand(eax)); + test(eax, eax); j(zero, &empty_handle); // It was non-zero. Dereference to get the result value. mov(eax, Operand(eax, 0)); @@ -1867,7 +1867,7 @@ MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(ApiFunction* function, mov(edi, eax); mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address())); mov(eax, Immediate(delete_extensions)); - call(Operand(eax)); + call(eax); mov(eax, edi); jmp(&leave_exit_frame); @@ -1901,10 +1901,10 @@ void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) { if (call_kind == CALL_AS_FUNCTION) { // Set to some non-zero smi by updating the least significant // byte. - mov_b(Operand(dst), 1 << kSmiTagSize); + mov_b(dst, 1 << kSmiTagSize); } else { // Set to smi zero by clearing the register. - xor_(dst, Operand(dst)); + xor_(dst, dst); } } @@ -1949,7 +1949,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } else if (!expected.reg().is(actual.reg())) { // Both expected and actual are in (different) registers. 
This // is the case when we invoke functions using call and apply. - cmp(expected.reg(), Operand(actual.reg())); + cmp(expected.reg(), actual.reg()); j(equal, &invoke); ASSERT(actual.reg().is(eax)); ASSERT(expected.reg().is(ebx)); @@ -1961,7 +1961,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, isolate()->builtins()->ArgumentsAdaptorTrampoline(); if (!code_constant.is_null()) { mov(edx, Immediate(code_constant)); - add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag)); + add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag)); } else if (!code_operand.is_reg(edx)) { mov(edx, code_operand); } @@ -2019,7 +2019,7 @@ void MacroAssembler::InvokeCode(Handle<Code> code, ASSERT(flag == JUMP_FUNCTION || has_frame()); Label done; - Operand dummy(eax); + Operand dummy(eax, 0); InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear, call_wrapper, call_kind); if (flag == CALL_FUNCTION) { @@ -2211,7 +2211,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { ret(bytes_dropped); } else { pop(scratch); - add(Operand(esp), Immediate(bytes_dropped)); + add(esp, Immediate(bytes_dropped)); push(scratch); ret(0); } @@ -2222,7 +2222,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) { void MacroAssembler::Drop(int stack_elements) { if (stack_elements > 0) { - add(Operand(esp), Immediate(stack_elements * kPointerSize)); + add(esp, Immediate(stack_elements * kPointerSize)); } } @@ -2400,7 +2400,7 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst, ASSERT(is_uintn(power + HeapNumber::kExponentBias, HeapNumber::kExponentBits)); mov(scratch, Immediate(power + HeapNumber::kExponentBias)); - movd(dst, Operand(scratch)); + movd(dst, scratch); psllq(dst, HeapNumber::kMantissaBits); } @@ -2426,8 +2426,8 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1, Label* failure) { // Check that both objects are not smis. STATIC_ASSERT(kSmiTag == 0); - mov(scratch1, Operand(object1)); - and_(scratch1, Operand(object2)); + mov(scratch1, object1); + and_(scratch1, object2); JumpIfSmi(scratch1, failure); // Load instance type for both strings. @@ -2456,12 +2456,12 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { // Make stack end at alignment and make room for num_arguments words // and the original value of esp. mov(scratch, esp); - sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize)); + sub(esp, Immediate((num_arguments + 1) * kPointerSize)); ASSERT(IsPowerOf2(frame_alignment)); and_(esp, -frame_alignment); mov(Operand(esp, num_arguments * kPointerSize), scratch); } else { - sub(Operand(esp), Immediate(num_arguments * kPointerSize)); + sub(esp, Immediate(num_arguments * kPointerSize)); } } @@ -2469,7 +2469,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { // Trashing eax is ok as it will be the return value.
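On PrepareCallCFunction above: "and esp, -frame_alignment" rounds the stack pointer down to a multiple of the (power-of-two) alignment, since -a == ~(a - 1) in two's complement:

  uintptr_t align_down(uintptr_t sp, uintptr_t alignment) {
    return sp & ~(alignment - 1);  // identical bits to sp & (0 - alignment)
  }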
- mov(Operand(eax), Immediate(function)); + mov(eax, Immediate(function)); CallCFunction(eax, num_arguments); } @@ -2482,11 +2482,11 @@ void MacroAssembler::CallCFunction(Register function, CheckStackAlignment(); } - call(Operand(function)); + call(function); if (OS::ActivationFrameAlignment() != 0) { mov(esp, Operand(esp, num_arguments * kPointerSize)); } else { - add(Operand(esp), Immediate(num_arguments * kPointerSize)); + add(esp, Immediate(num_arguments * kPointerSize)); } } @@ -2535,7 +2535,7 @@ void MacroAssembler::CheckPageFlag( and_(scratch, Immediate(~Page::kPageAlignmentMask)); } else { mov(scratch, Immediate(~Page::kPageAlignmentMask)); - and_(scratch, Operand(object)); + and_(scratch, object); } if (mask < (1 << kBitsPerByte)) { test_b(Operand(scratch, MemoryChunk::kFlagsOffset), @@ -2573,7 +2573,7 @@ void MacroAssembler::HasColor(Register object, Label other_color, word_boundary; test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear); - add(mask_scratch, Operand(mask_scratch)); // Shift left 1 by adding. + add(mask_scratch, mask_scratch); // Shift left 1 by adding. j(zero, &word_boundary, Label::kNear); test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance); @@ -2592,16 +2592,16 @@ void MacroAssembler::GetMarkBits(Register addr_reg, Register mask_reg) { ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx)); mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); - and_(Operand(bitmap_reg), addr_reg); - mov(ecx, Operand(addr_reg)); + and_(bitmap_reg, addr_reg); + mov(ecx, addr_reg); int shift = Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; shr(ecx, shift); and_(ecx, (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1)); - add(bitmap_reg, Operand(ecx)); - mov(ecx, Operand(addr_reg)); + add(bitmap_reg, ecx); + mov(ecx, addr_reg); shr(ecx, kPointerSizeLog2); and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1); mov(mask_reg, Immediate(1)); @@ -2636,7 +2636,7 @@ void MacroAssembler::EnsureNotWhite( Label ok; push(mask_scratch); // shl. May overflow making the check conservative. - add(mask_scratch, Operand(mask_scratch)); + add(mask_scratch, mask_scratch); test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); j(zero, &ok, Label::kNear); int3(); @@ -2666,7 +2666,7 @@ void MacroAssembler::EnsureNotWhite( // no GC pointers. Register instance_type = ecx; movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); - test_b(Operand(instance_type), kIsIndirectStringMask | kIsNotStringMask); + test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask); j(not_zero, value_is_white_and_not_data); // It's a non-indirect (non-cons and non-slice) string. // If it's external, the length is just ExternalString::kSize. @@ -2676,7 +2676,7 @@ void MacroAssembler::EnsureNotWhite( // set. ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); ASSERT_EQ(0, kConsStringTag & kExternalStringTag); - test_b(Operand(instance_type), kExternalStringTag); + test_b(instance_type, kExternalStringTag); j(zero, ¬_external, Label::kNear); mov(length, Immediate(ExternalString::kSize)); jmp(&is_data_object, Label::kNear); @@ -2684,9 +2684,9 @@ void MacroAssembler::EnsureNotWhite( bind(¬_external); // Sequential string, either ASCII or UC16. 
ASSERT(kAsciiStringTag == 0x04); - and_(Operand(length), Immediate(kStringEncodingMask)); - xor_(Operand(length), Immediate(kStringEncodingMask)); - add(Operand(length), Immediate(0x04)); + and_(length, Immediate(kStringEncodingMask)); + xor_(length, Immediate(kStringEncodingMask)); + add(length, Immediate(0x04)); // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted // by 2. If we multiply the string length as smi by this, it still // won't overflow a 32-bit value. @@ -2695,10 +2695,8 @@ void MacroAssembler::EnsureNotWhite( static_cast(0xffffffffu >> (2 + kSmiTagSize))); imul(length, FieldOperand(value, String::kLengthOffset)); shr(length, 2 + kSmiTagSize + kSmiShiftSize); - add(Operand(length), - Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); - and_(Operand(length), - Immediate(~kObjectAlignmentMask)); + add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); + and_(length, Immediate(~kObjectAlignmentMask)); bind(&is_data_object); // Value is a data object, and it is white. Mark it black. Since we know diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h index ff9f747eb..a1b42c280 100644 --- a/src/ia32/macro-assembler-ia32.h +++ b/src/ia32/macro-assembler-ia32.h @@ -246,6 +246,15 @@ class MacroAssembler: public Assembler { void SetCallKind(Register dst, CallKind kind); // Invoke the JavaScript function code by either calling or jumping. + void InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag, + const CallWrapper& call_wrapper, + CallKind call_kind) { + InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind); + } + void InvokeCode(const Operand& code, const ParameterCount& expected, const ParameterCount& actual, @@ -387,7 +396,7 @@ class MacroAssembler: public Assembler { void SmiTag(Register reg) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize == 1); - add(reg, Operand(reg)); + add(reg, reg); } void SmiUntag(Register reg) { sar(reg, kSmiTagSize); diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc index 394b1f998..8b0b9ab91 100644 --- a/src/ia32/regexp-macro-assembler-ia32.cc +++ b/src/ia32/regexp-macro-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2008-2009 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -134,7 +134,7 @@ int RegExpMacroAssemblerIA32::stack_limit_slack() { void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) { if (by != 0) { - __ add(Operand(edi), Immediate(by * char_size())); + __ add(edi, Immediate(by * char_size())); } } @@ -152,8 +152,8 @@ void RegExpMacroAssemblerIA32::Backtrack() { CheckPreemption(); // Pop Code* offset from backtrack stack, add Code* and jump to location. Pop(ebx); - __ add(Operand(ebx), Immediate(masm_->CodeObject())); - __ jmp(Operand(ebx)); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); } @@ -219,7 +219,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector str, int byte_offset = cp_offset * char_size(); if (check_end_of_string) { // Check that there are at least str.length() characters left in the input. 
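Context for the comparison that follows: this regexp engine keeps edi as a negative byte offset of the current position from the end of the input, so "at least n bytes remain" is edi <= -n, and the greater branch fails over to backtracking. As a one-line sketch:

  bool enough_input_left(int32_t pos_from_end, int32_t n_bytes) {
    return pos_from_end <= -n_bytes;
  }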
- __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length))); + __ cmp(edi, Immediate(-(byte_offset + byte_length))); BranchOrBacktrack(greater, on_failure); } @@ -288,7 +288,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) { Label fallthrough; __ cmp(edi, Operand(backtrack_stackpointer(), 0)); __ j(not_equal, &fallthrough); - __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop. + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop. BranchOrBacktrack(no_condition, on_equal); __ bind(&fallthrough); } @@ -300,7 +300,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( Label fallthrough; __ mov(edx, register_location(start_reg)); // Index of start of capture __ mov(ebx, register_location(start_reg + 1)); // Index of end of capture - __ sub(ebx, Operand(edx)); // Length of capture. + __ sub(ebx, edx); // Length of capture. // The length of a capture should not be negative. This can only happen // if the end of the capture is unrecorded, or at a point earlier than @@ -320,9 +320,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ push(backtrack_stackpointer()); // After this, the eax, ecx, and edi registers are available. - __ add(edx, Operand(esi)); // Start of capture - __ add(edi, Operand(esi)); // Start of text to match against capture. - __ add(ebx, Operand(edi)); // End of text to match against capture. + __ add(edx, esi); // Start of capture + __ add(edi, esi); // Start of text to match against capture. + __ add(ebx, edi); // End of text to match against capture. Label loop; __ bind(&loop); @@ -339,15 +339,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ movzx_b(ecx, Operand(edx, 0)); __ or_(ecx, 0x20); - __ cmp(eax, Operand(ecx)); + __ cmp(eax, ecx); __ j(not_equal, &fail); __ bind(&loop_increment); // Increment pointers into match and capture strings. - __ add(Operand(edx), Immediate(1)); - __ add(Operand(edi), Immediate(1)); + __ add(edx, Immediate(1)); + __ add(edi, Immediate(1)); // Compare to end of match, and loop if not done. - __ cmp(edi, Operand(ebx)); + __ cmp(edi, ebx); __ j(below, &loop); __ jmp(&success); @@ -361,9 +361,9 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Restore original value before continuing. __ pop(backtrack_stackpointer()); // Drop original value of character position. - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); // Compute new value of character position after the matched part. - __ sub(edi, Operand(esi)); + __ sub(edi, esi); } else { ASSERT(mode_ == UC16); // Save registers before calling C function. @@ -389,11 +389,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Set byte_offset2. // Found by adding negative string-end offset of current position (edi) // to end of string. - __ add(edi, Operand(esi)); + __ add(edi, esi); __ mov(Operand(esp, 1 * kPointerSize), edi); // Set byte_offset1. // Start of capture, where edx already holds string-end negative offset. - __ add(edx, Operand(esi)); + __ add(edx, esi); __ mov(Operand(esp, 0 * kPointerSize), edx); { @@ -409,10 +409,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ pop(esi); // Check if function returned non-zero for success or zero for failure. - __ or_(eax, Operand(eax)); + __ or_(eax, eax); BranchOrBacktrack(zero, on_no_match); // On success, increment position by length of capture. 
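The movzx_b/or_/cmp sequence above leans on ASCII letters differing from their lowercase forms only in bit 5, so OR-ing both characters with 0x20 gives a case-insensitive equality test for letters (non-letters were already rejected just before). Equivalently:

  bool ascii_equal_ignore_case(unsigned char a, unsigned char b) {
    return (a | 0x20) == (b | 0x20);  // only valid once both are known letters
  }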
- __ add(edi, Operand(ebx)); + __ add(edi, ebx); } __ bind(&fallthrough); } @@ -428,7 +428,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Find length of back-referenced capture. __ mov(edx, register_location(start_reg)); __ mov(eax, register_location(start_reg + 1)); - __ sub(eax, Operand(edx)); // Length to check. + __ sub(eax, edx); // Length to check. // Fail on partial or illegal capture (start of capture after end of capture). BranchOrBacktrack(less, on_no_match); // Succeed on empty capture (including no capture) @@ -436,7 +436,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Check that there are sufficient characters left in the input. __ mov(ebx, edi); - __ add(ebx, Operand(eax)); + __ add(ebx, eax); BranchOrBacktrack(greater, on_no_match); // Save register to make it available below. @@ -444,7 +444,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( // Compute pointers to match string and capture string __ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match. - __ add(edx, Operand(esi)); // Start of capture. + __ add(edx, esi); // Start of capture. __ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match Label loop; @@ -459,10 +459,10 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( } __ j(not_equal, &fail); // Increment pointers into capture and match string. - __ add(Operand(edx), Immediate(char_size())); - __ add(Operand(ebx), Immediate(char_size())); + __ add(edx, Immediate(char_size())); + __ add(ebx, Immediate(char_size())); // Check if we have reached end of match area. - __ cmp(ebx, Operand(ecx)); + __ cmp(ebx, ecx); __ j(below, &loop); __ jmp(&success); @@ -474,7 +474,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReference( __ bind(&success); // Move current character position to position after match. __ mov(edi, ecx); - __ sub(Operand(edi), esi); + __ sub(edi, esi); // Restore backtrack stackpointer. __ pop(backtrack_stackpointer()); @@ -577,17 +577,17 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, return true; case '.': { // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029) - __ mov(Operand(eax), current_character()); - __ xor_(Operand(eax), Immediate(0x01)); + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c - __ sub(Operand(eax), Immediate(0x0b)); + __ sub(eax, Immediate(0x0b)); __ cmp(eax, 0x0c - 0x0b); BranchOrBacktrack(below_equal, on_no_match); if (mode_ == UC16) { // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. - __ sub(Operand(eax), Immediate(0x2028 - 0x0b)); + __ sub(eax, Immediate(0x2028 - 0x0b)); __ cmp(eax, 0x2029 - 0x2028); BranchOrBacktrack(below_equal, on_no_match); } @@ -596,7 +596,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 'w': { if (mode_ != ASCII) { // Table is 128 entries, so all ASCII characters can be tested. - __ cmp(Operand(current_character()), Immediate('z')); + __ cmp(current_character(), Immediate('z')); BranchOrBacktrack(above, on_no_match); } ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. @@ -610,7 +610,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, Label done; if (mode_ != ASCII) { // Table is 128 entries, so all ASCII characters can be tested. 
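The xor/sub trick in the '.' class above folds both one-byte newline characters into a single unsigned range check: c ^ 0x01 maps 0x0a ('\n') to 0x0b and 0x0d ('\r') to 0x0c, so subtracting 0x0b leaves exactly 0 or 1 for newlines and a large unsigned value for everything else:

  bool is_ascii_newline(uint32_t c) {
    return ((c ^ 0x01) - 0x0b) <= 1;  // unsigned wraparound rejects other chars
  }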
- __ cmp(Operand(current_character()), Immediate('z')); + __ cmp(current_character(), Immediate('z')); __ j(above, &done); } ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char. @@ -630,10 +630,10 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 'n': { // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029). // The opposite of '.'. - __ mov(Operand(eax), current_character()); - __ xor_(Operand(eax), Immediate(0x01)); + __ mov(eax, current_character()); + __ xor_(eax, Immediate(0x01)); // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c - __ sub(Operand(eax), Immediate(0x0b)); + __ sub(eax, Immediate(0x0b)); __ cmp(eax, 0x0c - 0x0b); if (mode_ == ASCII) { BranchOrBacktrack(above, on_no_match); @@ -644,7 +644,7 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, // Compare original value to 0x2028 and 0x2029, using the already // computed (current_char ^ 0x01 - 0x0b). I.e., check for // 0x201d (0x2028 - 0x0b) or 0x201e. - __ sub(Operand(eax), Immediate(0x2028 - 0x0b)); + __ sub(eax, Immediate(0x2028 - 0x0b)); __ cmp(eax, 1); BranchOrBacktrack(above, on_no_match); __ bind(&done); @@ -707,7 +707,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ bind(&stack_limit_hit); CallCheckStackGuardState(ebx); - __ or_(eax, Operand(eax)); + __ or_(eax, eax); // If returned value is non-zero, we exit with the returned value as result. __ j(not_zero, &exit_label_); @@ -716,13 +716,13 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ mov(ebx, Operand(ebp, kStartIndex)); // Allocate space on stack for registers. - __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize)); + __ sub(esp, Immediate(num_registers_ * kPointerSize)); // Load string length. __ mov(esi, Operand(ebp, kInputEnd)); // Load input position. __ mov(edi, Operand(ebp, kInputStart)); // Set up edi to be negative offset from string end. - __ sub(edi, Operand(esi)); + __ sub(edi, esi); // Set eax to address of char before start of the string. // (effectively string position -1). @@ -744,7 +744,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { Label init_loop; __ bind(&init_loop); __ mov(Operand(ebp, ecx, times_1, +0), eax); - __ sub(Operand(ecx), Immediate(kPointerSize)); + __ sub(ecx, Immediate(kPointerSize)); __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize); __ j(greater, &init_loop); } @@ -785,12 +785,12 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { if (mode_ == UC16) { __ lea(ecx, Operand(ecx, edx, times_2, 0)); } else { - __ add(ecx, Operand(edx)); + __ add(ecx, edx); } for (int i = 0; i < num_saved_registers_; i++) { __ mov(eax, register_location(i)); // Convert to index from start of string, not end. - __ add(eax, Operand(ecx)); + __ add(eax, ecx); if (mode_ == UC16) { __ sar(eax, 1); // Convert byte index to character index. } @@ -827,7 +827,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ push(edi); CallCheckStackGuardState(ebx); - __ or_(eax, Operand(eax)); + __ or_(eax, eax); // If returning non-zero, we should end execution with the given // result as return value. __ j(not_zero, &exit_label_); @@ -862,7 +862,7 @@ Handle RegExpMacroAssemblerIA32::GetCode(Handle source) { __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. - __ or_(eax, Operand(eax)); + __ or_(eax, eax); __ j(equal, &exit_with_exception); // Otherwise use return value as new stack pointer. 
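In the capture-output loop above, register values are byte offsets from the string start, so for two-byte (UC16) subjects the sar halves them into character indices:

  int capture_char_index(int byte_index, bool is_uc16) {
    return is_uc16 ? (byte_index >> 1) : byte_index;  // the sar(eax, 1)
  }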
__ mov(backtrack_stackpointer(), eax); @@ -1191,8 +1191,8 @@ void RegExpMacroAssemblerIA32::SafeCall(Label* to) { void RegExpMacroAssemblerIA32::SafeReturn() { __ pop(ebx); - __ add(Operand(ebx), Immediate(masm_->CodeObject())); - __ jmp(Operand(ebx)); + __ add(ebx, Immediate(masm_->CodeObject())); + __ jmp(ebx); } @@ -1204,14 +1204,14 @@ void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) { void RegExpMacroAssemblerIA32::Push(Register source) { ASSERT(!source.is(backtrack_stackpointer())); // Notice: This updates flags, unlike normal Push. - __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), source); } void RegExpMacroAssemblerIA32::Push(Immediate value) { // Notice: This updates flags, unlike normal Push. - __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ sub(backtrack_stackpointer(), Immediate(kPointerSize)); __ mov(Operand(backtrack_stackpointer(), 0), value); } @@ -1220,7 +1220,7 @@ void RegExpMacroAssemblerIA32::Pop(Register target) { ASSERT(!target.is(backtrack_stackpointer())); __ mov(target, Operand(backtrack_stackpointer(), 0)); // Notice: This updates flags, unlike normal Pop. - __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); + __ add(backtrack_stackpointer(), Immediate(kPointerSize)); } diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc index f52aa8755..9b8f09662 100644 --- a/src/ia32/stub-cache-ia32.cc +++ b/src/ia32/stub-cache-ia32.cc @@ -66,8 +66,8 @@ static void ProbeTable(Isolate* isolate, __ j(not_equal, &miss); // Jump to the first instruction in the code stub. - __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(extra)); + __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(extra); __ bind(&miss); } else { @@ -92,8 +92,8 @@ static void ProbeTable(Isolate* isolate, __ mov(offset, Operand::StaticArray(offset, times_2, value_offset)); // Jump to the first instruction in the code stub. - __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag)); - __ jmp(Operand(offset)); + __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(offset); // Pop at miss. __ bind(&miss); @@ -204,8 +204,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(scratch, flags); __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize); - __ sub(scratch, Operand(name)); - __ add(Operand(scratch), Immediate(flags)); + __ sub(scratch, name); + __ add(scratch, Immediate(flags)); __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize); // Probe the secondary table. @@ -318,7 +318,7 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, Register scratch2, Label* miss_label) { __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); - __ mov(eax, Operand(scratch1)); + __ mov(eax, scratch1); __ ret(0); } @@ -406,7 +406,7 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) { // frame. 
// ----------------------------------- __ pop(scratch); - __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments)); + __ add(esp, Immediate(kPointerSize * kFastApiCallArguments)); __ push(scratch); } @@ -462,7 +462,7 @@ static MaybeObject* GenerateFastApiCall(MacroAssembler* masm, __ PrepareCallApiFunction(kApiArgc + kApiStackSpace); __ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_. - __ add(Operand(eax), Immediate(argc * kPointerSize)); + __ add(eax, Immediate(argc * kPointerSize)); __ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_. __ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_. // v8::Arguments::is_construct_call_. @@ -789,7 +789,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(eax)); + __ mov(name_reg, eax); __ RecordWriteField(receiver_reg, offset, name_reg, @@ -804,7 +804,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, Operand(eax)); + __ mov(name_reg, eax); __ RecordWriteField(scratch, offset, name_reg, @@ -943,7 +943,7 @@ Register StubCompiler::CheckPrototypes(JSObject* object, } else if (heap()->InNewSpace(prototype)) { // Get the map of the current object. __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); - __ cmp(Operand(scratch1), Immediate(Handle(current->map()))); + __ cmp(scratch1, Immediate(Handle(current->map()))); // Branch on the result of the map check. __ j(not_equal, miss); // Check access rights to the global object. This has to happen @@ -1064,7 +1064,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ pop(scratch3); // Get return address to place it below. __ push(receiver); // receiver - __ mov(scratch2, Operand(esp)); + __ mov(scratch2, esp); ASSERT(!scratch2.is(reg)); __ push(reg); // holder // Push data from AccessorInfo. @@ -1095,7 +1095,7 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object, __ PrepareCallApiFunction(kApiArgc); __ mov(ApiParameterOperand(0), ebx); // name. - __ add(Operand(ebx), Immediate(kPointerSize)); + __ add(ebx, Immediate(kPointerSize)); __ mov(ApiParameterOperand(1), ebx); // arguments pointer. // Emitting a stub call may try to allocate (if the code is not @@ -1272,7 +1272,7 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { - __ cmp(Operand(ecx), Immediate(Handle(name))); + __ cmp(ecx, Immediate(Handle(name))); __ j(not_equal, miss); } } @@ -1329,7 +1329,7 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell, Immediate(Handle(function->shared()))); __ j(not_equal, miss); } else { - __ cmp(Operand(edi), Immediate(Handle(function))); + __ cmp(edi, Immediate(Handle(function))); __ j(not_equal, miss); } } @@ -1460,13 +1460,13 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); STATIC_ASSERT(kSmiTagSize == 1); STATIC_ASSERT(kSmiTag == 0); - __ add(Operand(eax), Immediate(Smi::FromInt(argc))); + __ add(eax, Immediate(Smi::FromInt(argc))); // Get the element's length into ecx. __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset)); // Check if we could survive without allocation. 
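The push fast path above stays in smi arithmetic throughout: the array length and the backing-store capacity are both smis, and Smi::FromInt(argc) has raw bits argc << 1, so tagged addition yields the tagged new length directly (overflow aside):

  int32_t tagged_new_length(int32_t length_smi, int argc) {
    return length_smi + (argc << 1);
  }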
- __ cmp(eax, Operand(ecx)); + __ cmp(eax, ecx); __ j(greater, &attempt_to_grow_elements); // Check if value is a smi. @@ -1538,9 +1538,9 @@ MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object, __ lea(edx, FieldOperand(ebx, eax, times_half_pointer_size, FixedArray::kHeaderSize - argc * kPointerSize)); - __ cmp(edx, Operand(ecx)); + __ cmp(edx, ecx); __ j(not_equal, &call_builtin); - __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize)); + __ add(ecx, Immediate(kAllocationDelta * kPointerSize)); __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit)); __ j(above, &call_builtin); @@ -1636,7 +1636,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, // Get the array's length into ecx and calculate new length. __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset)); - __ sub(Operand(ecx), Immediate(Smi::FromInt(1))); + __ sub(ecx, Immediate(Smi::FromInt(1))); __ j(negative, &return_undefined); // Get the last element. @@ -1645,7 +1645,7 @@ MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object, __ mov(eax, FieldOperand(ebx, ecx, times_half_pointer_size, FixedArray::kHeaderSize)); - __ cmp(Operand(eax), Immediate(factory()->the_hole_value())); + __ cmp(eax, Immediate(factory()->the_hole_value())); __ j(equal, &call_builtin); // Set the array's length. @@ -2109,10 +2109,10 @@ MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object, __ sar(ebx, kBitsPerInt - 1); // Do bitwise not or do nothing depending on ebx. - __ xor_(eax, Operand(ebx)); + __ xor_(eax, ebx); // Add 1 or do nothing depending on ebx. - __ sub(eax, Operand(ebx)); + __ sub(eax, ebx); // If the result is still negative, go to the slow case. // This only happens for the most negative smi. @@ -2195,7 +2195,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall( // Allocate space for v8::Arguments implicit values. Must be initialized // before calling any runtime function. - __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize)); + __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize)); // Check that the maps haven't changed and find a Holder as a side effect. CheckPrototypes(JSObject::cast(object), edx, holder, @@ -2211,7 +2211,7 @@ MaybeObject* CallStubCompiler::CompileFastApiCall( if (result->IsFailure()) return result; __ bind(&miss); - __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize)); + __ add(esp, Immediate(kFastApiCallArguments * kPointerSize)); __ bind(&miss_before_stack_reserved); MaybeObject* maybe_result = GenerateMissBranch(); @@ -2711,7 +2711,7 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, __ IncrementCounter(counters->keyed_store_field(), 1); // Check that the name has not changed. - __ cmp(Operand(ecx), Immediate(Handle(name))); + __ cmp(ecx, Immediate(Handle(name))); __ j(not_equal, &miss); // Generate store field code. Trashes the name register. @@ -3003,7 +3003,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name, __ IncrementCounter(counters->keyed_load_field(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss); @@ -3033,7 +3033,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback( __ IncrementCounter(counters->keyed_load_callback(), 1); // Check that the name has not changed. 
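CompileMathAbsCall above is the classic branchless absolute value: sar smears the sign bit across ebx (0 or -1), xor conditionally complements, and sub conditionally adds one; as the code notes, only the most negative smi still needs the slow path:

  int32_t branchless_abs(int32_t x) {
    int32_t mask = x >> 31;    // sar ebx, kBitsPerInt - 1
    return (x ^ mask) - mask;  // xor eax, ebx; sub eax, ebx
  }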
- __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx, @@ -3068,7 +3068,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name, __ IncrementCounter(counters->keyed_load_constant_function(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi, @@ -3096,7 +3096,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ IncrementCounter(counters->keyed_load_interceptor(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); LookupResult lookup; @@ -3132,7 +3132,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { __ IncrementCounter(counters->keyed_load_array_length(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); GenerateLoadArrayLength(masm(), edx, ecx, &miss); @@ -3157,7 +3157,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { __ IncrementCounter(counters->keyed_load_string_length(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true); @@ -3182,7 +3182,7 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { __ IncrementCounter(counters->keyed_load_function_prototype(), 1); // Check that the name has not changed. - __ cmp(Operand(eax), Immediate(Handle(name))); + __ cmp(eax, Immediate(Handle(name))); __ j(not_equal, &miss); GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss); @@ -3360,7 +3360,7 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { // Move argc to ebx and retrieve and tag the JSObject to return. __ mov(ebx, eax); __ pop(eax); - __ or_(Operand(eax), Immediate(kHeapObjectTag)); + __ or_(eax, Immediate(kHeapObjectTag)); // Remove caller arguments and receiver from the stack and return. __ pop(ecx); @@ -3741,10 +3741,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // If the value is NaN or +/-infinity, the result is 0x80000000, // which is automatically zero when taken mod 2^n, n < 32. __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ sub(Operand(esp), Immediate(2 * kPointerSize)); + __ sub(esp, Immediate(2 * kPointerSize)); __ fisttp_d(Operand(esp, 0)); __ pop(ebx); - __ add(Operand(esp), Immediate(kPointerSize)); + __ add(esp, Immediate(kPointerSize)); } else { ASSERT(CpuFeatures::IsSupported(SSE2)); CpuFeatures::Scope scope(SSE2); @@ -3953,7 +3953,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( FixedArray::kHeaderSize)); __ mov(Operand(ecx, 0), eax); // Make sure to preserve the value in register eax. - __ mov(edx, Operand(eax)); + __ mov(edx, eax); __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs); } diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc index 839b7f562..cdab8f7cb 100644 --- a/test/cctest/test-assembler-ia32.cc +++ b/test/cctest/test-assembler-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. 
+// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -93,15 +93,15 @@ TEST(AssemblerIa321) { Label L, C; __ mov(edx, Operand(esp, 4)); - __ xor_(eax, Operand(eax)); // clear eax + __ xor_(eax, eax); // clear eax __ jmp(&C); __ bind(&L); - __ add(eax, Operand(edx)); - __ sub(Operand(edx), Immediate(1)); + __ add(eax, edx); + __ sub(edx, Immediate(1)); __ bind(&C); - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(not_zero, &L); __ ret(0); @@ -135,11 +135,11 @@ TEST(AssemblerIa322) { __ jmp(&C); __ bind(&L); - __ imul(eax, Operand(edx)); - __ sub(Operand(edx), Immediate(1)); + __ imul(eax, edx); + __ sub(edx, Immediate(1)); __ bind(&C); - __ test(edx, Operand(edx)); + __ test(edx, edx); __ j(not_zero, &L); __ ret(0); @@ -275,10 +275,10 @@ TEST(AssemblerIa326) { __ subsd(xmm0, xmm1); __ divsd(xmm0, xmm1); // Copy xmm0 to st(0) using eight bytes of stack. - __ sub(Operand(esp), Immediate(8)); + __ sub(esp, Immediate(8)); __ movdbl(Operand(esp, 0), xmm0); __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(8)); + __ add(esp, Immediate(8)); __ ret(0); CodeDesc desc; @@ -314,12 +314,12 @@ TEST(AssemblerIa328) { v8::internal::byte buffer[256]; Assembler assm(Isolate::Current(), buffer, sizeof buffer); __ mov(eax, Operand(esp, 4)); - __ cvtsi2sd(xmm0, Operand(eax)); + __ cvtsi2sd(xmm0, eax); // Copy xmm0 to st(0) using eight bytes of stack. - __ sub(Operand(esp), Immediate(8)); + __ sub(esp, Immediate(8)); __ movdbl(Operand(esp, 0), xmm0); __ fld_d(Operand(esp, 0)); - __ add(Operand(esp), Immediate(8)); + __ add(esp, Immediate(8)); __ ret(0); CodeDesc desc; assm.GetCode(&desc); diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc index 845f94058..1e38e4ea7 100644 --- a/test/cctest/test-disasm-ia32.cc +++ b/test/cctest/test-disasm-ia32.cc @@ -1,4 +1,4 @@ -// Copyright 2007-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -63,9 +63,9 @@ TEST(DisasmIa320) { // Short immediate instructions __ adc(eax, 12345678); - __ add(Operand(eax), Immediate(12345678)); + __ add(eax, Immediate(12345678)); __ or_(eax, 12345678); - __ sub(Operand(eax), Immediate(12345678)); + __ sub(eax, Immediate(12345678)); __ xor_(eax, 12345678); __ and_(eax, 12345678); Handle foo = FACTORY->NewFixedArray(10, TENURED); @@ -75,7 +75,7 @@ TEST(DisasmIa320) { __ mov(ebx, Operand(esp, ecx, times_2, 0)); // [esp+ecx*4] // ---- All instructions that I can think of - __ add(edx, Operand(ebx)); + __ add(edx, ebx); __ add(edx, Operand(12, RelocInfo::NONE)); __ add(edx, Operand(ebx, 0)); __ add(edx, Operand(ebx, 16)); @@ -89,7 +89,7 @@ TEST(DisasmIa320) { __ add(Operand(ebp, ecx, times_4, 12), Immediate(12)); __ nop(); - __ add(Operand(ebx), Immediate(12)); + __ add(ebx, Immediate(12)); __ nop(); __ adc(ecx, 12); __ adc(ecx, 1000); @@ -116,16 +116,16 @@ TEST(DisasmIa320) { CpuFeatures::Scope fscope(RDTSC); __ rdtsc(); } - __ movsx_b(edx, Operand(ecx)); - __ movsx_w(edx, Operand(ecx)); - __ movzx_b(edx, Operand(ecx)); - __ movzx_w(edx, Operand(ecx)); + __ movsx_b(edx, ecx); + __ movsx_w(edx, ecx); + __ movzx_b(edx, ecx); + __ movzx_w(edx, ecx); __ nop(); - __ imul(edx, Operand(ecx)); - __ shld(edx, Operand(ecx)); - __ shrd(edx, Operand(ecx)); - __ bts(Operand(edx), ecx); + __ imul(edx, ecx); + __ shld(edx, ecx); + __ shrd(edx, ecx); + __ bts(edx, ecx); __ bts(Operand(ebx, ecx, times_4, 0), ecx); __ nop(); __ pushad(); @@ -146,9 +146,9 @@ TEST(DisasmIa320) { __ nop(); __ add(edx, Operand(esp, 16)); - __ add(edx, Operand(ecx)); - __ mov_b(edx, Operand(ecx)); - __ mov_b(Operand(ecx), 6); + __ add(edx, ecx); + __ mov_b(edx, ecx); + __ mov_b(ecx, 6); __ mov_b(Operand(ebx, ecx, times_4, 10000), 6); __ mov_b(Operand(esp, 16), edx); __ mov_w(edx, Operand(esp, 16)); @@ -216,19 +216,19 @@ TEST(DisasmIa320) { __ adc(edx, 12345); - __ add(Operand(ebx), Immediate(12)); + __ add(ebx, Immediate(12)); __ add(Operand(edx, ecx, times_4, 10000), Immediate(12)); __ and_(ebx, 12345); __ cmp(ebx, 12345); - __ cmp(Operand(ebx), Immediate(12)); + __ cmp(ebx, Immediate(12)); __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12)); - __ cmpb(Operand(eax), 100); + __ cmpb(eax, 100); __ or_(ebx, 12345); - __ sub(Operand(ebx), Immediate(12)); + __ sub(ebx, Immediate(12)); __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12)); __ xor_(ebx, 12345); @@ -242,7 +242,7 @@ TEST(DisasmIa320) { __ stos(); __ sub(edx, Operand(ebx, ecx, times_4, 10000)); - __ sub(edx, Operand(ebx)); + __ sub(edx, ebx); __ test(edx, Immediate(12345)); __ test(edx, Operand(ebx, ecx, times_8, 10000)); @@ -444,8 +444,8 @@ TEST(DisasmIa320) { { if (CpuFeatures::IsSupported(SSE4_1)) { CpuFeatures::Scope scope(SSE4_1); - __ pextrd(Operand(eax), xmm0, 1); - __ pinsrd(xmm1, Operand(eax), 0); + __ pextrd(eax, xmm0, 1); + __ pinsrd(xmm1, eax, 0); } }
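Taken as a whole, the pattern this patch applies everywhere is visible in miniature in these tests: keep one Operand-based implementation per instruction and add a thin Register overload that forwards to it, so call sites never wrap registers by hand. A stand-alone sketch with hypothetical types (not V8's):

  struct Reg { int code; };
  struct Op  { explicit Op(Reg r) : base(r) {} Reg base; };
  struct Asm {
    void add(const Op& dst) { /* encode via ModR/M */ }
    void add(Reg dst) { add(Op(dst)); }  // convenience overload
  };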