__ pushfd();
__ push(ecx);
__ push(ebx);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
__ pushfd();
__ pop(eax);
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
__ xor_(eax, 0x200000); // Flip bit 21.
__ push(eax);
__ popfd();
__ pushfd();
__ pop(eax);
- __ xor_(eax, Operand(edx)); // Different if CPUID is supported.
+ __ xor_(eax, edx); // Different if CPUID is supported.
__ j(not_zero, &cpuid);
// CPUID not supported. Clear the supported features in edx:eax.
- __ xor_(eax, Operand(eax));
- __ xor_(edx, Operand(edx));
+ __ xor_(eax, eax);
+ __ xor_(edx, edx);
__ jmp(&done);
// Invoke CPUID with 1 in eax to get feature information in ecx:edx.
// Move the result from ecx:edx to edx:eax and make sure to mark the
// CPUID feature as supported.
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
__ or_(eax, 1 << CPUID);
- __ mov(edx, Operand(ecx));
+ __ mov(edx, ecx);
// Done.
__ bind(&done);
- __ mov(esp, Operand(ebp));
+ __ mov(esp, ebp);
__ pop(ebx);
__ pop(ecx);
__ popfd();
}
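The probe above is the assembler-generated form of the classic EFLAGS
bit-21 test. For reference, a minimal standalone sketch of the same idea
(C++ with GCC inline assembly, ia32 only; the function name is
illustrative, not part of V8):

#include <stdint.h>

static bool CpuidIsSupported() {
  uint32_t original, toggled;
  __asm__ volatile(
      "pushfl\n\t"
      "popl %0\n\t"              // original = EFLAGS
      "movl %0, %1\n\t"
      "xorl $0x200000, %1\n\t"   // flip bit 21 (the ID flag)
      "pushl %1\n\t"
      "popfl\n\t"                // try to write the flipped value back
      "pushfl\n\t"
      "popl %1\n\t"              // toggled = EFLAGS after the write
      : "=r"(original), "=r"(toggled) : : "cc");
  // If the flipped bit stuck, the CPU honours the ID flag and has CPUID.
  return ((original ^ toggled) & 0x200000) != 0;
}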
-void Assembler::cmpb(const Operand& dst, Register src) {
- ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+ ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x38);
- emit_operand(src, dst);
+ emit_operand(reg, op);
}
-void Assembler::cmpb(Register dst, const Operand& src) {
- ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+ ASSERT(reg.is_byte_register());
EnsureSpace ensure_space(this);
EMIT(0x3A);
- emit_operand(dst, src);
+ emit_operand(reg, op);
}
}
-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
EMIT(0x31);
- emit_operand(dst, src);
+ emit_operand(src, dst);
}
class Operand BASE_EMBEDDED {
public:
- // reg
- INLINE(explicit Operand(Register reg));
-
// XMM reg
INLINE(explicit Operand(XMMRegister xmm_reg));
Register reg() const;
private:
- byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
- // Only valid if len_ > 4.
- RelocInfo::Mode rmode_;
+ // reg
+ INLINE(explicit Operand(Register reg));
// Set the ModRM byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
inline void set_disp8(int8_t disp);
inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
+ byte buf_[6];
+ // The number of bytes in buf_.
+ unsigned int len_;
+ // Only valid if len_ > 4.
+ RelocInfo::Mode rmode_;
+
friend class Assembler;
+ friend class MacroAssembler;
+ friend class LCodeGen;
};
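The "encoded later" step referred to above happens in emit_operand, which
merges the register into bits 5..3 of the stored ModRM byte. A simplified
sketch of its shape (relocation handling omitted; buf_ and len_ are the
Operand fields declared above):

void Assembler::emit_operand(Register reg, const Operand& adr) {
  const unsigned length = adr.len_;
  // buf_[0] holds the ModRM byte with an empty 'reg' field: mod in
  // bits 7..6, r/m in bits 2..0. OR the register code into bits 5..3.
  EMIT((adr.buf_[0] & ~0x38) | (reg.code() << 3));
  // Emit any SIB byte and displacement bytes recorded by the Operand.
  for (unsigned i = 1; i < length; i++) EMIT(adr.buf_[i]);
}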
void leave();
// Moves
+ void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
void mov_b(Register dst, const Operand& src);
+ void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
void mov_b(const Operand& dst, int8_t imm8);
void mov_b(const Operand& dst, Register src);
void mov(const Operand& dst, Handle<Object> handle);
void mov(const Operand& dst, Register src);
+ void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
void movsx_b(Register dst, const Operand& src);
+ void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
void movsx_w(Register dst, const Operand& src);
+ void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
void movzx_b(Register dst, const Operand& src);
+ void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
void movzx_w(Register dst, const Operand& src);
// Conditional moves
void cmov(Condition cc, Register dst, int32_t imm32);
void cmov(Condition cc, Register dst, Handle<Object> handle);
+ void cmov(Condition cc, Register dst, Register src) {
+ cmov(cc, dst, Operand(src));
+ }
void cmov(Condition cc, Register dst, const Operand& src);
// Flag management.
void adc(Register dst, int32_t imm32);
void adc(Register dst, const Operand& src);
+ void add(Register dst, Register src) { add(dst, Operand(src)); }
void add(Register dst, const Operand& src);
void add(const Operand& dst, Register src);
+ void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
void add(const Operand& dst, const Immediate& x);
void and_(Register dst, int32_t imm32);
void and_(Register dst, const Immediate& x);
+ void and_(Register dst, Register src) { and_(dst, Operand(src)); }
void and_(Register dst, const Operand& src);
- void and_(const Operand& src, Register dst);
+ void and_(const Operand& dst, Register src);
void and_(const Operand& dst, const Immediate& x);
+ void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
void cmpb(const Operand& op, int8_t imm8);
- void cmpb(Register src, const Operand& dst);
- void cmpb(const Operand& dst, Register src);
+ void cmpb(Register reg, const Operand& op);
+ void cmpb(const Operand& op, Register reg);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
void cmp(Register reg, int32_t imm32);
void cmp(Register reg, Handle<Object> handle);
+ void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
+ void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
// Signed multiply instructions.
void imul(Register src); // edx:eax = eax * src.
+ void imul(Register dst, Register src) { imul(dst, Operand(src)); }
void imul(Register dst, const Operand& src); // dst = dst * src.
void imul(Register dst, Register src, int32_t imm32); // dst = src * imm32.
void not_(Register dst);
void or_(Register dst, int32_t imm32);
+ void or_(Register dst, Register src) { or_(dst, Operand(src)); }
void or_(Register dst, const Operand& src);
void or_(const Operand& dst, Register src);
+ void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
void sbb(Register dst, const Operand& src);
+ void shld(Register dst, Register src) { shld(dst, Operand(src)); }
void shld(Register dst, const Operand& src);
void shl(Register dst, uint8_t imm8);
void shl_cl(Register dst);
+ void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
void shrd(Register dst, const Operand& src);
void shr(Register dst, uint8_t imm8);
void shr_cl(Register dst);
+ void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
void sub(const Operand& dst, const Immediate& x);
+ void sub(Register dst, Register src) { sub(dst, Operand(src)); }
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
void test(Register reg, const Immediate& imm);
+ void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
void test(Register reg, const Operand& op);
void test_b(Register reg, const Operand& op);
void test(const Operand& op, const Immediate& imm);
+ void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
void test_b(const Operand& op, uint8_t imm8);
void xor_(Register dst, int32_t imm32);
+ void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
void xor_(Register dst, const Operand& src);
- void xor_(const Operand& src, Register dst);
+ void xor_(const Operand& dst, Register src);
+ void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
+ void bts(Register dst, Register src) { bts(Operand(dst), src); }
void bts(const Operand& dst, Register src);
// Miscellaneous
void call(Label* L);
void call(byte* entry, RelocInfo::Mode rmode);
int CallSize(const Operand& adr);
+ void call(Register reg) { call(Operand(reg)); }
void call(const Operand& adr);
int CallSize(Handle<Code> code, RelocInfo::Mode mode);
void call(Handle<Code> code,
// unconditional jump to L
void jmp(Label* L, Label::Distance distance = Label::kFar);
void jmp(byte* entry, RelocInfo::Mode rmode);
+ void jmp(Register reg) { jmp(Operand(reg)); }
void jmp(const Operand& adr);
void jmp(Handle<Code> code, RelocInfo::Mode rmode);
void cvttss2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, const Operand& src);
+ void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, XMMRegister src);
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
+ void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
- void movd(const Operand& src, XMMRegister dst);
+ void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+ void movd(const Operand& dst, XMMRegister src);
void movsd(XMMRegister dst, XMMRegister src);
void movss(XMMRegister dst, const Operand& src);
- void movss(const Operand& src, XMMRegister dst);
+ void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
void pand(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+ void pextrd(Register dst, XMMRegister src, int8_t offset) {
+ pextrd(Operand(dst), src, offset);
+ }
void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+ void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+ pinsrd(dst, Operand(src), offset);
+ }
void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
// Parallel XMM operations.
- void movntdqa(XMMRegister src, const Operand& dst);
+ void movntdqa(XMMRegister dst, const Operand& src);
void movntdq(const Operand& dst, XMMRegister src);
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal prefetch.
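The declaration this comment precedes is elided here; assuming the usual
ia32 signature void prefetch(const Operand& src, int level), a usage
sketch (the address operand is illustrative):

  __ prefetch(Operand(esi, 0), 1);  // hint [esi] into the L1 cache
  __ prefetch(Operand(esi, 0), 0);  // level 0: non-temporal hint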
// JumpToExternalReference expects eax to contain the number of arguments
// including the receiver and the extra arguments.
- __ add(Operand(eax), Immediate(num_extra_args + 1));
+ __ add(eax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(Operand(ebx));
+ __ jmp(ebx);
// edi: called object
// eax: number of arguments
Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
// esi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmp(esi, Operand(edi));
+ __ cmp(esi, edi);
__ Assert(less_equal,
"Unexpected number of pre-allocated property fields.");
}
// eax: initial map
// ebx: JSObject
// edi: start of next object
- __ or_(Operand(ebx), Immediate(kHeapObjectTag));
+ __ or_(ebx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
__ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
__ movzx_b(ecx,
FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
- __ add(edx, Operand(ecx));
+ __ add(edx, ecx);
// Calculate unused properties past the end of the in-object properties.
__ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
- __ sub(edx, Operand(ecx));
+ __ sub(edx, ecx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, "Property allocation count failed.");
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(eax, 0), edx);
- __ add(Operand(eax), Immediate(kPointerSize));
+ __ add(eax, Immediate(kPointerSize));
__ bind(&entry);
- __ cmp(eax, Operand(ecx));
+ __ cmp(eax, ecx);
__ j(below, &loop);
}
// the JSObject
// ebx: JSObject
// edi: FixedArray
- __ or_(Operand(edi), Immediate(kHeapObjectTag)); // add the heap tag
+ __ or_(edi, Immediate(kHeapObjectTag)); // add the heap tag
__ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
// edi: function (constructor)
__ push(edi);
__ CallRuntime(Runtime::kNewObject, 1);
- __ mov(ebx, Operand(eax)); // store result in ebx
+ __ mov(ebx, eax); // store result in ebx
// New object allocated.
// ebx: newly allocated object
// Copy arguments and receiver to the expression stack.
Label loop, entry;
- __ mov(ecx, Operand(eax));
+ __ mov(ecx, eax);
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(ebx, ecx, times_4, 0));
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
__ push(Operand(edx, 0)); // dereference handle
- __ inc(Operand(ecx));
+ __ inc(ecx);
__ bind(&entry);
- __ cmp(ecx, Operand(eax));
+ __ cmp(ecx, eax);
__ j(not_equal, &loop);
// Get the function from the stack and call it.
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(eax));
+ __ jmp(eax);
}
// Do a tail-call of the compiled function.
__ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(Operand(eax));
+ __ jmp(eax);
}
// 1. Make sure we have at least one argument.
{ Label done;
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &done);
__ pop(ebx);
__ push(Immediate(factory->undefined_value()));
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(zero, &function);
__ Set(ebx, Immediate(0));
__ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(Operand(edx), Immediate(1));
+ __ cmp(edx, Immediate(1));
__ j(not_equal, &non_proxy);
__ pop(edx); // return address
__ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
__ SmiUntag(ebx);
__ SetCallKind(ecx, CALL_AS_METHOD);
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
ParameterCount expected(0);
- __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
- NullCallWrapper(), CALL_AS_METHOD);
+ __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
+ CALL_AS_METHOD);
}
__ mov(edi, Operand::StaticVariable(real_stack_limit));
// Make ecx the space we have left. The stack might already be overflowed
// here which will cause ecx to become negative.
- __ mov(ecx, Operand(esp));
- __ sub(ecx, Operand(edi));
+ __ mov(ecx, esp);
+ __ sub(ecx, edi);
// Make edx the space we need for the array when it is unrolled onto the
// stack.
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
__ shl(edx, kPointerSizeLog2 - kSmiTagSize);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, Operand(edx));
+ __ cmp(ecx, edx);
__ j(greater, &okay); // Signed comparison.
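// In C terms the check above is (sketch):
//   intptr_t remaining = esp - real_stack_limit;  // may be negative
//   intptr_t needed = n_args * kPointerSize;      // smi count scaled to bytes
//   if (remaining > needed) goto okay;            // else: out of stack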
// Out of stack space.
__ bind(&call_to_object);
__ push(ebx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ mov(ebx, Operand(eax));
+ __ mov(ebx, eax);
__ jmp(&push_receiver);
// Use the current global receiver object as the receiver.
// Update the index on the stack and in register eax.
__ mov(eax, Operand(ebp, kIndexOffset));
- __ add(Operand(eax), Immediate(1 << kSmiTagSize));
+ __ add(eax, Immediate(1 << kSmiTagSize));
__ mov(Operand(ebp, kIndexOffset), eax);
__ bind(&entry);
__ jmp(&entry);
__ bind(&loop);
__ mov(Operand(scratch1, 0), factory->the_hole_value());
- __ add(Operand(scratch1), Immediate(kPointerSize));
+ __ add(scratch1, Immediate(kPointerSize));
__ bind(&entry);
- __ cmp(scratch1, Operand(scratch2));
+ __ cmp(scratch1, scratch2);
__ j(below, &loop);
}
}
__ bind(&loop);
__ stos();
__ bind(&entry);
- __ cmp(edi, Operand(elements_array_end));
+ __ cmp(edi, elements_array_end);
__ j(below, &loop);
__ bind(&done);
}
__ push(eax);
// Check for array construction with zero arguments.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &argc_one_or_more);
__ bind(&empty_array);
__ j(not_equal, &argc_two_or_more);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(not_zero, &not_empty_array);
// The single argument passed is zero, so we jump to the code above used to
__ mov(eax, Operand(esp, i * kPointerSize));
__ mov(Operand(esp, (i + 1) * kPointerSize), eax);
}
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // Drop two stack slots.
+ __ add(esp, Immediate(2 * kPointerSize)); // Drop two stack slots.
__ push(Immediate(0)); // Treat this as a call with argc of zero.
__ jmp(&empty_array);
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
__ mov(Operand(edx, 0), eax);
- __ add(Operand(edx), Immediate(kPointerSize));
+ __ add(edx, Immediate(kPointerSize));
__ bind(&entry);
__ dec(ecx);
__ j(greater_equal, &loop);
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, Operand(ecx));
+ __ cmp(edi, ecx);
__ Assert(equal, "Unexpected String function");
}
// Load the first argument into eax and get rid of the rest
// (including the receiver).
Label no_arguments;
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, &no_arguments);
__ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
__ pop(ecx);
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ push(ebp);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// Store the arguments adaptor context sentinel.
__ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
Label enough, too_few;
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(less, &too_few);
__ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
__ j(equal, &dont_adapt_arguments);
__ bind(&copy);
__ inc(edi);
__ push(Operand(eax, 0));
- __ sub(Operand(eax), Immediate(kPointerSize));
- __ cmp(edi, Operand(ebx));
+ __ sub(eax, Immediate(kPointerSize));
+ __ cmp(edi, ebx);
__ j(less, &copy);
__ jmp(&invoke);
}
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
// ebx = expected - actual.
- __ sub(ebx, Operand(eax));
+ __ sub(ebx, eax);
// eax = -actual - 1
__ neg(eax);
- __ sub(Operand(eax), Immediate(1));
+ __ sub(eax, Immediate(1));
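// Worked example: with actual == 2, eax is now -3; the loop below
// increments before testing, so it pushes three words (receiver plus two
// arguments) and stops once eax reaches zero.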
Label copy;
__ bind(&copy);
__ inc(eax);
__ push(Operand(edi, 0));
- __ sub(Operand(edi), Immediate(kPointerSize));
- __ test(eax, Operand(eax));
+ __ sub(edi, Immediate(kPointerSize));
+ __ test(eax, eax);
__ j(not_zero, &copy);
// Fill remaining expected arguments with undefined values.
__ bind(&fill);
__ inc(eax);
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(less, &fill);
}
__ bind(&invoke);
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ call(Operand(edx));
+ __ call(edx);
// Leave frame and return.
LeaveArgumentsAdaptorFrame(masm);
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ jmp(Operand(edx));
+ __ jmp(edx);
}
// If the result was -1 it means that we couldn't optimize the
// function. Just return and continue in the unoptimized version.
Label skip;
- __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+ __ cmp(eax, Immediate(Smi::FromInt(-1)));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
__ bind(&check_heap_number);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+ __ cmp(ebx, Immediate(factory->heap_number_map()));
__ j(not_equal, &call_builtin, Label::kNear);
__ ret(0);
}
// Return and remove the on-stack parameter.
- __ mov(esi, Operand(eax));
+ __ mov(esi, eax);
__ ret(1 * kPointerSize);
// Need to collect. Call into runtime system.
__ pushad();
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
- __ sub(Operand(esp), Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
__ movdbl(Operand(esp, i * kDoubleSize), reg);
XMMRegister reg = XMMRegister::from_code(i);
__ movdbl(reg, Operand(esp, i * kDoubleSize));
}
- __ add(Operand(esp), Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
__ popad();
__ ret(0);
// Check whether the exponent is too big for a 64 bit signed integer.
static const uint32_t kTooBigExponent =
(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
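// Worked example: kTooBigExponent encodes biased exponent 1023 + 63 = 1086,
// i.e. a magnitude of at least 2^63, which a 64 bit signed integer cannot
// represent.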
// Load x87 register with heap number.
__ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
// Reserve space for 64 bit answer.
- __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ __ sub(esp, Immediate(sizeof(uint64_t))); // Nolint.
// Do conversion, which cannot fail because we checked the exponent.
__ fisttp_d(Operand(esp, 0));
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
- __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ __ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
} else {
// Load ecx with zero. We use this either for the final shift or
// for the answer.
- __ xor_(ecx, Operand(ecx));
+ __ xor_(ecx, ecx);
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
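// Worked example: non_smi_exponent is biased exponent 1023 + 30 = 1053,
// matching exactly the 1.xxx * 2^30 doubles described above.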
- __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ __ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
__ j(equal, &right_exponent, Label::kNear);
// >>> operator has a tendency to generate numbers with an exponent of 31.
const uint32_t big_non_smi_exponent =
(HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
- __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
// in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
// Shift down 21 bits to get the most significant 11 bits or the low
// mantissa word.
__ shr(ecx, 32 - big_shift_distance);
- __ or_(ecx, Operand(scratch2));
+ __ or_(ecx, scratch2);
// We have the answer in ecx, but we may need to negate it.
- __ test(scratch, Operand(scratch));
+ __ test(scratch, scratch);
__ j(positive, &done, Label::kNear);
__ neg(ecx);
__ jmp(&done, Label::kNear);
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(Operand(scratch2), Immediate(zero_exponent));
+ __ sub(scratch2, Immediate(zero_exponent));
// ecx already has a Smi zero.
__ j(less, &done, Label::kNear);
// We have a shifted exponent between 0 and 30 in scratch2.
__ shr(scratch2, HeapNumber::kExponentShift);
__ mov(ecx, Immediate(30));
- __ sub(ecx, Operand(scratch2));
+ __ sub(ecx, scratch2);
__ bind(&right_exponent);
// Here ecx is the shift, scratch is the exponent word.
// Shift down 22 bits to get the most significant 10 bits or the low
// mantissa word.
__ shr(scratch2, 32 - shift_distance);
- __ or_(scratch2, Operand(scratch));
+ __ or_(scratch2, scratch);
// Move down according to the exponent.
__ shr_cl(scratch2);
// Now the unsigned answer is in scratch2. We need to move it to ecx and
// we may need to fix the sign.
Label negative;
- __ xor_(ecx, Operand(ecx));
+ __ xor_(ecx, ecx);
__ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
__ j(greater, &negative, Label::kNear);
__ mov(ecx, scratch2);
__ jmp(&done, Label::kNear);
__ bind(&negative);
- __ sub(ecx, Operand(scratch2));
+ __ sub(ecx, scratch2);
__ bind(&done);
}
}
__ JumpIfNotSmi(eax, non_smi, non_smi_near);
// We can't handle -0 with smis, so use a type transition for that case.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, slow, slow_near);
// Try optimistic subtraction '0 - value', saving operand in eax for undo.
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
__ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
__ j(overflow, undo, undo_near);
__ ret(0);
}
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
}
__ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
Immediate(HeapNumber::kSignMask)); // Flip sign.
} else {
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
// edx: operand
Label slow_allocate_heapnumber, heapnumber_allocated;
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ecx));
+ __ cvtsi2sd(xmm0, ecx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ push(ecx);
// eax in case the result is not a smi.
ASSERT(!left.is(ecx) && !right.is(ecx));
__ mov(ecx, right);
- __ or_(right, Operand(left)); // Bitwise or is commutative.
+ __ or_(right, left); // Bitwise or is commutative.
combined = right;
break;
case Token::DIV:
case Token::MOD:
__ mov(combined, right);
- __ or_(combined, Operand(left));
+ __ or_(combined, left);
break;
case Token::SHL:
// for the smi check register.
ASSERT(!left.is(ecx) && !right.is(ecx));
__ mov(ecx, right);
- __ or_(right, Operand(left));
+ __ or_(right, left);
combined = right;
break;
case Token::BIT_XOR:
ASSERT(right.is(eax));
- __ xor_(right, Operand(left)); // Bitwise xor is commutative.
+ __ xor_(right, left); // Bitwise xor is commutative.
break;
case Token::BIT_AND:
ASSERT(right.is(eax));
- __ and_(right, Operand(left)); // Bitwise and is commutative.
+ __ and_(right, left); // Bitwise and is commutative.
break;
case Token::SHL:
case Token::ADD:
ASSERT(right.is(eax));
- __ add(right, Operand(left)); // Addition is commutative.
+ __ add(right, left); // Addition is commutative.
__ j(overflow, &use_fp_on_smis);
break;
case Token::SUB:
- __ sub(left, Operand(right));
+ __ sub(left, right);
__ j(overflow, &use_fp_on_smis);
__ mov(eax, left);
break;
// Remove tag from one of the operands (but keep sign).
__ SmiUntag(right);
// Do multiplication.
- __ imul(right, Operand(left)); // Multiplication is commutative.
+ __ imul(right, left); // Multiplication is commutative.
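// Untagging only one operand keeps the product tagged: (2 * a) * b == 2 * (a * b).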
__ j(overflow, &use_fp_on_smis);
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(right, combined, &use_fp_on_smis);
// save the left operand.
__ mov(edi, left);
// Check for 0 divisor.
- __ test(right, Operand(right));
+ __ test(right, right);
__ j(zero, &use_fp_on_smis);
// Sign extend left into edx:eax.
ASSERT(left.is(eax));
// Check for negative zero result. Use combined = left | right.
__ NegativeZeroTest(eax, combined, &use_fp_on_smis);
// Check that the remainder is zero.
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(not_zero, &use_fp_on_smis);
// Tag the result and store it in register eax.
__ SmiTag(eax);
case Token::MOD:
// Check for 0 divisor.
- __ test(right, Operand(right));
+ __ test(right, right);
__ j(zero, &not_smis);
// Sign extend left into edx:eax.
break;
case Token::ADD:
// Revert right = right + left.
- __ sub(right, Operand(left));
+ __ sub(right, left);
break;
case Token::SUB:
// Revert left = left - right.
- __ add(left, Operand(right));
+ __ add(left, right);
break;
case Token::MUL:
// Right was clobbered but a copy is in ebx.
ASSERT_EQ(Token::SHL, op_);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(left));
+ __ cvtsi2sd(xmm0, left);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), left);
switch (op_) {
case Token::ADD:
// Revert right = right + left.
- __ sub(right, Operand(left));
+ __ sub(right, left);
break;
case Token::SUB:
// Revert left = left - right.
- __ add(left, Operand(right));
+ __ add(left, right);
break;
case Token::MUL:
// Right was clobbered but a copy is in ebx.
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
__ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, Operand(ecx));
+ __ cvtsi2sd(xmm2, ecx);
__ ucomisd(xmm0, xmm2);
__ j(not_zero, &not_int32);
__ j(carry, &not_int32);
FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
&not_int32);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
+ __ mov(ebx, eax); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
+ __ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ cmp(edx, factory->undefined_value());
__ j(not_equal, &check, Label::kNear);
if (Token::IsBitOp(op_)) {
- __ xor_(edx, Operand(edx));
+ __ xor_(edx, edx);
} else {
__ mov(edx, Immediate(factory->nan_value()));
}
__ cmp(eax, factory->undefined_value());
__ j(not_equal, &done, Label::kNear);
if (Token::IsBitOp(op_)) {
- __ xor_(eax, Operand(eax));
+ __ xor_(eax, eax);
} else {
__ mov(eax, Immediate(factory->nan_value()));
}
use_sse3_,
&not_floats);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
+ __ mov(ebx, eax); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
+ __ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
use_sse3_,
&call_runtime);
switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::BIT_OR: __ or_(eax, ecx); break;
+ case Token::BIT_AND: __ and_(eax, ecx); break;
+ case Token::BIT_XOR: __ xor_(eax, ecx); break;
case Token::SAR: __ sar_cl(eax); break;
case Token::SHL: __ shl_cl(eax); break;
case Token::SHR: __ shr_cl(eax); break;
if (op_ != Token::SHR) {
__ bind(&non_smi_result);
// Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
+ __ mov(ebx, eax); // ebx: result
Label skip_allocation;
switch (mode_) {
case OVERWRITE_LEFT:
// Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- __ cvtsi2sd(xmm0, Operand(ebx));
+ __ cvtsi2sd(xmm0, ebx);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(Operand(esp, 1 * kPointerSize), ebx);
__ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
// Now edx can be overwritten losing one of the arguments as we are
// now done and will not need it any more.
- __ mov(edx, Operand(ebx));
+ __ mov(edx, ebx);
__ bind(&skip_allocation);
// Use object in edx as a result holder
- __ mov(eax, Operand(edx));
+ __ mov(eax, edx);
break;
}
case OVERWRITE_RIGHT:
// Then load the low and high words of the double into ebx, edx.
STATIC_ASSERT(kSmiTagSize == 1);
__ sar(eax, 1);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ sub(esp, Immediate(2 * kPointerSize));
__ mov(Operand(esp, 0), eax);
__ fild_s(Operand(esp, 0));
__ fst_d(Operand(esp, 0));
// Check if input is a HeapNumber.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+ __ cmp(ebx, Immediate(factory->heap_number_map()));
__ j(not_equal, &runtime_call);
// Input is a HeapNumber. Push it on the FPU stack and load its
// low and high words into ebx, edx.
} else { // UNTAGGED.
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
- __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
+ __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
} else {
__ pshufd(xmm0, xmm1, 0x1);
- __ movd(Operand(edx), xmm0);
+ __ movd(edx, xmm0);
}
- __ movd(Operand(ebx), xmm1);
+ __ movd(ebx, xmm1);
}
// ST[0] or xmm1 == double value
// Compute hash (the shifts are arithmetic):
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
__ mov(ecx, ebx);
- __ xor_(ecx, Operand(edx));
+ __ xor_(ecx, edx);
__ mov(eax, ecx);
__ sar(eax, 16);
- __ xor_(ecx, Operand(eax));
+ __ xor_(ecx, eax);
__ mov(eax, ecx);
__ sar(eax, 8);
- __ xor_(ecx, Operand(eax));
+ __ xor_(ecx, eax);
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ and_(Operand(ecx),
+ __ and_(ecx,
Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
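// Equivalent C for the hash above (sketch; kCacheSize is a power of two,
// as the ASSERT above checks):
//   int32_t h = (int32_t)(low ^ high);
//   h ^= h >> 16;  // arithmetic shifts, matching 'sar'
//   h ^= h >> 8;
//   int index = h & (kCacheSize - 1);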
// ST[0] or xmm1 == double value.
__ mov(eax, Operand(eax, cache_array_index));
// Eax points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
// Check that the layout of cache elements match expectations.
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ __ add(esp, Immediate(kDoubleSize));
}
GenerateOperation(masm);
__ mov(Operand(ecx, 0), ebx);
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
- __ sub(Operand(esp), Immediate(kDoubleSize));
+ __ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm);
__ fstp_d(Operand(esp, 0));
__ movdbl(xmm1, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
+ __ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
{
// If argument is outside the range -2^63..2^63, fsin/cos doesn't
// work. We must reduce it to the appropriate range.
__ mov(edi, edx);
- __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
+ __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
int supported_exponent_limit =
(63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
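// Same biased-exponent trick as the int64 check earlier:
// 63 + kExponentBias (1023) = 1086, so any |x| >= 2^63 takes the
// out-of-range path below.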
- __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+ __ cmp(edi, Immediate(supported_exponent_limit));
__ j(below, &in_range, Label::kNear);
// Check for infinity and NaN. Both return NaN for sin.
- __ cmp(Operand(edi), Immediate(0x7ff00000));
+ __ cmp(edi, Immediate(0x7ff00000));
Label non_nan_result;
__ j(not_equal, &non_nan_result, Label::kNear);
// Input is +/-Infinity or NaN. Result is NaN.
__ push(Immediate(0x7ff80000));
__ push(Immediate(0));
__ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ add(esp, Immediate(2 * kPointerSize));
__ jmp(&done, Label::kNear);
__ bind(&non_nan_result);
__ fwait();
__ fnstsw_ax();
// Clear if Illegal Operand or Zero Division exceptions are set.
- __ test(Operand(eax), Immediate(5));
+ __ test(eax, Immediate(5));
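// (5 == 0b101 selects IE, bit 0, and ZE, bit 2, of the x87 status word.)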
__ j(zero, &no_exceptions, Label::kNear);
__ fnclex();
__ bind(&no_exceptions);
__ fprem1();
__ fwait();
__ fnstsw_ax();
- __ test(Operand(eax), Immediate(0x400 /* C2 */));
+ __ test(eax, Immediate(0x400 /* C2 */));
// If C2 is set, computation only has partial result. Loop to
// continue computation.
__ j(not_zero, &partial_remainder_loop);
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
+ __ cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ bind(&done);
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
__ SmiUntag(edx); // Untag smi before converting to float.
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
__ SmiUntag(eax); // Untag smi before converting to float.
- __ cvtsi2sd(xmm1, Operand(eax));
+ __ cvtsi2sd(xmm1, eax);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
__ mov(scratch, left);
ASSERT(!scratch.is(right)); // We're about to clobber scratch.
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm0, Operand(scratch));
+ __ cvtsi2sd(xmm0, scratch);
__ mov(scratch, right);
__ SmiUntag(scratch);
- __ cvtsi2sd(xmm1, Operand(scratch));
+ __ cvtsi2sd(xmm1, scratch);
}
Label* non_int32,
Register scratch) {
__ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, Operand(scratch));
+ __ cvtsi2sd(xmm2, scratch);
__ ucomisd(xmm0, xmm2);
__ j(not_zero, non_int32);
__ j(carry, non_int32);
__ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, Operand(scratch));
+ __ cvtsi2sd(xmm2, scratch);
__ ucomisd(xmm1, xmm2);
__ j(not_zero, non_int32);
__ j(carry, non_int32);
// Save 1 in xmm3 - we need this several times later on.
__ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, Operand(ecx));
+ __ cvtsi2sd(xmm3, ecx);
Label exponent_nonsmi;
Label base_nonsmi;
// Optimized version when both exponent and base are smis.
Label powi;
__ SmiUntag(edx);
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ jmp(&powi);
// exponent is smi and base is a heapnumber.
__ bind(&base_nonsmi);
// base has the original value of the exponent - if the exponent is
// negative return 1/result.
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, Operand(ecx));
+ __ movd(xmm0, ecx);
__ cvtss2sd(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);
Label handle_special_cases;
__ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
__ SmiUntag(edx);
- __ cvtsi2sd(xmm0, Operand(edx));
+ __ cvtsi2sd(xmm0, edx);
__ jmp(&handle_special_cases, Label::kNear);
__ bind(&base_not_smi);
__ j(not_equal, &call_runtime);
__ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
__ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+ __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
// Test for -0.5.
// Load xmm2 with -0.5.
__ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, Operand(ecx));
+ __ movd(xmm2, ecx);
__ cvtss2sd(xmm2, xmm2);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
Label adaptor;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor, Label::kNear);
// Check index against formal parameters count limit passed in
// through register eax. Use unsigned comparison to get negative
// check for free.
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(above_equal, &slow, Label::kNear);
// Read the argument from the stack and return it.
// comparison to get negative check for free.
__ bind(&adaptor);
__ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, Operand(ecx));
+ __ cmp(edx, ecx);
__ j(above_equal, &slow, Label::kNear);
// Read the argument from the stack and return it.
Label runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime, Label::kNear);
// Patch the arguments.length and the parameters pointer.
Label adaptor_frame, try_allocate;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
// esp[4] = parameter count (tagged)
// esp[8] = address of receiver argument
// Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(less_equal, &try_allocate, Label::kNear);
__ mov(ebx, ecx);
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
__ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+ __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
__ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
__ mov(edi, Operand(edi,
Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &skip_parameter_map);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
__ mov(eax, Operand(esp, 2 * kPointerSize));
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ add(ebx, Operand(esp, 4 * kPointerSize));
- __ sub(ebx, Operand(eax));
+ __ sub(ebx, eax);
__ mov(ecx, FACTORY->the_hole_value());
__ mov(edx, edi);
__ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
__ jmp(&parameters_test, Label::kNear);
__ bind(&parameters_loop);
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(eax, Immediate(Smi::FromInt(1)));
__ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
__ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ add(ebx, Immediate(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &parameters_loop, Label::kNear);
__ pop(ecx);
Label arguments_loop, arguments_test;
__ mov(ebx, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 4 * kPointerSize));
- __ sub(Operand(edx), ebx); // Is there a smarter way to do negative scaling?
- __ sub(Operand(edx), ebx);
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
+ __ sub(edx, ebx);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ sub(Operand(edx), Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
__ mov(eax, Operand(edx, 0));
__ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+ __ add(ebx, Immediate(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(less, &arguments_loop, Label::kNear);
// Restore.
Label adaptor_frame, try_allocate, runtime;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// Get the length from the frame.
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
// Do the allocation of both objects in one go.
__ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
// If there are no actual arguments, we're done.
Label done;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &done, Label::kNear);
// Get the parameters pointer from the stack.
__ bind(&loop);
__ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
__ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(Operand(edi), Immediate(kPointerSize));
- __ sub(Operand(edx), Immediate(kPointerSize));
+ __ add(edi, Immediate(kPointerSize));
+ __ sub(edx, Immediate(kPointerSize));
__ dec(ecx);
__ j(not_zero, &loop);
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
__ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
// ecx: RegExp data (FixedArray)
// Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
__ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
__ j(not_equal, &runtime);
// ecx: RegExp data (FixedArray)
// uses the assumption that smis are 2 * their untagged value.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ __ add(edx, Immediate(2)); // edx was a smi.
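// Worked example: edx held 2 * number_of_captures as a smi; adding 2 yields
// (number_of_captures + 1) * 2 without ever untagging.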
// Check that the static offsets vector buffer is large enough.
__ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
__ j(above, &runtime);
// string length. A negative value will be greater (unsigned comparison).
__ mov(eax, Operand(esp, kPreviousIndexOffset));
__ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
__ j(above_equal, &runtime);
// ecx: RegExp data (FixedArray)
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
__ SmiUntag(eax);
- __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, Operand(eax));
+ __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
__ j(greater, &runtime);
// Reset offset for possibly sliced string.
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
__ j(zero, &seq_two_byte_string, Label::kNear);
// Any other flat string must be a flat ascii string.
- __ and_(Operand(ebx),
- Immediate(kIsNotStringMask | kStringRepresentationMask));
+ __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask));
__ j(zero, &seq_ascii_string, Label::kNear);
// Check for flat cons string or sliced string.
Label cons_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+ __ cmp(ebx, Immediate(kExternalStringTag));
__ j(less, &cons_string);
__ j(equal, &runtime);
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
__ mov(esi, FieldOperand(esi, String::kLengthOffset));
- __ add(esi, Operand(edi)); // Calculate input end wrt offset.
+ __ add(esi, edi); // Calculate input end wrt offset.
__ SmiUntag(edi);
- __ add(ebx, Operand(edi)); // Calculate input start wrt offset.
+ __ add(ebx, edi); // Calculate input start wrt offset.
// ebx: start index of the input string
// esi: end index of the input string
Label setup_two_byte, setup_rest;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
__ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
__ bind(&setup_rest);
// Locate the code entry and call it.
- __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(Operand(edx));
+ __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(edx);
// Drop arguments and come back to JS mode.
__ LeaveApiExitFrame();
Operand::StaticVariable(ExternalReference::the_hole_value_location(
masm->isolate())));
__ mov(eax, Operand::StaticVariable(pending_exception));
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(equal, &runtime);
// For exception, throw the exception again.
__ bind(&failure);
// For failure to match, return null.
- __ mov(Operand(eax), factory->null_value());
+ __ mov(eax, factory->null_value());
__ ret(4 * kPointerSize);
// Load RegExp data.
// Calculate number of capture registers (number_of_captures + 1) * 2.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ __ add(edx, Immediate(2)); // edx was a smi.
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ sub(Operand(edx), Immediate(1));
+ __ sub(edx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer.
__ mov(edi, Operand(ecx, edx, times_int_size, 0));
Label done;
__ mov(ebx, Operand(esp, kPointerSize * 3));
__ JumpIfNotSmi(ebx, &slowcase);
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
__ j(above, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
STATIC_ASSERT(kSmiTag == 0);
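// e.g. Smi::FromInt(5) is stored as the machine word 10 (binary 1010,
// tag bit 0).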
// ebx: Start of elements in FixedArray.
// edx: the hole.
Label loop;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ bind(&loop);
__ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
- __ sub(Operand(ecx), Immediate(1));
+ __ sub(ecx, Immediate(1));
__ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
__ jmp(&loop);
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
__ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- __ sub(Operand(mask), Immediate(1)); // Make mask.
+ __ sub(mask, Immediate(1)); // Make mask.
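// Worked example: a 64-entry cache holds 128 FixedArray elements; the smi
// length untags and halves to 64, so the mask becomes 63.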
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
__ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
__ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
// Object is heap number and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
Register index = scratch;
Register probe = mask;
__ mov(probe,
__ bind(&smi_hash_calculated);
// Object is smi and hash is now in scratch. Calculate cache index.
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
Register index = scratch;
// Check if the entry is the smi we are looking for.
__ cmp(object,
// Compare two smis if required.
if (include_smi_compare_) {
Label non_smi, smi_done;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
+ __ sub(edx, eax); // Return on the result of the subtraction.
__ j(no_overflow, &smi_done, Label::kNear);
__ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
__ bind(&smi_done);
__ ret(0);
__ bind(&non_smi);
} else if (FLAG_debug_code) {
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected smi operands.");
}
// for NaN and undefined.
{
Label not_identical;
- __ cmp(eax, Operand(edx));
+ __ cmp(eax, edx);
__ j(not_equal, &not_identical);
if (cc_ != equal) {
__ Set(eax, Immediate(0));
// Shift value and mask so kQuietNaNHighBitsMask applies to topmost
// bits.
- __ add(edx, Operand(edx));
+ __ add(edx, edx);
__ cmp(edx, kQuietNaNHighBitsMask << 1);
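// The doubling shifts out the sign bit, so the mask is compared against
// the topmost exponent/mantissa bits only.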
if (cc_ == equal) {
STATIC_ASSERT(EQUAL != 1);
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ mov(ecx, Immediate(kSmiTagMask));
- __ and_(ecx, Operand(eax));
- __ test(ecx, Operand(edx));
+ __ and_(ecx, eax);
+ __ test(ecx, edx);
__ j(not_zero, &not_smis, Label::kNear);
// One operand is a smi.
// Check whether the non-smi is a heap number.
STATIC_ASSERT(kSmiTagMask == 1);
// ecx still holds eax & kSmiTag, which is either zero or one.
- __ sub(Operand(ecx), Immediate(0x01));
+ __ sub(ecx, Immediate(0x01));
__ mov(ebx, edx);
- __ xor_(ebx, Operand(eax));
- __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
- __ xor_(ebx, Operand(eax));
+ __ xor_(ebx, eax);
+ __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
+ __ xor_(ebx, eax);
// if eax was smi, ebx is now edx, else eax.
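// Branchless select: ebx = eax ^ ((eax ^ edx) & ecx), where ecx is
// 0xFFFFFFFF when eax is a smi (0 - 1 underflows) and 0 otherwise.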
// Check if the non-smi operand is a heap number.
// Return a result of -1, 0, or 1, based on EFLAGS.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
+ __ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
+ __ cmov(below, eax, ecx);
__ ret(0);
} else {
FloatingPointHelper::CheckFloatOperands(
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmp(ecx, Operand(edi));
+ __ cmp(ecx, edi);
__ j(equal, &call, Label::kNear);
- __ cmp(Operand(ecx), Immediate(MegamorphicSentinel(isolate)));
+ __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
__ j(equal, &call, Label::kNear);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ cmp(Operand(ecx), Immediate(UninitializedSentinel(isolate)));
+ __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
__ j(equal, &initialize, Label::kNear);
// MegamorphicSentinel is a root so no write-barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
__ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
- __ call(Operand(ebx));
+ __ call(ebx);
// Result is in eax or edx:eax - do not destroy these registers!
if (always_allocate_scope) {
// Setup frame.
__ push(ebp);
- __ mov(ebp, Operand(esp));
+ __ mov(ebp, esp);
// Push marker in two places.
int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
}
__ mov(edx, Operand(edx, 0)); // deref address
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
- __ call(Operand(edx));
+ __ call(edx);
// Unlink this frame from the handler chain.
__ PopTryHandler();
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
__ pop(ebx);
- __ cmp(Operand(ebx),
- Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+ __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ j(not_equal, ¬_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(¬_outermost_js_2);
__ pop(ebx);
__ pop(esi);
__ pop(edi);
- __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
+ __ add(esp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
__ pop(ebp);
__ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
Label loop, is_instance, is_not_instance;
__ bind(&loop);
- __ cmp(scratch, Operand(prototype));
+ __ cmp(scratch, prototype);
__ j(equal, &is_instance, Label::kNear);
Factory* factory = masm->isolate()->factory();
- __ cmp(Operand(scratch), Immediate(factory->null_value()));
+ __ cmp(scratch, Immediate(factory->null_value()));
__ j(equal, &is_not_instance, Label::kNear);
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
Label true_value, done;
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(zero, &true_value, Label::kNear);
__ mov(eax, factory->false_value());
__ jmp(&done, Label::kNear);
Label second_not_zero_length, both_not_zero_length;
__ mov(ecx, FieldOperand(edx, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(not_zero, &second_not_zero_length, Label::kNear);
// Second string is empty, result is first string which is already in eax.
Counters* counters = masm->isolate()->counters();
__ bind(&second_not_zero_length);
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ test(ebx, Operand(ebx));
+ __ test(ebx, ebx);
__ j(not_zero, &both_not_zero_length, Label::kNear);
// First string is empty, result is second string which is in edx.
__ mov(eax, edx);
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
- __ add(ebx, Operand(ecx));
+ __ add(ebx, ecx);
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &string_add_runtime);
// Use the symbol table when adding two one-character strings, as it
// helps later optimizations to return a symbol here.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+ __ cmp(ebx, Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
// Check that both strings are non-external ascii strings.
&string_add_runtime);
// Pack both characters in ebx.
__ shl(ecx, kBitsPerByte);
- __ or_(ebx, Operand(ecx));
+ __ or_(ebx, ecx);
// Set the characters in the new string.
__ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+ __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
__ j(below, &string_add_flat_result);
// If result is not supposed to be flat, allocate a cons string object. If both
__ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
- __ and_(ecx, Operand(edi));
+ __ and_(ecx, edi);
STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ecx, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_data);
__ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ xor_(edi, Operand(ecx));
+ __ xor_(edi, ecx);
STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
__ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
__ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
// edx: first char of first argument
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(Operand(ecx),
+ __ add(ecx,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load first argument and locate first character.
__ mov(edx, Operand(esp, 2 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx),
+ __ add(edx,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: first character of result
__ mov(edx, Operand(esp, 1 * kPointerSize));
__ mov(edi, FieldOperand(edx, String::kLengthOffset));
__ SmiUntag(edi);
- __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// eax: result string
// ecx: next character of result
// edx: first char of second argument
if (ascii) {
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
} else {
__ mov_w(scratch, Operand(src, 0));
__ mov_w(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(2));
- __ add(Operand(dest), Immediate(2));
+ __ add(src, Immediate(2));
+ __ add(dest, Immediate(2));
}
- __ sub(Operand(count), Immediate(1));
+ __ sub(count, Immediate(1));
__ j(not_zero, &loop);
}
// Nothing to do for zero characters.
Label done;
- __ test(count, Operand(count));
+ __ test(count, count);
__ j(zero, &done);
// Make count the number of bytes to copy.
// Check if there are more bytes to copy.
__ bind(&last_bytes);
- __ test(count, Operand(count));
+ __ test(count, count);
__ j(zero, &done);
// Copy remaining characters.
__ bind(&loop);
__ mov_b(scratch, Operand(src, 0));
__ mov_b(Operand(dest, 0), scratch);
- __ add(Operand(src), Immediate(1));
- __ add(Operand(dest), Immediate(1));
- __ sub(Operand(count), Immediate(1));
+ __ add(src, Immediate(1));
+ __ add(dest, Immediate(1));
+ __ sub(count, Immediate(1));
__ j(not_zero, &loop);
__ bind(&done);
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
__ mov(scratch, c1);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ sub(scratch, Immediate(static_cast<int>('0')));
+ __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(above, &not_array_index, Label::kNear);
__ mov(scratch, c2);
- __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
- __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+ __ sub(scratch, Immediate(static_cast<int>('0')));
+ __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_probed);
__ bind(&not_array_index);
// Collect the two characters in a register.
Register chars = c1;
__ shl(c2, kBitsPerByte);
- __ or_(chars, Operand(c2));
+ __ or_(chars, c2);
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
Register mask = scratch2;
__ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ SmiUntag(mask);
- __ sub(Operand(mask), Immediate(1));
+ __ sub(mask, Immediate(1));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// Calculate entry in symbol table.
__ mov(scratch, hash);
if (i > 0) {
- __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+ __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(mask));
+ __ and_(scratch, mask);
// Load the entry from the symbol table.
Register candidate = scratch; // Scratch register contains candidate.
// Check if the two characters match.
__ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
__ and_(temp, 0x0000ffff);
- __ cmp(chars, Operand(temp));
+ __ cmp(chars, temp);
__ j(equal, &found_in_symbol_table);
__ bind(&next_probe_pop_mask[i]);
__ pop(mask);
// hash = character + (character << 10);
__ mov(hash, character);
__ shl(hash, 10);
- __ add(hash, Operand(character));
+ __ add(hash, character);
// hash ^= hash >> 6;
__ mov(scratch, hash);
__ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
+ __ xor_(hash, scratch);
}
Register character,
Register scratch) {
// hash += character;
- __ add(hash, Operand(character));
+ __ add(hash, character);
// hash += hash << 10;
__ mov(scratch, hash);
__ shl(scratch, 10);
- __ add(hash, Operand(scratch));
+ __ add(hash, scratch);
// hash ^= hash >> 6;
__ mov(scratch, hash);
__ sar(scratch, 6);
- __ xor_(hash, Operand(scratch));
+ __ xor_(hash, scratch);
}
// hash += hash << 3;
__ mov(scratch, hash);
__ shl(scratch, 3);
- __ add(hash, Operand(scratch));
+ __ add(hash, scratch);
// hash ^= hash >> 11;
__ mov(scratch, hash);
__ sar(scratch, 11);
- __ xor_(hash, Operand(scratch));
+ __ xor_(hash, scratch);
// hash += hash << 15;
__ mov(scratch, hash);
__ shl(scratch, 15);
- __ add(hash, Operand(scratch));
+ __ add(hash, scratch);
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ test(hash, Operand(hash));
+ __ test(hash, hash);
__ j(not_zero, &hash_not_zero, Label::kNear);
__ mov(hash, Immediate(27));
__ bind(&hash_not_zero);
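// Aside: the init, per-character, and finalization fragments above are the
// Jenkins one-at-a-time hash. Equivalent straight-line C++ sketch
// (hypothetical helper, not a V8 function):
static uint32_t OneAtATimeHash(const uint8_t* chars, size_t length) {
  uint32_t hash = 0;
  for (size_t i = 0; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash == 0 ? 27 : hash;  // 0 is reserved; see hash_not_zero above
}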
__ JumpIfNotSmi(ecx, &runtime);
__ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
__ JumpIfNotSmi(edx, &runtime);
- __ sub(ecx, Operand(edx));
+ __ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
Label return_eax;
__ j(equal, &return_eax);
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
__ SmiUntag(ebx);
- __ add(esi, Operand(ebx));
+ __ add(esi, ebx);
// eax: result string
// ecx: result length
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(Operand(edi),
+ __ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(Operand(esi),
- Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
// As from is a smi it is 2 times the value, which matches the size of a
// two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, Operand(ebx));
+ __ add(esi, ebx);
// eax: result string
// ecx: result length
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ test(length, Operand(length));
+ __ test(length, length);
__ j(not_zero, &compare_chars, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
__ j(less_equal, &left_shorter, Label::kNear);
// Right string is shorter. Change scratch1 to be length of right string.
- __ sub(scratch1, Operand(length_delta));
+ __ sub(scratch1, length_delta);
__ bind(&left_shorter);
Register min_length = scratch1;
// If either length is zero, just compare lengths.
Label compare_lengths;
- __ test(min_length, Operand(min_length));
+ __ test(min_length, min_length);
__ j(zero, &compare_lengths, Label::kNear);
// Compare characters.
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
- __ test(length_delta, Operand(length_delta));
+ __ test(length_delta, length_delta);
__ j(not_zero, &result_not_equal, Label::kNear);
// Result is EQUAL.
__ mov_b(scratch, Operand(left, index, times_1, 0));
__ cmpb(scratch, Operand(right, index, times_1, 0));
__ j(not_equal, chars_not_equal, chars_not_equal_near);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ j(not_zero, &loop);
}
__ mov(eax, Operand(esp, 1 * kPointerSize)); // right
Label not_same;
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
// Compare flat ascii strings.
// Drop arguments from the stack.
__ pop(ecx);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ add(esp, Immediate(2 * kPointerSize));
__ push(ecx);
GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
Label miss;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
__ JumpIfNotSmi(ecx, &miss, Label::kNear);
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
} else {
Label done;
- __ sub(edx, Operand(eax));
+ __ sub(edx, eax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
__ not_(edx);
Label generic_stub;
Label unordered;
Label miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
__ JumpIfSmi(ecx, &generic_stub, Label::kNear);
__ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
// Performing mov, because xor would destroy the flag register.
__ mov(eax, 0); // equal
__ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, Operand(ecx));
+ __ cmov(above, eax, ecx);
__ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, Operand(ecx));
+ __ cmov(below, eax, ecx);
__ ret(0);
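// Aside: the mov/cmov sequence above is a branchless three-way compare
// keyed off the flags of the preceding floating-point comparison. Roughly
// (a sketch, not the stub's interface; the unordered case is handled
// separately below):
static inline int ThreeWayCompare(double lhs, double rhs) {
  if (lhs > rhs) return 1;   // 'above' -> cmov writes Smi::FromInt(1)
  if (lhs < rhs) return -1;  // 'below' -> cmov writes Smi::FromInt(-1)
  return 0;                  // neither -> eax keeps the preloaded 0
}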
__ bind(&unordered);
// Check that both operands are heap objects.
Label miss;
- __ mov(tmp1, Operand(left));
+ __ mov(tmp1, left);
STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, Operand(right));
+ __ and_(tmp1, right);
__ JumpIfSmi(tmp1, &miss, Label::kNear);
// Check that both operands are symbols.
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, Operand(tmp2));
+ __ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &miss, Label::kNear);
// Symbols are compared by identity.
Label done;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(eax));
Register tmp3 = edi;
// Check that both operands are heap objects.
- __ mov(tmp1, Operand(left));
+ __ mov(tmp1, left);
STATIC_ASSERT(kSmiTag == 0);
- __ and_(tmp1, Operand(right));
+ __ and_(tmp1, right);
__ JumpIfSmi(tmp1, &miss);
// Check that both operands are strings. This leaves the instance
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ mov(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, Operand(tmp2));
+ __ or_(tmp3, tmp2);
__ test(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
// because we already know they are not identical.
Label do_compare;
STATIC_ASSERT(kSymbolTag != 0);
- __ and_(tmp1, Operand(tmp2));
+ __ and_(tmp1, tmp2);
__ test(tmp1, Immediate(kIsSymbolMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
- __ mov(ecx, Operand(edx));
- __ and_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ sub(eax, Operand(edx));
+ __ sub(eax, edx);
__ ret(0);
__ bind(&miss);
__ push(ecx);
// Do a tail call to the rewritten stub.
- __ jmp(Operand(edi));
+ __ jmp(edi);
}
// Capacity is smi 2^n.
__ mov(index, FieldOperand(properties, kCapacityOffset));
__ dec(index);
- __ and_(Operand(index),
- Immediate(Smi::FromInt(name->Hash() +
+ __ and_(index,
+ Immediate(Smi::FromInt(name->Hash() +
StringDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
__ push(Immediate(name->Hash()));
MaybeObject* result = masm->TryCallStub(&stub);
if (result->IsFailure()) return result;
- __ test(r0, Operand(r0));
+ __ test(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
return result;
__ mov(r0, FieldOperand(name, String::kHashFieldOffset));
__ shr(r0, String::kHashShift);
if (i > 0) {
- __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r0, Operand(r1));
+ __ and_(r0, r1);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
__ push(r0);
__ CallStub(&stub);
- __ test(r1, Operand(r1));
+ __ test(r1, r1);
__ j(zero, miss);
__ jmp(done);
}
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(scratch, Operand(esp, 2 * kPointerSize));
if (i > 0) {
- __ add(Operand(scratch),
- Immediate(StringDictionary::GetProbeOffset(i)));
+ __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
}
__ and_(scratch, Operand(esp, 0));
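// Aside: each retry adds a growing probe offset to the hash before masking,
// the quadratic probing the comment above describes. Sketch of the lookup
// loop (Entry, IsMatch and IsEmpty are hypothetical placeholders; a full
// table cannot occur because of the load factor):
static int FindEntry(uint32_t hash, uint32_t mask, const Entry* table) {
  for (uint32_t i = 0;; i++) {
    uint32_t index = (hash + GetProbeOffset(i)) & mask;
    if (table[index].IsMatch() || table[index].IsEmpty()) return index;
  }
}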
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
if (mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
- masm->sub(Operand(esp),
- Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+ masm->sub(esp,
+ Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
XMMRegister reg = XMMRegister::from_code(i);
masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
}
- masm->add(Operand(esp),
+ masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
}
if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
- __ add(Operand(edx), Immediate(16));
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
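// Aside: the neg/add pair computes how many bytes advance dst to the next
// 16-byte boundary. Sketch of the arithmetic, assuming (as in the usual
// memcpy prologue) that the first 16 bytes were already copied unaligned,
// so a slop of 16 on an already-aligned dst is harmless:
static inline size_t AlignmentSlop16(uintptr_t dst) {
  return 16 - (dst & 15);  // the neg/add above; always in 1..16
}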
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
- __ test(Operand(src), Immediate(0x0F));
+ __ test(src, Immediate(0x0F));
__ j(not_zero, &unaligned_source);
{
// Copy loop for aligned source and destination.
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
- __ add(Operand(src), Immediate(0x20));
+ __ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
- __ add(Operand(dst), Immediate(0x20));
+ __ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
// At most 31 bytes to copy.
Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
+ __ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
+ __ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
+ __ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
- __ add(Operand(src), Immediate(0x20));
+ __ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
- __ add(Operand(dst), Immediate(0x20));
+ __ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
// At most 31 bytes to copy.
Label move_less_16;
- __ test(Operand(count), Immediate(0x10));
+ __ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqu(xmm0, Operand(src, 0));
- __ add(Operand(src), Immediate(0x10));
+ __ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
- __ add(Operand(dst), Immediate(0x10));
+ __ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
- __ add(Operand(edx), Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, Operand(edx));
- __ add(src, Operand(edx));
- __ sub(Operand(count), edx);
+ __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
// edi is now aligned, ecx holds number of remaining bytes to copy.
__ mov(edx, count);
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
}
// Now that the break point has been handled, resume normal execution by
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
// Re-run JSFunction, edi is function, esi is context.
- __ jmp(Operand(edx));
+ __ jmp(edx);
}
const bool Debug::kFrameDropperSupported = true;
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
- __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+ __ sub(esp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
__ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
}
- __ sub(edx, Operand(ebp));
+ __ sub(edx, ebp);
__ neg(edx);
// Allocate a new deoptimizer object.
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
} else {
- __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
- __ add(ecx, Operand(esp));
+ __ add(ecx, esp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
- __ add(Operand(edx), Immediate(sizeof(uint32_t)));
- __ cmp(ecx, Operand(esp));
+ __ add(edx, Immediate(sizeof(uint32_t)));
+ __ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
// If frame was dynamically aligned, pop padding.
Label sentinel, sentinel_done;
- __ pop(Operand(ecx));
+ __ pop(ecx);
__ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
__ j(equal, &sentinel);
- __ push(Operand(ecx));
+ __ push(ecx);
__ jmp(&sentinel_done);
__ bind(&sentinel);
__ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
__ bind(&inner_push_loop);
- __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+ __ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
- __ add(Operand(eax), Immediate(kPointerSize));
- __ cmp(eax, Operand(edx));
+ __ add(eax, Immediate(kPointerSize));
+ __ cmp(eax, edx);
__ j(below, &outer_push_loop);
// In case of OSR, we have to restore the XMM registers.
// function calls.
if (info->is_strict_mode() || info->is_native()) {
Label ok;
- __ test(ecx, Operand(ecx));
+ __ test(ecx, ecx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
void FullCodeGenerator::verify_stack_height() {
ASSERT(FLAG_verify_stack_height);
- __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
- __ cmp(ebp, Operand(esp));
+ __ sub(ebp, Immediate(kPointerSize * stack_height()));
+ __ cmp(ebp, esp);
__ Assert(equal, "Full codegen stack height not as expected.");
- __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
+ __ add(ebp, Immediate(kPointerSize * stack_height()));
}
ToBooleanStub stub(result_register());
__ push(result_register());
__ CallStub(&stub, condition->test_id());
- __ test(result_register(), Operand(result_register()));
+ __ test(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
if (inline_smi_code) {
Label slow_case;
__ mov(ecx, edx);
- __ or_(ecx, Operand(eax));
+ __ or_(ecx, eax);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
__ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
// For all objects but the receiver, check that the cache is empty.
Label check_prototype;
- __ cmp(ecx, Operand(eax));
+ __ cmp(ecx, eax);
__ j(equal, &check_prototype, Label::kNear);
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmp(edx, isolate()->factory()->empty_fixed_array());
__ push(ecx); // Enumerable.
__ push(ebx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(equal, loop_statement.continue_label());
- __ mov(ebx, Operand(eax));
+ __ mov(ebx, eax);
// Update the 'each' property or variable from the possibly filtered
// entry in register ebx.
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ add(Operand(esp), Immediate(5 * kPointerSize));
+ __ add(esp, Immediate(5 * kPointerSize));
decrement_stack_height(ForIn::kElementCount);
// Exit and decrement the loop depth.
__ pop(edx);
decrement_stack_height();
__ mov(ecx, eax);
- __ or_(eax, Operand(edx));
+ __ or_(eax, edx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
break;
}
case Token::ADD:
- __ add(eax, Operand(ecx));
+ __ add(eax, ecx);
__ j(overflow, &stub_call);
break;
case Token::SUB:
- __ sub(eax, Operand(ecx));
+ __ sub(eax, ecx);
__ j(overflow, &stub_call);
break;
case Token::MUL: {
__ SmiUntag(eax);
- __ imul(eax, Operand(ecx));
+ __ imul(eax, ecx);
__ j(overflow, &stub_call);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
__ j(not_zero, &done, Label::kNear);
__ mov(ebx, edx);
- __ or_(ebx, Operand(ecx));
+ __ or_(ebx, ecx);
__ j(negative, &stub_call);
break;
}
case Token::BIT_OR:
- __ or_(eax, Operand(ecx));
+ __ or_(eax, ecx);
break;
case Token::BIT_AND:
- __ and_(eax, Operand(ecx));
+ __ and_(eax, ecx);
break;
case Token::BIT_XOR:
- __ xor_(eax, Operand(ecx));
+ __ xor_(eax, ecx);
break;
default:
UNREACHABLE();
STATIC_ASSERT(kPointerSize == 4);
__ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
// Calculate location of the first key name.
- __ add(Operand(ebx),
- Immediate(FixedArray::kHeaderSize +
- DescriptorArray::kFirstIndex * kPointerSize));
+ __ add(ebx,
+ Immediate(FixedArray::kHeaderSize +
+ DescriptorArray::kFirstIndex * kPointerSize));
// Loop through all the keys in the descriptor array. If one of these is the
// symbol valueOf the result is false.
Label entry, loop;
__ mov(edx, FieldOperand(ebx, 0));
__ cmp(edx, FACTORY->value_of_symbol());
__ j(equal, if_false);
- __ add(Operand(ebx), Immediate(kPointerSize));
+ __ add(ebx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(not_equal, &loop);
// Reload map as register ebx was used as temporary above.
__ pop(ebx);
decrement_stack_height();
- __ cmp(eax, Operand(ebx));
+ __ cmp(eax, ebx);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope fscope(SSE2);
__ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm1, Operand(ebx));
- __ movd(xmm0, Operand(eax));
+ __ movd(xmm1, ebx);
+ __ movd(xmm0, eax);
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
__ mov(index_1, Operand(esp, 1 * kPointerSize));
__ mov(index_2, Operand(esp, 0));
__ mov(temp, index_1);
- __ or_(temp, Operand(index_2));
+ __ or_(temp, index_2);
__ JumpIfNotSmi(temp, &slow_case);
// Check that both indices are valid.
__ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
- __ cmp(temp, Operand(index_1));
+ __ cmp(temp, index_1);
__ j(below_equal, &slow_case);
- __ cmp(temp, Operand(index_2));
+ __ cmp(temp, index_2);
__ j(below_equal, &slow_case);
// Bring addresses into index1 and index2.
__ bind(&no_remembered_set);
// We are done. Drop elements from the stack, and return undefined.
- __ add(Operand(esp), Immediate(3 * kPointerSize));
+ __ add(esp, Immediate(3 * kPointerSize));
__ mov(eax, isolate()->factory()->undefined_value());
__ jmp(&done);
__ pop(left);
Label done, fail, ok;
- __ cmp(left, Operand(right));
+ __ cmp(left, right);
__ j(equal, &ok);
// Fail if either is a non-HeapObject.
__ mov(tmp, left);
- __ and_(Operand(tmp), right);
+ __ and_(tmp, right);
__ JumpIfSmi(tmp, &fail);
__ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
__ CmpInstanceType(tmp, JS_REGEXP_TYPE);
Operand separator_operand = Operand(esp, 2 * kPointerSize);
Operand result_operand = Operand(esp, 1 * kPointerSize);
Operand array_length_operand = Operand(esp, 0);
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ sub(esp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
if (FLAG_debug_code) {
- __ cmp(index, Operand(array_length));
+ __ cmp(index, array_length);
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
__ add(string_length,
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
- __ add(Operand(index), Immediate(1));
- __ cmp(index, Operand(array_length));
+ __ add(index, Immediate(1));
+ __ cmp(index, array_length);
__ j(less, &loop);
// If array_length is 1, return elements[0], a string.
// to string_length.
__ mov(scratch, separator_operand);
__ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
- __ sub(string_length, Operand(scratch)); // May be negative, temporarily.
+ __ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
- __ add(string_length, Operand(scratch));
+ __ add(string_length, scratch);
__ j(overflow, &bailout);
__ shr(string_length, 1);
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ bind(&loop_1_condition);
__ cmp(index, array_length_operand);
__ j(less, &loop_1); // End while (index < length).
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_2); // End while (index < length).
__ lea(string,
FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
- __ add(Operand(index), Immediate(1));
+ __ add(index, Immediate(1));
__ cmp(index, array_length_operand);
__ j(less, &loop_3); // End while (index < length).
__ bind(&done);
__ mov(eax, result_operand);
// Drop temp values from the stack, and restore context register.
- __ add(Operand(esp), Immediate(3 * kPointerSize));
+ __ add(esp, Immediate(3 * kPointerSize));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
decrement_stack_height();
if (ShouldInlineSmiCase(expr->op())) {
if (expr->op() == Token::INC) {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ add(eax, Immediate(Smi::FromInt(1)));
} else {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(eax, Immediate(Smi::FromInt(1)));
}
__ j(overflow, &stub_call, Label::kNear);
// We could eliminate this smi check if we split the code at
__ bind(&stub_call);
// Call stub. Undo operation first.
if (expr->op() == Token::INC) {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ sub(eax, Immediate(Smi::FromInt(1)));
} else {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ __ add(eax, Immediate(Smi::FromInt(1)));
}
}
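// Aside: a smi stores value << 1, so Smi::FromInt(1) is the bit pattern 2
// and ++/-- become plain adds whose overflow flag signals escape from smi
// range. Sketch (hypothetical helper; wide arithmetic stands in for jo):
static inline bool SmiIncrement(int32_t* tagged) {
  int64_t result = static_cast<int64_t>(*tagged) + 2;  // Smi::FromInt(1) == 2
  if (result != static_cast<int32_t>(result)) return false;  // 'overflow'
  *tagged = static_cast<int32_t>(result);
  return true;
}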
__ CallStub(&stub);
decrement_stack_height(2);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax));
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
- __ cmp(edx, Operand(eax));
+ __ cmp(edx, eax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
- __ test(eax, Operand(eax));
+ __ test(eax, eax);
Split(cc, if_true, if_false, fall_through);
}
}
// Cook return address on top of stack (smi encoded Code* delta)
ASSERT(!result_register().is(edx));
__ pop(edx);
- __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+ __ sub(edx, Immediate(masm_->CodeObject()));
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ SmiTag(edx);
// Uncook return address.
__ pop(edx);
__ SmiUntag(edx);
- __ add(Operand(edx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(edx));
+ __ add(edx, Immediate(masm_->CodeObject()));
+ __ jmp(edx);
}
// Fast case: Do the load.
STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
__ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
- __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
+ __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, out_of_range);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
__ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
- __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
- __ cmp(key, Operand(scratch2));
+ __ sub(scratch2, Immediate(Smi::FromInt(2)));
+ __ cmp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmp(key, Operand(scratch));
+ __ cmp(key, scratch);
__ j(greater_equal, slow_case);
return FieldOperand(backing_store,
key,
__ shr(ecx, KeyedLookupCache::kMapHashShift);
__ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
__ shr(edi, String::kHashShift);
- __ xor_(ecx, Operand(edi));
+ __ xor_(ecx, edi);
__ and_(ecx, KeyedLookupCache::kCapacityMask);
// Load the key (consisting of map and symbol) from the cache and
__ shl(edi, kPointerSizeLog2 + 1);
__ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
- __ add(Operand(edi), Immediate(kPointerSize));
+ __ add(edi, Immediate(kPointerSize));
__ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
__ j(not_equal, &slow);
__ mov(edi,
Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
__ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
- __ sub(edi, Operand(ecx));
+ __ sub(edi, ecx);
__ j(above_equal, &property_array_property);
// Load in-object property.
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
- __ add(ecx, Operand(edi));
+ __ add(ecx, edi);
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
__ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
- __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
- __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+ __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+ __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
__ j(not_zero, &slow);
// Everything is fine, call runtime.
// Fast elements array, store the value to the elements backing store.
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
// Update write barrier for the elements array address.
- __ mov(edx, Operand(eax)); // Preserve the value which is returned.
+ __ mov(edx, eax); // Preserve the value which is returned.
__ RecordWriteArray(
ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ ret(0);
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, Operand(object));
+ and_(scratch, object);
}
// Check that we can use a test_b.
ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
// Store pointer to buffer.
mov(Operand(scratch, 0), addr);
// Increment buffer top.
- add(Operand(scratch), Immediate(kPointerSize));
+ add(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
mov(Operand::StaticVariable(store_buffer), scratch);
// Call stub on end of buffer.
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- test_b(Operand(dst), (1 << kPointerSizeLog2) - 1);
+ test_b(dst, (1 << kPointerSizeLog2) - 1);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
- xor_(dst, Operand(dst)); // Shorter than mov.
+ xor_(dst, dst); // Shorter than mov.
} else {
mov(dst, x);
}
SmiUntag(scratch1);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatures::Scope fscope(SSE2);
- cvtsi2sd(scratch2, Operand(scratch1));
+ cvtsi2sd(scratch2, scratch1);
movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
scratch2);
} else {
Register scratch,
Label* fail) {
movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
cmp(scratch,
LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
j(above, fail);
void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
- mov(ebp, Operand(esp));
+ mov(ebp, esp);
push(esi);
push(Immediate(Smi::FromInt(type)));
push(Immediate(CodeObject()));
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(ebp);
- mov(ebp, Operand(esp));
+ mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
if (save_doubles) {
CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
- sub(Operand(esp), Immediate(space));
+ sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
- sub(Operand(esp), Immediate(argc * kPointerSize));
+ sub(esp, Immediate(argc * kPointerSize));
}
// Get the required frame alignment for the OS.
// Setup argc and argv in callee-saved registers.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- mov(edi, Operand(eax));
+ mov(edi, eax);
lea(esi, Operand(ebp, eax, times_4, offset));
// Reserve space for argc, argv and isolate.
void MacroAssembler::LeaveApiExitFrame() {
- mov(esp, Operand(ebp));
+ mov(esp, ebp);
pop(ebp);
LeaveExitFrameEpilogue();
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
isolate())));
- add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+ add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
// (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
// of them.
Label skip;
- cmp(Operand(edx), Immediate(StackHandler::ENTRY));
+ cmp(edx, Immediate(StackHandler::ENTRY));
j(equal, &skip, Label::kNear);
mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
bind(&skip);
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmp(Operand(scratch), Immediate(0));
+ cmp(scratch, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
// Load the global context of the current context.
mov(r1, r0);
not_(r0);
shl(r1, 15);
- add(r0, Operand(r1));
+ add(r0, r1);
// hash = hash ^ (hash >> 12);
mov(r1, r0);
shr(r1, 12);
- xor_(r0, Operand(r1));
+ xor_(r0, r1);
// hash = hash + (hash << 2);
lea(r0, Operand(r0, r0, times_4, 0));
// hash = hash ^ (hash >> 4);
mov(r1, r0);
shr(r1, 4);
- xor_(r0, Operand(r1));
+ xor_(r0, r1);
// hash = hash * 2057;
imul(r0, r0, 2057);
// hash = hash ^ (hash >> 16);
mov(r1, r0);
shr(r1, 16);
- xor_(r0, Operand(r1));
+ xor_(r0, r1);
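// Aside: this shift/xor/multiply chain is the well-known 32-bit integer
// hash finalizer (usually attributed to Thomas Wang), written flat:
static inline uint32_t IntegerHash(uint32_t hash) {
  hash = ~hash + (hash << 15);  // the not_/shl/add trio above
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);    // hash * 5, via the times_4 lea
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // == hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}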
// Compute capacity mask.
mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
mov(r2, r0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
- add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+ add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
}
- and_(r2, Operand(r1));
+ and_(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(NumberDictionary::kEntrySize == 3);
if (scratch.is(no_reg)) {
mov(result, Operand::StaticVariable(new_space_allocation_top));
} else {
- mov(Operand(scratch), Immediate(new_space_allocation_top));
+ mov(scratch, Immediate(new_space_allocation_top));
mov(result, Operand(scratch, 0));
}
}
if (!top_reg.is(result)) {
mov(top_reg, result);
}
- add(Operand(top_reg), Immediate(object_size));
+ add(top_reg, Immediate(object_size));
j(carry, gc_required);
cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
// Tag result if requested.
if (top_reg.is(result)) {
if ((flags & TAG_OBJECT) != 0) {
- sub(Operand(result), Immediate(object_size - kHeapObjectTag));
+ sub(result, Immediate(object_size - kHeapObjectTag));
} else {
- sub(Operand(result), Immediate(object_size));
+ sub(result, Immediate(object_size));
}
} else if ((flags & TAG_OBJECT) != 0) {
- add(Operand(result), Immediate(kHeapObjectTag));
+ add(result, Immediate(kHeapObjectTag));
}
}
// We assume that element_count*element_size + header_size does not
// overflow.
lea(result_end, Operand(element_count, element_size, header_size));
- add(result_end, Operand(result));
+ add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
if (!object_size.is(result_end)) {
mov(result_end, object_size);
}
- add(result_end, Operand(result));
+ add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(Operand(object), Immediate(~kHeapObjectTagMask));
+ and_(object, Immediate(~kHeapObjectTagMask));
#ifdef DEBUG
cmp(object, Operand::StaticVariable(new_space_allocation_top));
Check(below, "Undo allocation of non allocated memory");
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate two byte string in new space.
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
- add(Operand(scratch1), Immediate(kObjectAlignmentMask));
- and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+ add(scratch1, Immediate(kObjectAlignmentMask));
+ and_(scratch1, Immediate(~kObjectAlignmentMask));
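// Aside: both string allocations round the payload size up to the object
// alignment with the usual (size + mask) & ~mask idiom. Sketch (alignment
// must be a power of two):
static inline int RoundUpToAlignment(int size, int alignment) {
  int mask = alignment - 1;
  return (size + mask) & ~mask;
}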
// Allocate ascii string in new space.
AllocateInNewSpace(SeqAsciiString::kHeaderSize,
Register scratch) {
Label loop, done, short_string, short_loop;
// Experimentation shows that the short string loop is faster if length < 10.
- cmp(Operand(length), Immediate(10));
+ cmp(length, Immediate(10));
j(less_equal, &short_string);
ASSERT(source.is(esi));
mov(scratch, ecx);
shr(ecx, 2);
rep_movs();
- and_(Operand(scratch), Immediate(0x3));
- add(destination, Operand(scratch));
+ and_(scratch, Immediate(0x3));
+ add(destination, scratch);
jmp(&done);
bind(&short_string);
- test(length, Operand(length));
+ test(length, length);
j(zero, &done);
bind(&short_loop);
jmp(&entry);
bind(&loop);
mov(Operand(start_offset, 0), filler);
- add(Operand(start_offset), Immediate(kPointerSize));
+ add(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmp(start_offset, Operand(end_offset));
+ cmp(start_offset, end_offset);
j(less, &loop);
}
Register op,
Label* then_label) {
Label ok;
- test(result, Operand(result));
+ test(result, result);
j(not_zero, &ok);
- test(op, Operand(op));
+ test(op, op);
j(sign, then_label);
bind(&ok);
}
Register scratch,
Label* then_label) {
Label ok;
- test(result, Operand(result));
+ test(result, result);
j(not_zero, &ok);
- mov(scratch, Operand(op1));
- or_(scratch, Operand(op2));
+ mov(scratch, op1);
+ or_(scratch, op2);
j(sign, then_label);
bind(&ok);
}
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
- cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
+ cmp(result, Immediate(isolate()->factory()->the_hole_value()));
j(equal, miss);
// If the function does not have an initial map, we're done.
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- add(Operand(esp), Immediate(num_arguments * kPointerSize));
+ add(esp, Immediate(num_arguments * kPointerSize));
}
mov(eax, Immediate(isolate()->factory()->undefined_value()));
}
Label leave_exit_frame;
// Check if the result handle holds 0.
- test(eax, Operand(eax));
+ test(eax, eax);
j(zero, &empty_handle);
// It was non-zero. Dereference to get the result value.
mov(eax, Operand(eax, 0));
mov(edi, eax);
mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
mov(eax, Immediate(delete_extensions));
- call(Operand(eax));
+ call(eax);
mov(eax, edi);
jmp(&leave_exit_frame);
if (call_kind == CALL_AS_FUNCTION) {
// Set to some non-zero smi by updating the least significant
// byte.
- mov_b(Operand(dst), 1 << kSmiTagSize);
+ mov_b(dst, 1 << kSmiTagSize);
} else {
// Set to smi zero by clearing the register.
- xor_(dst, Operand(dst));
+ xor_(dst, dst);
}
}
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmp(expected.reg(), Operand(actual.reg()));
+ cmp(expected.reg(), actual.reg());
j(equal, &invoke);
ASSERT(actual.reg().is(eax));
ASSERT(expected.reg().is(ebx));
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
mov(edx, Immediate(code_constant));
- add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_operand.is_reg(edx)) {
mov(edx, code_operand);
}
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
- Operand dummy(eax);
+ Operand dummy(eax, 0);
InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
call_wrapper, call_kind);
if (flag == CALL_FUNCTION) {
ret(bytes_dropped);
} else {
pop(scratch);
- add(Operand(esp), Immediate(bytes_dropped));
+ add(esp, Immediate(bytes_dropped));
push(scratch);
ret(0);
}
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- add(Operand(esp), Immediate(stack_elements * kPointerSize));
+ add(esp, Immediate(stack_elements * kPointerSize));
}
}
ASSERT(is_uintn(power + HeapNumber::kExponentBias,
HeapNumber::kExponentBits));
mov(scratch, Immediate(power + HeapNumber::kExponentBias));
- movd(dst, Operand(scratch));
+ movd(dst, scratch);
psllq(dst, HeapNumber::kMantissaBits);
}
Label* failure) {
// Check that both objects are not smis.
STATIC_ASSERT(kSmiTag == 0);
- mov(scratch1, Operand(object1));
- and_(scratch1, Operand(object2));
+ mov(scratch1, object1);
+ and_(scratch1, object2);
JumpIfSmi(scratch1, failure);
// Load instance type for both strings.
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
- sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+ sub(esp, Immediate((num_arguments + 1) * kPointerSize));
ASSERT(IsPowerOf2(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
- sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+ sub(esp, Immediate(num_arguments * kPointerSize));
}
}
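// Aside: `and esp, -alignment` above is the matching round-*down* idiom:
// it clears the low bits so the stack meets the ABI boundary, while the
// pre-alignment esp parked in the extra slot is restored after the call.
static inline uintptr_t RoundDownToAlignment(uintptr_t p, uintptr_t alignment) {
  return p & ~(alignment - 1);  // == p & -alignment for powers of two
}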
void MacroAssembler::CallCFunction(ExternalReference function,
int num_arguments) {
// Trashing eax is ok as it will be the return value.
- mov(Operand(eax), Immediate(function));
+ mov(eax, Immediate(function));
CallCFunction(eax, num_arguments);
}
CheckStackAlignment();
}
- call(Operand(function));
+ call(function);
if (OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
- add(Operand(esp), Immediate(num_arguments * kPointerSize));
+ add(esp, Immediate(num_arguments * kPointerSize));
}
}
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, Operand(object));
+ and_(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
Label other_color, word_boundary;
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
- add(mask_scratch, Operand(mask_scratch)); // Shift left 1 by adding.
+ add(mask_scratch, mask_scratch); // Shift left 1 by adding.
j(zero, &word_boundary, Label::kNear);
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
Register mask_reg) {
ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
- and_(Operand(bitmap_reg), addr_reg);
- mov(ecx, Operand(addr_reg));
+ and_(bitmap_reg, addr_reg);
+ mov(ecx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shr(ecx, shift);
and_(ecx,
(Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
- add(bitmap_reg, Operand(ecx));
- mov(ecx, Operand(addr_reg));
+ add(bitmap_reg, ecx);
+ mov(ecx, addr_reg);
shr(ecx, kPointerSizeLog2);
and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
mov(mask_reg, Immediate(1));
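// Aside: mark bits live in a per-page bitmap, one bit per pointer-sized
// word, so the bit position is just a few shifted address bits. Sketch of
// the mask computation (the final shift of mask_reg by ecx is elided
// above; constants as in the asm):
uint32_t bit  = (addr >> kPointerSizeLog2) & ((1 << Bitmap::kBitsPerCellLog2) - 1);
uint32_t mask = 1u << bit;  // one bit within a 32-bit bitmap cell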
Label ok;
push(mask_scratch);
// shl. May overflow, making the check conservative.
- add(mask_scratch, Operand(mask_scratch));
+ add(mask_scratch, mask_scratch);
test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
j(zero, &ok, Label::kNear);
int3();
// no GC pointers.
Register instance_type = ecx;
movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
- test_b(Operand(instance_type), kIsIndirectStringMask | kIsNotStringMask);
+ test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
j(not_zero, value_is_white_and_not_data);
// It's a non-indirect (non-cons and non-slice) string.
// If it's external, the length is just ExternalString::kSize.
// set.
ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
- test_b(Operand(instance_type), kExternalStringTag);
+ test_b(instance_type, kExternalStringTag);
j(zero, &not_external, Label::kNear);
mov(length, Immediate(ExternalString::kSize));
jmp(&is_data_object, Label::kNear);
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kAsciiStringTag == 0x04);
- and_(Operand(length), Immediate(kStringEncodingMask));
- xor_(Operand(length), Immediate(kStringEncodingMask));
- add(Operand(length), Immediate(0x04));
+ and_(length, Immediate(kStringEncodingMask));
+ xor_(length, Immediate(kStringEncodingMask));
+ add(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
- add(Operand(length),
- Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(Operand(length),
- Immediate(~kObjectAlignmentMask));
+ add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ and_(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag,
+ const CallWrapper& call_wrapper,
+ CallKind call_kind) {
+ InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+ }
+
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
void SmiTag(Register reg) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- add(reg, Operand(reg));
+ add(reg, reg);
}
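// Aside: adding a register to itself doubles it, i.e. a shift left by one,
// which is the entire smi tagging scheme (kSmiTag == 0, kSmiTagSize == 1).
// Conceptual sketch:
static inline int32_t SmiTagValue(int32_t value) { return value << 1; }
static inline int32_t SmiUntagValue(int32_t smi) { return smi >> 1; }  // the sar below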
void SmiUntag(Register reg) {
sar(reg, kSmiTagSize);
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
if (by != 0) {
- __ add(Operand(edi), Immediate(by * char_size()));
+ __ add(edi, Immediate(by * char_size()));
}
}
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
}
int byte_offset = cp_offset * char_size();
if (check_end_of_string) {
// Check that there are at least str.length() characters left in the input.
- __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+ __ cmp(edi, Immediate(-(byte_offset + byte_length)));
BranchOrBacktrack(greater, on_failure);
}
Label fallthrough;
__ cmp(edi, Operand(backtrack_stackpointer(), 0));
__ j(not_equal, &fallthrough);
- __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize)); // Pop.
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize)); // Pop.
BranchOrBacktrack(no_condition, on_equal);
__ bind(&fallthrough);
}
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
- __ sub(ebx, Operand(edx)); // Length of capture.
+ __ sub(ebx, edx); // Length of capture.
// The length of a capture should not be negative. This can only happen
// if the end of the capture is unrecorded, or at a point earlier than
// the start of the capture.
__ push(backtrack_stackpointer());
// After this, the eax, ecx, and edi registers are available.
- __ add(edx, Operand(esi)); // Start of capture
- __ add(edi, Operand(esi)); // Start of text to match against capture.
- __ add(ebx, Operand(edi)); // End of text to match against capture.
+ __ add(edx, esi); // Start of capture
+ __ add(edi, esi); // Start of text to match against capture.
+ __ add(ebx, edi); // End of text to match against capture.
Label loop;
__ bind(&loop);
__ movzx_b(ecx, Operand(edx, 0));
__ or_(ecx, 0x20);
- __ cmp(eax, Operand(ecx));
+ __ cmp(eax, ecx);
__ j(not_equal, &fail);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ add(Operand(edx), Immediate(1));
- __ add(Operand(edi), Immediate(1));
+ __ add(edx, Immediate(1));
+ __ add(edi, Immediate(1));
// Compare to end of match, and loop if not done.
- __ cmp(edi, Operand(ebx));
+ __ cmp(edi, ebx);
__ j(below, &loop);
__ jmp(&success);
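// Editorial sketch of the comparison in the loop above: OR-ing 0x20 folds
// ASCII 'A'-'Z' onto 'a'-'z', so a single compare suffices; the elided
// part of the loop verifies the characters are letters before relying on
// this folding.
static bool EqualIgnoreCaseSketch(unsigned char a, unsigned char b) {
  return (a | 0x20) == (b | 0x20);
}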
// Restore original value before continuing.
__ pop(backtrack_stackpointer());
// Drop original value of character position.
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
// Compute new value of character position after the matched part.
- __ sub(edi, Operand(esi));
+ __ sub(edi, esi);
} else {
ASSERT(mode_ == UC16);
// Save registers before calling C function.
// Set byte_offset2.
// Found by adding negative string-end offset of current position (edi)
// to end of string.
- __ add(edi, Operand(esi));
+ __ add(edi, esi);
__ mov(Operand(esp, 1 * kPointerSize), edi);
// Set byte_offset1.
// Start of capture, where edx already holds string-end negative offset.
- __ add(edx, Operand(esi));
+ __ add(edx, esi);
__ mov(Operand(esp, 0 * kPointerSize), edx);
{
__ pop(esi);
// Check if function returned non-zero for success or zero for failure.
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
- __ add(edi, Operand(ebx));
+ __ add(edi, ebx);
}
__ bind(&fallthrough);
}
// Find length of back-referenced capture.
__ mov(edx, register_location(start_reg));
__ mov(eax, register_location(start_reg + 1));
- __ sub(eax, Operand(edx)); // Length to check.
+ __ sub(eax, edx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
BranchOrBacktrack(less, on_no_match);
// Succeed on empty capture (including no capture)
// Check that there are sufficient characters left in the input.
__ mov(ebx, edi);
- __ add(ebx, Operand(eax));
+ __ add(ebx, eax);
BranchOrBacktrack(greater, on_no_match);
// Save register to make it available below.
// Compute pointers to match string and capture string
__ lea(ebx, Operand(esi, edi, times_1, 0)); // Start of match.
- __ add(edx, Operand(esi)); // Start of capture.
+ __ add(edx, esi); // Start of capture.
__ lea(ecx, Operand(eax, ebx, times_1, 0)); // End of match
Label loop;
}
__ j(not_equal, &fail);
// Increment pointers into capture and match string.
- __ add(Operand(edx), Immediate(char_size()));
- __ add(Operand(ebx), Immediate(char_size()));
+ __ add(edx, Immediate(char_size()));
+ __ add(ebx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmp(ebx, Operand(ecx));
+ __ cmp(ebx, ecx);
__ j(below, &loop);
__ jmp(&success);
__ bind(&success);
// Move current character position to position after match.
__ mov(edi, ecx);
- __ sub(Operand(edi), esi);
+ __ sub(edi, esi);
// Restore backtrack stackpointer.
__ pop(backtrack_stackpointer());
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- __ mov(Operand(eax), current_character());
- __ xor_(Operand(eax), Immediate(0x01));
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(eax), Immediate(0x0b));
+ __ sub(eax, Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ sub(eax, Immediate(0x2028 - 0x0b));
__ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
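// Editorial sketch (not part of this change) of the XOR/range trick used
// above: c ^ 0x01 maps '\n' (0x0a) to 0x0b and '\r' (0x0d) to 0x0c, so one
// unsigned compare after subtracting 0x0b catches both, and the same
// transformed value is reused for the 0x2028/0x2029 line separators.
static bool IsLineTerminatorSketch(unsigned c, bool uc16) {
  unsigned x = (c ^ 0x01) - 0x0b;  // '\n' -> 0, '\r' -> 1.
  if (x <= 0x0c - 0x0b) return true;
  return uc16 && (x - (0x2028 - 0x0b)) <= (0x2029 - 0x2028);
}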
case 'w': {
if (mode_ != ASCII) {
// Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(Operand(current_character()), Immediate('z'));
+ __ cmp(current_character(), Immediate('z'));
BranchOrBacktrack(above, on_no_match);
}
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
Label done;
if (mode_ != ASCII) {
// Table is 128 entries, so all ASCII characters can be tested.
- __ cmp(Operand(current_character()), Immediate('z'));
+ __ cmp(current_character(), Immediate('z'));
__ j(above, &done);
}
ASSERT_EQ(0, word_character_map[0]); // Character '\0' is not a word char.
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
// The opposite of '.'.
- __ mov(Operand(eax), current_character());
- __ xor_(Operand(eax), Immediate(0x01));
+ __ mov(eax, current_character());
+ __ xor_(eax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(eax), Immediate(0x0b));
+ __ sub(eax, Immediate(0x0b));
__ cmp(eax, 0x0c - 0x0b);
if (mode_ == ASCII) {
BranchOrBacktrack(above, on_no_match);
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ sub(eax, Immediate(0x2028 - 0x0b));
__ cmp(eax, 1);
BranchOrBacktrack(above, on_no_match);
__ bind(&done);
__ bind(&stack_limit_hit);
CallCheckStackGuardState(ebx);
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &exit_label_);
__ mov(ebx, Operand(ebp, kStartIndex));
// Allocate space on stack for registers.
- __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+ __ sub(esp, Immediate(num_registers_ * kPointerSize));
// Load string length.
__ mov(esi, Operand(ebp, kInputEnd));
// Load input position.
__ mov(edi, Operand(ebp, kInputStart));
// Set up edi to be negative offset from string end.
- __ sub(edi, Operand(esi));
+ __ sub(edi, esi);
// Set eax to address of char before start of the string.
// (effectively string position -1).
Label init_loop;
__ bind(&init_loop);
__ mov(Operand(ebp, ecx, times_1, +0), eax);
- __ sub(Operand(ecx), Immediate(kPointerSize));
+ __ sub(ecx, Immediate(kPointerSize));
__ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
__ j(greater, &init_loop);
}
if (mode_ == UC16) {
__ lea(ecx, Operand(ecx, edx, times_2, 0));
} else {
- __ add(ecx, Operand(edx));
+ __ add(ecx, edx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ mov(eax, register_location(i));
// Convert to index from start of string, not end.
- __ add(eax, Operand(ecx));
+ __ add(eax, ecx);
if (mode_ == UC16) {
__ sar(eax, 1); // Convert byte index to character index.
}
__ push(edi);
CallCheckStackGuardState(ebx);
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
__ CallCFunction(grow_stack, num_arguments);
// If it returned NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ or_(eax, Operand(eax));
+ __ or_(eax, eax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), eax);
void RegExpMacroAssemblerIA32::SafeReturn() {
__ pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
+ __ add(ebx, Immediate(masm_->CodeObject()));
+ __ jmp(ebx);
}
void RegExpMacroAssemblerIA32::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerIA32::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+ __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
__ mov(Operand(backtrack_stackpointer(), 0), value);
}
ASSERT(!target.is(backtrack_stackpointer()));
__ mov(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+ __ add(backtrack_stackpointer(), Immediate(kPointerSize));
}
__ j(not_equal, &miss);
// Jump to the first instruction in the code stub.
- __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(extra));
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
__ bind(&miss);
} else {
__ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
// Jump to the first instruction in the code stub.
- __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(Operand(offset));
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
// Pop at miss.
__ bind(&miss);
__ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
- __ sub(scratch, Operand(name));
- __ add(Operand(scratch), Immediate(flags));
+ __ sub(scratch, name);
+ __ add(scratch, Immediate(flags));
__ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
// Probe the secondary table.
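// Editorial sketch of the secondary-table offset computed above, using the
// same constants; the primary offset (already in scratch) is re-mixed by
// subtracting the name and re-adding the flags:
static unsigned SecondaryOffsetSketch(unsigned primary_offset,
                                      unsigned name, unsigned flags) {
  unsigned offset = primary_offset - name + flags;
  return offset & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}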
Register scratch2,
Label* miss_label) {
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
- __ mov(eax, Operand(scratch1));
+ __ mov(eax, scratch1);
__ ret(0);
}
// frame.
// -----------------------------------
__ pop(scratch);
- __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
+ __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
__ push(scratch);
}
__ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
__ mov(ApiParameterOperand(1), eax); // v8::Arguments::implicit_args_.
- __ add(Operand(eax), Immediate(argc * kPointerSize));
+ __ add(eax, Immediate(argc * kPointerSize));
__ mov(ApiParameterOperand(2), eax); // v8::Arguments::values_.
__ Set(ApiParameterOperand(3), Immediate(argc)); // v8::Arguments::length_.
// v8::Arguments::is_construct_call_.
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
+ __ mov(name_reg, eax);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
+ __ mov(name_reg, eax);
__ RecordWriteField(scratch,
offset,
name_reg,
} else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
+ __ cmp(scratch1, Immediate(Handle<Map>(current->map())));
// Branch on the result of the map check.
__ j(not_equal, miss);
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
__ pop(scratch3); // Get return address to place it below.
__ push(receiver); // receiver
- __ mov(scratch2, Operand(esp));
+ __ mov(scratch2, esp);
ASSERT(!scratch2.is(reg));
__ push(reg); // holder
// Push data from AccessorInfo.
__ PrepareCallApiFunction(kApiArgc);
__ mov(ApiParameterOperand(0), ebx); // name.
- __ add(Operand(ebx), Immediate(kPointerSize));
+ __ add(ebx, Immediate(kPointerSize));
__ mov(ApiParameterOperand(1), ebx); // arguments pointer.
// Emitting a stub call may try to allocate (if the code is not
// already generated). Do not allow the assembler to perform a
// garbage collection but instead return the allocation failure
// object.
void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
if (kind_ == Code::KEYED_CALL_IC) {
- __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+ __ cmp(ecx, Immediate(Handle<String>(name)));
__ j(not_equal, miss);
}
}
__ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
       Immediate(Handle<SharedFunctionInfo>(function->shared())));
__ j(not_equal, miss);
} else {
- __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+ __ cmp(edi, Immediate(Handle<JSFunction>(function)));
__ j(not_equal, miss);
}
}
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
- __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
+ __ add(eax, Immediate(Smi::FromInt(argc)));
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
- __ cmp(eax, Operand(ecx));
+ __ cmp(eax, ecx);
__ j(greater, &attempt_to_grow_elements);
// Check if value is a smi.
__ lea(edx, FieldOperand(ebx,
eax, times_half_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmp(edx, Operand(ecx));
+ __ cmp(edx, ecx);
__ j(not_equal, &call_builtin);
- __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
+ __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
__ j(above, &call_builtin);
// Get the array's length into ecx and calculate new length.
__ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
- __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+ __ sub(ecx, Immediate(Smi::FromInt(1)));
__ j(negative, &return_undefined);
// Get the last element.
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
+ __ cmp(eax, Immediate(factory()->the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
__ sar(ebx, kBitsPerInt - 1);
// Do bitwise not or do nothing depending on ebx.
- __ xor_(eax, Operand(ebx));
+ __ xor_(eax, ebx);
// Add 1 or do nothing depending on ebx.
- __ sub(eax, Operand(ebx));
+ __ sub(eax, ebx);
// If the result is still negative, go to the slow case.
// This only happens for the most negative smi.
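// Editorial sketch of the branch-free abs above: the arithmetic shift
// yields 0 for non-negative inputs and -1 for negative ones, and
// (x ^ mask) - mask negates exactly in the negative case.
static inline int AbsSketch(int x) {
  int mask = x >> 31;        // 0 if x >= 0, -1 if x < 0.
  return (x ^ mask) - mask;  // INT_MIN stays INT_MIN, hence the slow case.
}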
// Allocate space for v8::Arguments implicit values. Must be initialized
// before calling any runtime function.
- __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+ __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
// Check that the maps haven't changed and find a Holder as a side effect.
CheckPrototypes(JSObject::cast(object), edx, holder,
if (result->IsFailure()) return result;
__ bind(&miss);
- __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+ __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
__ bind(&miss_before_stack_reserved);
MaybeObject* maybe_result = GenerateMissBranch();
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
- __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+ __ cmp(ecx, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
// Generate store field code. Trashes the name register.
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
LookupResult lookup;
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadArrayLength(masm(), edx, ecx, &miss);
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
- __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+ __ cmp(eax, Immediate(Handle<String>(name)));
__ j(not_equal, &miss);
GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
// Move argc to ebx and retrieve and tag the JSObject to return.
__ mov(ebx, eax);
__ pop(eax);
- __ or_(Operand(eax), Immediate(kHeapObjectTag));
+ __ or_(eax, Immediate(kHeapObjectTag));
// Remove caller arguments and receiver from the stack and return.
__ pop(ecx);
// If the value is NaN or +/-infinity, the result is 0x80000000,
// which is automatically zero when taken mod 2^n, n < 32.
__ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
+ __ sub(esp, Immediate(2 * kPointerSize));
__ fisttp_d(Operand(esp, 0));
__ pop(ebx);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ add(esp, Immediate(kPointerSize));
} else {
ASSERT(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope scope(SSE2);
FixedArray::kHeaderSize));
__ mov(Operand(ecx, 0), eax);
// Make sure to preserve the value in register eax.
- __ mov(edx, Operand(eax));
+ __ mov(edx, eax);
__ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
}
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
Label L, C;
__ mov(edx, Operand(esp, 4));
- __ xor_(eax, Operand(eax)); // clear eax
+ __ xor_(eax, eax); // clear eax
__ jmp(&C);
__ bind(&L);
- __ add(eax, Operand(edx));
- __ sub(Operand(edx), Immediate(1));
+ __ add(eax, edx);
+ __ sub(edx, Immediate(1));
__ bind(&C);
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(not_zero, &L);
__ ret(0);
__ jmp(&C);
__ bind(&L);
- __ imul(eax, Operand(edx));
- __ sub(Operand(edx), Immediate(1));
+ __ imul(eax, edx);
+ __ sub(edx, Immediate(1));
__ bind(&C);
- __ test(edx, Operand(edx));
+ __ test(edx, edx);
__ j(not_zero, &L);
__ ret(0);
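// Editorial note: in C terms the two test loops above compute (assuming the
// elided prologues load the argument into edx, with eax preset to 1 for the
// second loop):
static int SumSketch(int n)       { int a = 0; while (n != 0) a += n--; return a; }
static int FactorialSketch(int n) { int a = 1; while (n != 0) a *= n--; return a; }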
__ subsd(xmm0, xmm1);
__ divsd(xmm0, xmm1);
// Copy xmm0 to st(0) using eight bytes of stack.
- __ sub(Operand(esp), Immediate(8));
+ __ sub(esp, Immediate(8));
__ movdbl(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(8));
+ __ add(esp, Immediate(8));
__ ret(0);
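// Editorial note: the eight-byte stack round-trip above is needed because
// the ia32 C calling convention returns doubles in x87 st(0), so an SSE2
// result must be spilled to memory and reloaded with fld_d before ret.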
CodeDesc desc;
v8::internal::byte buffer[256];
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
- __ cvtsi2sd(xmm0, Operand(eax));
+ __ cvtsi2sd(xmm0, eax);
// Copy xmm0 to st(0) using eight bytes of stack.
- __ sub(Operand(esp), Immediate(8));
+ __ sub(esp, Immediate(8));
__ movdbl(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
- __ add(Operand(esp), Immediate(8));
+ __ add(esp, Immediate(8));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// Short immediate instructions
__ adc(eax, 12345678);
- __ add(Operand(eax), Immediate(12345678));
+ __ add(eax, Immediate(12345678));
__ or_(eax, 12345678);
- __ sub(Operand(eax), Immediate(12345678));
+ __ sub(eax, Immediate(12345678));
__ xor_(eax, 12345678);
__ and_(eax, 12345678);
Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
__ mov(ebx, Operand(esp, ecx, times_2, 0)); // [esp+ecx*4]
// ---- All instructions that I can think of
- __ add(edx, Operand(ebx));
+ __ add(edx, ebx);
__ add(edx, Operand(12, RelocInfo::NONE));
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
__ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
__ nop();
- __ add(Operand(ebx), Immediate(12));
+ __ add(ebx, Immediate(12));
__ nop();
__ adc(ecx, 12);
__ adc(ecx, 1000);
CpuFeatures::Scope fscope(RDTSC);
__ rdtsc();
}
- __ movsx_b(edx, Operand(ecx));
- __ movsx_w(edx, Operand(ecx));
- __ movzx_b(edx, Operand(ecx));
- __ movzx_w(edx, Operand(ecx));
+ __ movsx_b(edx, ecx);
+ __ movsx_w(edx, ecx);
+ __ movzx_b(edx, ecx);
+ __ movzx_w(edx, ecx);
__ nop();
- __ imul(edx, Operand(ecx));
- __ shld(edx, Operand(ecx));
- __ shrd(edx, Operand(ecx));
- __ bts(Operand(edx), ecx);
+ __ imul(edx, ecx);
+ __ shld(edx, ecx);
+ __ shrd(edx, ecx);
+ __ bts(edx, ecx);
__ bts(Operand(ebx, ecx, times_4, 0), ecx);
__ nop();
__ pushad();
__ nop();
__ add(edx, Operand(esp, 16));
- __ add(edx, Operand(ecx));
- __ mov_b(edx, Operand(ecx));
- __ mov_b(Operand(ecx), 6);
+ __ add(edx, ecx);
+ __ mov_b(edx, ecx);
+ __ mov_b(ecx, 6);
__ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
__ mov_b(Operand(esp, 16), edx);
__ mov_w(edx, Operand(esp, 16));
__ adc(edx, 12345);
- __ add(Operand(ebx), Immediate(12));
+ __ add(ebx, Immediate(12));
__ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
__ and_(ebx, 12345);
__ cmp(ebx, 12345);
- __ cmp(Operand(ebx), Immediate(12));
+ __ cmp(ebx, Immediate(12));
__ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
- __ cmpb(Operand(eax), 100);
+ __ cmpb(eax, 100);
__ or_(ebx, 12345);
- __ sub(Operand(ebx), Immediate(12));
+ __ sub(ebx, Immediate(12));
__ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
__ xor_(ebx, 12345);
__ stos();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
- __ sub(edx, Operand(ebx));
+ __ sub(edx, ebx);
__ test(edx, Immediate(12345));
__ test(edx, Operand(ebx, ecx, times_8, 10000));
{
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
- __ pextrd(Operand(eax), xmm0, 1);
- __ pinsrd(xmm1, Operand(eax), 0);
+ __ pextrd(eax, xmm0, 1);
+ __ pinsrd(xmm1, eax, 0);
}
}