}
-void Assembler::decq(Register dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
}
-void Assembler::neg(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
 emit(0xF7);
 emit_operand(3, dst);
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
return;
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+ emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
- emit_optional_rex_32(rax, reg);
+ emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
}
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
+ emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
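// Editor's note (illustrative; byte sequences assume the standard x86-64
// encodings A8 ib / A9 id and F6 /0 ib / F7 /0 id): the testb shortcut is a
// pure code-size optimization, and the zero flag the callers branch on is
// identical either way.
//   testb(rax, Immediate(0x08));   // A8 08              (2 bytes)
//   testl(rax, Immediate(0x08));   // rerouted to the testb form above
//   testb(rbx, Immediate(0x08));   // F6 C3 08           (3 bytes)
//   testl(rbx, Immediate(0x200));  // F7 C3 00 02 00 00  (6 bytes; no fit)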
-void Assembler::testl(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, op);
+ emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- if (is_uint8(mask.value_)) {
- testb(dst, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
// FPU instructions.
#define ASSEMBLER_INSTRUCTION_LIST(V) \
V(add) \
+ V(cmp) \
+ V(dec) \
V(idiv) \
V(imul) \
+ V(inc) \
V(mov) \
- V(sub)
+ V(neg) \
+ V(sbb) \
+ V(sub) \
+ V(test)
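// Editor's sketch (not part of this patch): the list is consumed by an
// x-macro inside class Assembler that stamps out the p/l/q mnemonics as
// thin wrappers over the size-parameterized emit_* workers, along these
// lines (two-operand shape shown; names illustrative):
//
//   #define DECLARE_INSTRUCTION(instruction)        \
//     template <class P1, class P2>                 \
//     void instruction##p(P1 p1, P2 p2) {           \
//       emit_##instruction(p1, p2, kPointerSize);   \
//     }                                             \
//     template <class P1, class P2>                 \
//     void instruction##l(P1 p1, P2 p2) {           \
//       emit_##instruction(p1, p2, kInt32Size);     \
//     }                                             \
//     template <class P1, class P2>                 \
//     void instruction##q(P1 p1, P2 p2) {           \
//       emit_##instruction(p1, p2, kInt64Size);     \
//     }
//   ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
//   #undef DECLARE_INSTRUCTION
//
// so V(cmp) yields cmpp/cmpl/cmpq, V(dec) yields decp/decl/decq, and so on;
// this is what lets the later hunks call cmpp, testp, incp, decp, negp.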
class Assembler : public AssemblerBase {
void xchgq(Register dst, Register src);
void xchgl(Register dst, Register src);
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
arithmetic_op_16(0x39, src, dst);
}
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
void and_(Register dst, Register src) {
arithmetic_op(0x23, dst, src);
}
immediate_arithmetic_op_8(0x4, dst, src);
}
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
// Sign-extends eax into edx:eax.
void cdq();
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
void lea(Register dst, const Operand& src);
void leal(Register dst, const Operand& src);
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
void not_(Register dst);
void not_(const Operand& dst);
void notl(Register dst);
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Register reg);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
void xor_(Register dst, Register src) {
if (dst.code() == src.code()) {
}
}
+ void emit_cmp(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x39, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x39, src, dst);
+ }
+ }
+
+ void emit_cmp(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
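// Editor's note (assumes the usual V8 definitions in src/globals.h:
// kInt32Size = sizeof(int32_t), kInt64Size = sizeof(int64_t),
// kPointerSize = sizeof(void*)): the "p" suffix binds the operand width to
// the pointer size once, so a call site ported from cmpq to cmpp emits
// identical code on plain x64 today but would automatically drop to 32-bit
// operations on an ILP32-style (x32) port, where kPointerSize == 4.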
+
+ void emit_dec(Register dst, int size);
+ void emit_dec(const Operand& dst, int size);
+
// Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
// Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
// when size is 32.
void emit_imul(Register dst, const Operand& src, int size);
void emit_imul(Register dst, Register src, Immediate imm, int size);
+ void emit_inc(Register dst, int size);
+ void emit_inc(const Operand& dst, int size);
+
+ void emit_mov(Register dst, const Operand& src, int size);
+ void emit_mov(Register dst, Register src, int size);
+ void emit_mov(const Operand& dst, Register src, int size);
+ void emit_mov(Register dst, Immediate value, int size);
+ void emit_mov(const Operand& dst, Immediate value, int size);
+
+ void emit_neg(Register dst, int size);
+ void emit_neg(const Operand& dst, int size);
+
+ void emit_sbb(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1b, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1b, dst, src);
+ }
+ }
+
void emit_sub(Register dst, Register src, int size) {
if (size == kInt64Size) {
arithmetic_op(0x2B, dst, src);
} else {
ASSERT(size == kInt32Size);
arithmetic_op_32(0x2B, dst, src);
}
}
- void emit_mov(Register dst, const Operand& src, int size);
- void emit_mov(Register dst, Register src, int size);
- void emit_mov(const Operand& dst, Register src, int size);
- void emit_mov(Register dst, Immediate value, int size);
- void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_test(Register dst, Register src, int size);
+ void emit_test(Register reg, Immediate mask, int size);
+ void emit_test(const Operand& op, Register reg, int size);
+ void emit_test(const Operand& op, Immediate mask, int size);
friend class CodePatcher;
friend class EnsureSpace;
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
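// Editor's note: the hunks from here on are mechanical renames of this
// shape: cmpq/testq/incq/decq/negq/sbbq become the p-suffixed pointer-size
// forms wherever the operands are pointers, word counts, or tagged values.
// Sites that are deliberately 32-bit (cmpl, testl, decl) are left untouched.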
__ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
if (create_memento) {
- __ addq(rdi, Immediate(AllocationMemento::kSize));
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
}
// rdi: size of new object
__ Allocate(rdi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ movp(Operand(rcx, 0), rdx);
__ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
__ Push(Operand(kScratchRegister, 0)); // dereference handle
__ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
- __ incq(rax);
+ __ incp(rax);
__ bind(&done);
}
__ bind(&loop);
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
+ __ decp(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
__ popq(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
__ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
- __ incq(rax);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
__ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
// Call the function.
// Call the function proxy.
__ bind(&call_proxy);
__ Push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ incp(rax);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
Label copy;
__ bind(&copy);
- __ incq(r8);
+ __ incp(r8);
__ Push(Operand(rax, 0));
__ subp(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
Label copy;
__ bind(&copy);
- __ incq(r8);
+ __ incp(r8);
__ Push(Operand(rdi, 0));
__ subp(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
+ __ incp(r8);
__ Push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
// Restore function pointer.
Label skip;
// If the code object is null, just return to the unoptimized code.
- __ cmpq(rax, Immediate(0));
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
// comparison to get negative check for free.
__ bind(&adaptor);
__ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
+ __ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
__ movp(rbx, rcx);
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
__ xor_(r8, r8);
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
__ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
Label has_mapped_parameters, copy;
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
__ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
__ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
__ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
__ addp(rdi, Immediate(kPointerSize));
__ subp(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
+ __ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
masm->ExternalOperand(pending_exception_address, rbx);
__ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(equal, &runtime);
__ movp(pending_exception_operand, rdx);
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ negp(rax);
}
__ ret(0);
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmpq(rcx, rdi);
+ __ cmpp(rcx, rdi);
__ j(equal, &done);
__ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ j(not_equal, &megamorphic);
__ jmp(&done);
}
if (!FLAG_pretenuring_call_new) {
// Make sure the function is the Array() function
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ j(not_equal, &not_array_function);
{
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ Load(rax, js_entry_sp);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmpq(rcx, rbx);
+ __ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
+ __ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
+ __ incp(src);
+ __ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
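// Editor's note: src and dest above are pointers, hence incp, while count
// is a 32-bit character count, so its decl stays 32-bit on purpose.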
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+ __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, ¬_original_string, Label::kNear);
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyways.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+ __ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __ negq(length);
Register index = length; // index = -length;
// Compare loop.
// Check for identity.
Label not_same;
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
Counters* counters = masm->isolate()->counters();
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
- __ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
__ lea(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(not_equal, &call_builtin);
__ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
+ __ cmpp(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
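// Editor's note: branchless three-way compare. setcc(above) leaves rax at 1
// exactly when the left operand is greater; sbbp then subtracts rcx (zero)
// plus the carry flag, which is set exactly when it is below, so rax ends
// up +1 / 0 / -1 for greater / equal / less.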
__ bind(&unordered);
// Internalized strings are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
// Unique names are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
// Fast check for identical strings.
Label not_same;
- __ cmpq(left, right);
+ __ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ Push(Handle<Object>(name));
__ Push(Immediate(name->Hash()));
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
__ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ __ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
__ Push(r0);
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, args.GetArgumentOperand(0));
+ __ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
Label not_zero_case, not_one_case;
Label normal_sequence;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
rdi);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
kPCOnStackSize));
__ subp(arg5, rbp);
- __ neg(arg5);
+ __ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
__ Pop(Operand(rdx, 0));
__ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
+ __ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
__ subp(rcx, Immediate(sizeof(intptr_t)));
__ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
__ jmp(clause->body_target());
__ bind(&skip);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
// If not, we may have to filter the key.
Label update_each;
__ movp(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
}
}
// Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
__ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpq(rsp, rbx);
+ __ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
__ Push(rax); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmpq(rdx, Immediate(0));
+ __ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
__ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
+ __ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
__ j(equal, if_false);
__ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
__ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
+ __ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
+ __ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
&if_true, &if_false, &fall_through);
__ Pop(rbx);
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(scratch, stamp_operand);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
// tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
+ __ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
- __ cmpq(index, array_length);
+ __ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
+ __ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
__ movl(index, array_length_operand);
__ lea(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
- __ neg(index);
+ __ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
__ movp(rcx, rdx);
__ or_(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
// Check if element is in the range of mapped arguments.
__ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
+ __ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
+ __ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
}
__ TruncatingDiv(dividend, Abs(divisor));
- if (divisor < 0) __ neg(rdx);
+ if (divisor < 0) __ negp(rdx);
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
__ movl(rax, rdx);
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testq(left, left);
+ __ testp(left, left);
} else {
__ testl(left, left);
}
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(kScratchRegister, stamp_operand);
- __ cmpq(kScratchRegister, FieldOperand(object,
+ __ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
__ Pop(string);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_reg);
+ __ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_op);
+ __ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ testq(reg, reg);
+ __ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
cc = ReverseCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
+ __ cmpp(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
- __ cmpq(left, right);
+ __ cmpp(left, right);
}
EmitBranch(instr, equal);
}
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
EmitBranch(instr, condition);
}
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
__ Push(ToRegister(instr->right()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
+ __ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
// PushSafepointRegisterScope.
__ movp(kScratchRegister, rax);
}
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
Condition condition = TokenToCondition(op, false);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
+ __ cmpp(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->elements()));
+ __ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
+ __ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
__ Push(receiver);
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
- __ testq(input_reg, input_reg);
+ __ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
+ __ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
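// Editor's note: negp sets the sign flag, and the only negative result that
// negating a negative two's-complement value can produce is the overflow
// case (the most negative value negates to itself), so the "negative" deopt
// above catches exactly Smi::kMinValue.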
// We might need a change here
// look at the first argument
__ movp(rcx, Operand(rsp, 0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
} else {
Register reg2 = ToRegister(instr->index());
if (representation.IsSmi()) {
- __ cmpq(reg, reg2);
+ __ cmpp(reg, reg2);
} else {
__ cmpl(reg, reg2);
}
}
} else {
if (representation.IsSmi()) {
- __ cmpq(length, ToRegister(instr->index()));
+ __ cmpp(length, ToRegister(instr->index()));
} else {
__ cmpl(length, ToRegister(instr->index()));
}
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ testq(rax, Immediate(kSmiTagMask));
+ __ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
+ __ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
and_(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
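// Editor's note: smis occupy a full pointer-width word (in this era's x64
// layout the 32-bit payload lives in the upper half, kSmiShift == 32, and
// the low word is all zero), so smi test/cmp are pointer-size operations;
// that is why the Smi* helpers migrate from the q forms to the p forms here.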
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ cmpp(dst, smi_reg);
}
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
movp(src, kScratchRegister);
} else {
movp(dst, src);
- neg(dst);
- cmpq(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
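// Editor's note: "negate, then compare with the original" works because 0
// and Smi::kMinValue are the only words equal to their own two's-complement
// negation: negating smi zero would need a negative-zero heap number, and
// negating kMinValue overflows, so equality flags exactly the two failure
// cases.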
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero, so check whether the other is
// negative.
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
Label safe_div;
testl(rax, Immediate(0x7fffffff));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
movp(src1, kScratchRegister);
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
} else {
movp(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(dst, dst);
j(negative, on_not_smi_result, near_jump);
}
shr(dst, Immediate(shift_value + kSmiShift));
if (!dst.is(src)) {
movq(dst, src);
}
- neg(dst);
+ negq(dst);
if (shift < kSmiShift) {
sar(dst, Immediate(kSmiShift - shift));
} else {
shl(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
- incq(result);
+ incp(result);
}
}
addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
and_(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movp(top_operand, object);
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
movp(Operand(start_offset, 0), filler);
addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
+ cmpp(value, Immediate(encoding_mask));
Pop(value);
Check(equal, kUnexpectedStringType);
lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
Push(mask_scratch);
// shl. May overflow making the check conservative.
addp(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- cmpq(empty_fixed_array_value,
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
j(equal, &no_elements);
// Second chance, the object may be using the empty slow element dictionary.
LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
- cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
lea(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
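// A memento, if present, sits right behind the JSArray, so scratch_reg now
// points just past where it would end.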
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
and_(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
__ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
__ addp(r11, Immediate(1));
__ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
+ __ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
+ __ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
__ addp(rbx, Immediate(char_size()));
__ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpq(rdx, r9);
+ __ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If the returned value is non-zero, we exit with it as the result.
__ j(not_zero, &return_rax);
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
+ __ negq(rbx);
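// rbx now holds -start_index, so the lea below steps back start_index
// characters, plus one more, from the position base.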
if (mode_ == UC16) {
__ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
__ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpq(rdi, rdx);
+ __ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
+ __ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
__ pushq(rdi);
CallCheckStackGuardState();
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If the returned value is non-zero, we should end execution with it as
// the result.
__ j(not_zero, &return_rax);
__ CallCFunction(grow_stack, num_arguments);
// If NULL is returned, we have failed to grow the stack and must exit
// with a stack-overflow exception.
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
+ __ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
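// rdi holds the current position as a negative offset from the end of the
// input, so it is compared directly against -by * char_size().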
- __ cmpq(rdi, Immediate(-by * char_size()));
+ __ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
// the current position is expected to be already loaded.
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
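// A stack pointer above the limit means no pending interrupt; otherwise
// call into the preemption check.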
- __ cmpq(rsp, rax);
+ __ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
+ __ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
+ __ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
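// (The following instructions look like disassembler-test fixtures
// exercising one-operand 64-bit encodings.)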
__ nop();
__ idivq(rdx);
__ mul(rdx);
- __ neg(rdx);
+ __ negq(rdx);
__ not_(rdx);
__ testq(Operand(rbx, rcx, times_4, 10000), rdx);