// This method must not be used with heap object references. The stored
// address is not GC safe. Use the handle version instead.
ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
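// (Relocation modes up to RelocInfo::LAST_GCED_ENUM are the ones the GC
// visits; asserting that rmode lies past that range keeps raw, movable
// heap addresses from being embedded here.)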
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value, rmode);
-}
-
-
-void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
- // Non-relocatable values might not need a 64-bit representation.
- ASSERT(RelocInfo::IsNone(rmode));
- if (is_uint32(value)) {
- movl(dst, Immediate(static_cast<int32_t>(value)));
- } else if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
+ if (RelocInfo::IsNone(rmode)) {
+ movq(dst, reinterpret_cast<int64_t>(value));
} else {
- // Value cannot be represented by 32 bits, so do a full 64 bit immediate
- // value.
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xB8 | dst.low_bits());
- emitq(value);
+ emitp(value, rmode);
}
}
+
+
+void Assembler::movq(Register dst, int64_t value) {
+ EnsureSpace ensure_space(this);
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitq(value);
+}
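+// (Encoding note: REX.W plus opcode 0xB8 | reg selects the x86-64
+// "MOV r64, imm64" form; emitq() appends the eight raw immediate bytes,
+// while emitp() above appends a pointer and records its relocation info.)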
+
+
void Assembler::movq(const Operand& dst, Immediate value) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
AllowDeferredHandleDereference using_raw_address;
- // If there is no relocation info, emit the value of the handle efficiently
- // (possibly using less than 8 bytes for the value).
- if (RelocInfo::IsNone(mode)) {
- // There is no possible reason to store a heap pointer without relocation
- // info, so it must be a smi.
- ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
- } else {
- EnsureSpace ensure_space(this);
- ASSERT(value->IsHeapObject());
- ASSERT(!isolate()->heap()->InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- emitp(value.location(), mode);
- }
+ ASSERT(!RelocInfo::IsNone(mode));
+ EnsureSpace ensure_space(this);
+ ASSERT(value->IsHeapObject());
+ ASSERT(!isolate()->heap()->InNewSpace(*value));
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitp(value.location(), mode);
}
// Move a sign-extended immediate to a memory location.
void movq(const Operand& dst, Immediate value);
- // Instructions to load a 64-bit immediate into a register.
- // All 64-bit immediates must have a relocation mode.
+ // Loads a pointer into a register with a relocation mode.
void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
- void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+ // Loads a 64-bit immediate into a register.
+ void movq(Register dst, int64_t value);
void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
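// Illustrative use of the split above (sketch only; values are arbitrary):
//   movq(rax, V8_INT64_C(0x3FF0000000000000));      // plain bit pattern
//   movq(rax, ptr, RelocInfo::EXTERNAL_REFERENCE);  // tracked address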
void movsxbq(Register dst, const Operand& src);
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
__ movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
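// (Concretely: the sign bit plus an all-ones 11-bit exponent gives 12 set
// bits, i.e. the 0xFFF0000000000000 pattern loaded below.)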
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
// The scratch register is neither callee-saved nor an argument register on
// any platform, so it is free to use at this point.
// Cannot use smi-register for loading yet.
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE64);
+ __ movq(kScratchRegister, Smi::FromInt(marker), RelocInfo::NONE64);
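+ // (A smi is an immediate encoded in the pointer bits themselves -- on x64
+ // the 32-bit value occupies the upper half -- so it needs no GC-visible
+ // relocation info.)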
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
__ j(zero, &valid_result);
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);  // Quiet NaN bit pattern.
- __ movq(rcx, kNaNValue, RelocInfo::NONE64);
+ __ movq(rcx, kNaNValue);
__ movq(Operand(rsp, kPointerSize), rcx);
__ movsd(xmm0, Operand(rsp, kPointerSize));
__ jmp(&return_result);
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
// r15: the-hole NaN
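  // (kHoleNanInt64 is a NaN bit pattern reserved as the array-hole
  // sentinel; it is matched as raw integer bits, not with a floating-point
  // compare.)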
__ jmp(&entry);
__ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
reset_value = Smi::kMaxValue;
}
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
- __ movq(kScratchRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE64);
+ __ Move(kScratchRegister, Smi::FromInt(reset_value));
__ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
}
#endif
__ push(rax);
__ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
+ __ movq(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
__ movq(MemOperand(rsp, rax, times_pointer_size, 0),
__ neg(reg1);
DeoptimizeIf(zero, instr->environment());
}
- __ movq(reg2, multiplier, RelocInfo::NONE64);
+ __ Set(reg2, multiplier);
// The result just fits in r64, because it's int32 * uint32.
__ imul(reg2, reg1);
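  // (This is the usual divide-by-constant reduction: multiply by a
  // precomputed fixed-point reciprocal and keep the high bits. An int32
  // times a uint32 always fits in 64 bits, so the imul cannot overflow.)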
static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
Label done, round_to_zero, below_one_half, do_not_compensate, restore;
- __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
+ __ movq(kScratchRegister, one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half);
__ jmp(&done);
__ bind(&below_one_half);
- __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+ __ movq(kScratchRegister, minus_one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero);
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
XMMRegister result = ToDoubleRegister(instr->result());
XMMRegister scratch4 = double_scratch0();
- __ movq(scratch3, V8_INT64_C(0x4130000000000000),
- RelocInfo::NONE64); // 1.0 x 2^20 as double
+ __ movq(scratch3, V8_INT64_C(0x4130000000000000)); // 1.0 x 2^20 as double
__ movq(scratch4, scratch3);
__ movd(result, random);
__ xorps(result, scratch4);
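  // (Worked out: with the 32 random bits r in the low mantissa word, the
  // xor produces the double (1 + r/2^52) * 2^20 = 2^20 + r/2^32;
  // subtracting 1.0 x 2^20 then leaves a uniform value in [0, 1).)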
if (int_val == 0) {
__ xorps(dst, dst);
} else {
- __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
+ __ Set(kScratchRegister, int_val);
__ movq(dst, kScratchRegister);
}
} else {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(kScratchRegister, kZapValue, RelocInfo::NONE64);
}
push(Immediate(static_cast<int32_t>(address)));
return;
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
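  // (Loading the negated start lets a single add compute
  // object - new_space_start; masked with NewSpaceMask, that difference is
  // zero exactly when the object lies in the size-aligned new-space range.)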
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+ movq(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
+ RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
+ movq(dst, kZapValue, RelocInfo::NONE64);
}
}
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
+ movq(index, kZapValue, RelocInfo::NONE64);
}
}
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(address, kZapValue, RelocInfo::NONE64);
+ movq(value, kZapValue, RelocInfo::NONE64);
}
}
#endif
push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE64);
+ movq(kScratchRegister, reinterpret_cast<Smi*>(p0), RelocInfo::NONE64);
push(kScratchRegister);
- movq(kScratchRegister,
- reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+ movq(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
RelocInfo::NONE64);
push(kScratchRegister);
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE64);
+ movq(dst, x);
}
}
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
- movq(dst,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE64);
+ movq(dst, Smi::FromInt(kSmiConstantRegisterValue), RelocInfo::NONE64);
cmpq(dst, kSmiConstantRegister);
if (allow_stub_calls()) {
Assert(equal, kUninitializedKSmiConstantRegister);
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+ movq(dst, source, RelocInfo::NONE64);
return;
}
if (negative) {
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister,
- V8_INT64_C(0x8000000000000000),
- RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
cmpq(result_reg, kScratchRegister);
j(not_equal, &done, Label::kNear);
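  // (cvttsd2siq produces the "integer indefinite" value 0x8000000000000000
  // when the input is NaN or out of int64 range, so matching it means the
  // fast conversion failed.)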
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+ movq(kScratchRegister, V8_INT64_C(0x0000000100000000));  // 2^32.
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
}
void SafePush(Smi* src);
void InitializeSmiConstantRegister() {
- movq(kSmiConstantRegister,
- reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ movq(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
RelocInfo::NONE64);
}
Label already_round;
__ bind(&conversion_failure);
int64_t kTwoMantissaBits = V8_INT64_C(0x4330000000000000);  // 2^52 as a double.
- __ movq(rbx, kTwoMantissaBits, RelocInfo::NONE64);
+ __ movq(rbx, kTwoMantissaBits);
__ movq(xmm1, rbx);
__ ucomisd(xmm0, xmm1);
__ j(above_equal, &already_round);
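  // (A double at or above 2^52 has no fractional bits, so the value is
  // already integral and needs no rounding.)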
// Subtract 1 if the argument was less than the tentative result.
int64_t kOne = V8_INT64_C(0x3ff0000000000000);  // 1.0 as a double.
- __ movq(rbx, kOne, RelocInfo::NONE64);
+ __ movq(rbx, kOne);
__ movq(xmm1, rbx);
__ andpd(xmm1, xmm2);
__ subsd(xmm0, xmm1);
Label negative_sign;
const int sign_mask_shift =
(HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
- __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE64);
+ __ Set(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift);
__ testq(rbx, rdi);
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
using v8::internal::rsi;
using v8::internal::rsp;
using v8::internal::times_pointer_size;
+using v8::internal::Address;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
ASSERT(Smi::IsValid(result));
__ movl(rax, Immediate(id));
__ Move(r8, Smi::FromInt(static_cast<int>(result)));
- __ movq(rcx, x, RelocInfo::NONE64);
+ __ movq(rcx, x);
__ movq(r11, rcx);
__ Integer64PlusConstantToSmi(rdx, rcx, y);
__ cmpq(rdx, r8);
__ lea(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
- __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE64);
+ __ movq(r8, reinterpret_cast<Address>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1));
Operand sp0 = Operand(rsp, 0);