void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0xF2);
EMIT(0x0F);
void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+ ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
EMIT(0x0F);
}
-void Assembler::movdbl(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
-void Assembler::movdbl(const Operand& dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- movsd(dst, src);
-}
-
-
void Assembler::movsd(const Operand& dst, XMMRegister src) {
ASSERT(IsEnabled(SSE2));
EnsureSpace ensure_space(this);
}
}
- // Use either movsd or movlpd.
- void movdbl(XMMRegister dst, const Operand& src);
- void movdbl(const Operand& dst, XMMRegister src);
-
void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
void movd(const Operand& dst, XMMRegister src);
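+  // Moves a 64-bit scalar double between XMM registers and memory.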
void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(XMMRegister dst, const Operand& src);
+ void movsd(const Operand& dst, XMMRegister src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
- void movsd(XMMRegister dst, const Operand& src);
- void movsd(const Operand& dst, XMMRegister src);
-
void emit_sse_operand(XMMRegister reg, const Operand& adr);
void emit_sse_operand(XMMRegister dst, XMMRegister src);
void emit_sse_operand(Register dst, XMMRegister src);
__ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(Operand(esp, i * kDoubleSize), reg);
+ __ movsd(Operand(esp, i * kDoubleSize), reg);
}
}
const int argument_count = 1;
CpuFeatureScope scope(masm, SSE2);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- __ movdbl(reg, Operand(esp, i * kDoubleSize));
+ __ movsd(reg, Operand(esp, i * kDoubleSize));
}
__ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
}
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
CpuFeatureScope scope(masm, SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
__ ret(kPointerSize);
} else { // UNTAGGED.
CpuFeatureScope scope(masm, SSE2);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
// Skip cache and return answer directly, only in untagged case.
__ bind(&skip_cache);
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), xmm1);
+ __ movsd(Operand(esp, 0), xmm1);
__ fld_d(Operand(esp, 0));
GenerateOperation(masm, type_);
__ fstp_d(Operand(esp, 0));
- __ movdbl(xmm1, Operand(esp, 0));
+ __ movsd(xmm1, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
// We return the value in xmm1 without adding it to the cache, but
// we cause a scavenging GC so that future allocations will succeed.
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(eax);
__ CallRuntime(RuntimeFunction(), 1);
}
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
}
Factory* factory = masm->isolate()->factory();
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
       factory->heap_number_map());
__ j(not_equal, not_numbers); // Argument in edx is not a number.
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ bind(&load_eax);
// Load operand in eax into xmm1, or branch to not_numbers.
__ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
__ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done, Label::kNear);
__ bind(&load_float_eax);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ bind(&done);
}
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
__ jmp(&unpack_exponent, Label::kNear);
__ bind(&base_is_smi);
__ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
factory->heap_number_map());
__ j(not_equal, &call_runtime);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
__ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
__ jmp(&int_exponent);
__ bind(&exponent_not_smi);
- __ movdbl(double_exponent,
+ __ movsd(double_exponent,
FieldOperand(exponent, HeapNumber::kValueOffset));
}
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
__ sub(esp, Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), double_exponent);
+ __ movsd(Operand(esp, 0), double_exponent);
__ fld_d(Operand(esp, 0)); // E
- __ movdbl(Operand(esp, 0), double_base);
+ __ movsd(Operand(esp, 0), double_base);
__ fld_d(Operand(esp, 0)); // B, E
// Exponent is in st(1) and base is in st(0)
__ test_b(eax, 0x5F); // We check for all but precision exception.
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ jmp(&done);
// as heap number in exponent.
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
__ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(4, scratch);
- __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
- __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ movsd(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()), 4);
}
// Store it into the (fixed) result register.
__ sub(esp, Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(double_result, Operand(esp, 0));
+ __ movsd(double_result, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ jmp(&left, Label::kNear);
__ bind(&right_smi);
__ mov(ecx, eax); // Can't clobber eax because we can still jump away.
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
masm->isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&left_smi);
__ mov(ecx, edx); // Can't clobber edx because we can still jump away.
// Save all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+ masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
}
}
}
// Restore all XMM registers except XMM0.
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
XMMRegister reg = XMMRegister::from_code(i);
- masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+ masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
}
masm->add(esp,
Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
CpuFeatureScope use_sse2(&masm, SSE2);
XMMRegister input = xmm1;
XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ movsd(input, Operand(esp, 1 * kPointerSize));
__ push(eax);
__ push(ebx);
__ pop(ebx);
__ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ movsd(Operand(esp, 1 * kPointerSize), result);
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
}
// Move double input into registers.
{
CpuFeatureScope use_sse2(&masm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
__ sqrtsd(xmm0, xmm0);
- __ movdbl(Operand(esp, 1 * kPointerSize), xmm0);
+ __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
// Load result into floating point register as return value.
__ fld_d(Operand(esp, 1 * kPointerSize));
__ Ret();
Label medium_handlers, f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
- __ movdbl(xmm0, Operand(src, 0));
- __ movdbl(xmm1, Operand(src, count, times_1, -8));
- __ movdbl(Operand(dst, 0), xmm0);
- __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ __ movsd(xmm0, Operand(src, 0));
+ __ movsd(xmm1, Operand(src, count, times_1, -8));
+ __ movsd(Operand(dst, 0), xmm0);
+ __ movsd(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
XMMRegister the_hole_nan = xmm1;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(the_hole_nan,
+ __ movsd(the_hole_nan,
Operand::StaticVariable(canonical_the_hole_nan_reference));
}
__ jmp(&entry);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
__ Cvtsi2sd(xmm0, ebx);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
xmm0);
} else {
__ push(ebx);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
+ __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
the_hole_nan);
} else {
__ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
// edx: new heap number
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(masm, SSE2);
- __ movdbl(xmm0,
+ __ movsd(xmm0,
FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
} else {
__ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
__ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
Label done;
- __ movdbl(double_scratch, ExpConstant(0));
+ __ movsd(double_scratch, ExpConstant(0));
__ xorpd(result, result);
__ ucomisd(double_scratch, input);
__ j(above_equal, &done);
__ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
+ __ movsd(result, ExpConstant(2));
__ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
+ __ movsd(double_scratch, ExpConstant(3));
+ __ movsd(result, ExpConstant(4));
__ mulsd(double_scratch, input);
__ addsd(double_scratch, result);
__ movd(temp2, double_scratch);
__ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
+ __ movsd(result, ExpConstant(6));
__ mulsd(double_scratch, ExpConstant(5));
__ subsd(double_scratch, input);
__ subsd(result, double_scratch);
__ shl(temp1, 20);
__ movd(input, temp1);
__ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
+ __ movsd(double_scratch, Operand::StaticArray(
temp2, times_8, ExternalReference::math_exp_log_table()));
__ por(input, double_scratch);
__ mulsd(result, input);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ __ movsd(Operand(esp, offset), xmm_reg);
}
}
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ __ movsd(xmm0, Operand(esp, src_offset));
+ __ movsd(Operand(ebx, dst_offset), xmm0);
}
}
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
- __ movdbl(xmm_reg, Operand(ebx, src_offset));
+ __ movsd(xmm_reg, Operand(ebx, src_offset));
}
}
__ cvtss2sd(xmm1, xmm1);
__ xorps(xmm0, xmm1);
__ subsd(xmm0, xmm1);
- __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
} else {
// 0x4130000000000000 is 1.0 x 2^20 as a double.
__ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ movdbl(MemOperand(esp, count * kDoubleSize),
+ __ movsd(MemOperand(esp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
case Token::MOD: {
// Pass two doubles as arguments on the stack.
__ PrepareCallCFunction(4, eax);
- __ movdbl(Operand(esp, 0 * kDoubleSize), left);
- __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+ __ movsd(Operand(esp, 0 * kDoubleSize), left);
+ __ movsd(Operand(esp, 1 * kDoubleSize), right);
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()),
4);
// Store it into the result register.
__ sub(Operand(esp), Immediate(kDoubleSize));
__ fstp_d(Operand(esp, 0));
- __ movdbl(result, Operand(esp, 0));
+ __ movsd(result, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
break;
}
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->object());
- __ movdbl(MemOperand(esp, 0), input_reg);
+ __ movsd(MemOperand(esp, 0), input_reg);
} else {
__ fstp_d(MemOperand(esp, 0));
}
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(esp, count * kDoubleSize));
save_iterator.Advance();
count++;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, FieldOperand(object, offset));
+ __ movsd(result, FieldOperand(object, offset));
} else {
X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ __ movsd(ToDoubleRegister(instr->result()), operand);
} else {
X87Mov(ToX87Register(instr->result()), operand);
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister result = ToDoubleRegister(instr->result());
- __ movdbl(result, double_load_operand);
+ __ movsd(result, double_load_operand);
} else {
X87Mov(ToX87Register(instr->result()), double_load_operand);
}
ExternalReference::address_of_minus_one_half();
Label done, round_to_zero, below_one_half, do_not_compensate;
- __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half);
__ jmp(&done);
__ bind(&below_one_half);
- __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(input_reg, Operand::StaticVariable(nan));
+ __ movsd(input_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
__ bind(&zero);
- __ push(Immediate(0xFFF00000));
- __ push(Immediate(0));
- __ movdbl(input_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
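+  // Math.log(±0) is -Infinity; load it from the negative-infinity constant.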
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ __ movsd(input_reg, Operand::StaticVariable(ninf));
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
__ sub(Operand(esp), Immediate(kDoubleSize));
- __ movdbl(Operand(esp, 0), input_reg);
+ __ movsd(Operand(esp, 0), input_reg);
__ fld_d(Operand(esp, 0));
__ fyl2x();
__ fstp_d(Operand(esp, 0));
- __ movdbl(input_reg, Operand(esp, 0));
+ __ movsd(input_reg, Operand(esp, 0));
__ add(Operand(esp), Immediate(kDoubleSize));
__ bind(&done);
}
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(object, offset), value);
+ __ movsd(FieldOperand(object, offset), value);
} else {
X87Register value = ToX87Register(instr->value());
X87Mov(FieldOperand(object, offset), value);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ __ movsd(operand, ToDoubleRegister(instr->value()));
} else {
X87Mov(operand, ToX87Register(instr->value()));
}
__ ucomisd(value, value);
__ j(parity_odd, &have_value); // NaN.
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
__ bind(&have_value);
}
- __ movdbl(double_store_operand, value);
+ __ movsd(double_store_operand, value);
} else {
// Can't use SSE2 in the serializer
if (instr->hydrogen()->IsConstantHoleStore()) {
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
if (use_sse2) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
}
// Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = double_scratch0();
ExternalReference nan =
ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
+ __ movsd(result_reg, Operand::StaticVariable(nan));
__ jmp(&done, Label::kNear);
}
} else {
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
} else {
__ push(Immediate(upper));
__ push(Immediate(lower));
- __ movdbl(dst, Operand(esp, 0));
+ __ movsd(dst, Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
}
} else {
} else {
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
}
} else {
// load from the register onto the stack, store in destination, which must
Operand src = cgen_->ToOperand(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ __ movsd(dst, src);
} else {
// We rely on having xmm0 available as a fixed scratch register.
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ __ movsd(xmm0, src);
+ __ movsd(dst, xmm0);
}
} else {
// load from the stack slot on top of the floating point stack, and then
: destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
- __ movdbl(xmm0, other);
- __ movdbl(other, reg);
- __ movdbl(reg, Operand(xmm0));
+ __ movsd(xmm0, other);
+ __ movsd(other, reg);
+ __ movsd(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
Operand src1 = cgen_->HighOperand(source);
Operand dst0 = cgen_->ToOperand(destination);
Operand dst1 = cgen_->HighOperand(destination);
- __ movdbl(xmm0, dst0); // Save destination in xmm0.
+ __ movsd(xmm0, dst0); // Save destination in xmm0.
__ mov(tmp, src0); // Then use tmp to copy source to destination.
__ mov(dst0, tmp);
__ mov(tmp, src1);
__ mov(dst1, tmp);
- __ movdbl(src0, xmm0);
+ __ movsd(src0, xmm0);
} else {
// No other combinations are possible.
j(not_equal, &done, Label::kNear);
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), input_reg);
+ movsd(MemOperand(esp, 0), input_reg);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
bind(&done);
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
cmp(result_reg, 0x80000000u);
j(not_equal, &done, Label::kNear);
if (input_reg.is(result_reg)) {
// Input is clobbered. Restore number from double scratch.
sub(esp, Immediate(kDoubleSize));
- movdbl(MemOperand(esp, 0), xmm0);
+ movsd(MemOperand(esp, 0), xmm0);
SlowTruncateToI(result_reg, esp, 0);
add(esp, Immediate(kDoubleSize));
} else {
ASSERT(!temp.is(no_xmm_reg));
CpuFeatureScope scope(this, SSE2);
- movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
Cvtsi2sd(temp, Operand(result_reg));
ucomisd(xmm0, temp);
cmp(src, Immediate(0));
ExternalReference uint32_bias =
ExternalReference::address_of_uint32_bias();
- movdbl(scratch, Operand::StaticVariable(uint32_bias));
+ movsd(scratch, Operand::StaticVariable(uint32_bias));
Cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
ExternalReference::address_of_canonical_non_hole_nan();
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+ movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
bind(&is_nan);
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope use_sse2(this, SSE2);
- movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+ movsd(scratch2, Operand::StaticVariable(canonical_nan_reference));
} else {
fld_d(Operand::StaticVariable(canonical_nan_reference));
}
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatureScope fscope(this, SSE2);
Cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
+ movsd(FieldOperand(elements, key, times_4,
FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
JumpIfSmi(probe, not_found);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope fscope(this, SSE2);
- movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
} else {
fld_d(FieldOperand(object, HeapNumber::kValueOffset));
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
}
miss_label, DONT_DO_SMI_CHECK);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
} else {
__ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
}
__ bind(&do_store);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope use_sse2(masm, SSE2);
- __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
} else {
__ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
}
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
__ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK);
- __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
// discards NaN.
// Return a new heap number.
__ AllocateHeapNumber(eax, ebx, edx, &slow);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(2 * kPointerSize);
// Return the argument (when it's an already round heap number).
void LCodeGen::DoMathLog(LMathLog* instr) {
- ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
- TranscendentalCacheStub stub(TranscendentalCache::LOG,
- TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ ASSERT(instr->value()->Equals(instr->result()));
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
+ Label positive, done, zero;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(above, &positive, Label::kNear);
+ __ j(equal, &zero, Label::kNear);
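+  // Negative input (or NaN): Math.log yields NaN.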
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ Operand nan_operand = masm()->ExternalOperand(nan);
+ __ movsd(input_reg, nan_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&zero);
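+  // Math.log(±0) is -Infinity.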
+ ExternalReference ninf =
+ ExternalReference::address_of_negative_infinity();
+ Operand ninf_operand = masm()->ExternalOperand(ninf);
+ __ movsd(input_reg, ninf_operand);
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive);
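+  // ln(x) = ln(2) * log2(x): fldln2 pushes ln(2), then fyl2x computes st(1) * log2(st(0)).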
+ __ fldln2();
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), input_reg);
+ __ fld_d(Operand(rsp, 0));
+ __ fyl2x();
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(input_reg, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ bind(&done);
}
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
LMathLog* result = new(zone()) LMathLog(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
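+  // The inlined fyl2x sequence clobbers its input, so the result reuses the input register.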
+ return DefineSameAsFirst(result);
}
Assembler assm(isolate, buffer, sizeof buffer);
CpuFeatureScope fscope(&assm, SSE2);
- __ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
- __ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
+ __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
+ __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
__ addsd(xmm0, xmm1);
__ mulsd(xmm0, xmm1);
__ subsd(xmm0, xmm1);
__ divsd(xmm0, xmm1);
// Copy xmm0 to st(0) using eight bytes of stack.
__ sub(esp, Immediate(8));
- __ movdbl(Operand(esp, 0), xmm0);
+ __ movsd(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(8));
__ ret(0);
__ cvtsi2sd(xmm0, eax);
// Copy xmm0 to st(0) using eight bytes of stack.
__ sub(esp, Immediate(8));
- __ movdbl(Operand(esp, 0), xmm0);
+ __ movsd(Operand(esp, 0), xmm0);
__ fld_d(Operand(esp, 0));
__ add(esp, Immediate(8));
__ ret(0);
MacroAssembler assm(isolate, buffer, sizeof buffer);
{ CpuFeatureScope fscope2(&assm, SSE2);
CpuFeatureScope fscope41(&assm, SSE4_1);
- __ movdbl(xmm1, Operand(esp, 4));
+ __ movsd(xmm1, Operand(esp, 4));
__ extractps(eax, xmm1, 0x1);
__ ret(0);
}
__ mulsd(xmm1, xmm0);
__ subsd(xmm1, xmm0);
__ divsd(xmm1, xmm0);
- __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
- __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
+ __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
__ ucomisd(xmm0, xmm1);
// 128 bit move instructions.