__ movd(res, Operand(temp));
__ psllq(res, 32);
if (lower != 0) {
+ XMMRegister xmm_scratch = double_scratch0();
__ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(xmm_scratch, Operand(temp));
+ __ por(res, xmm_scratch);
}
}
}
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
__ j(not_equal, &not_heap_number, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
} else {
__ fldz();
__ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
CpuFeatureScope scope(masm(), SSE2);
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
CpuFeatureScope scope(masm(), SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
ExternalReference minus_one_half =
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
CpuFeatureScope scope(masm(), SSE2);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
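// (The 32 random bits occupy the low half of the 52-bit mantissa, so the
// double equals 2^20 + r * 2^-32; subtracting 1.0 x 2^20 leaves r * 2^-32,
// a random value in [0, 1).)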
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single.
__ movd(scratch4, scratch3);
__ movd(result, random);
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
- __ xorps(xmm0, xmm0);
- __ ucomisd(input_reg, xmm0);
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(equal, &zero, Label::kNear);
ExternalReference nan =
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm_scratch);
} else {
__ fld(0);
__ fstp_s(operand);
Label slow;
Register reg = ToRegister(value);
Register tmp = reg.is(eax) ? ecx : eax;
+ XMMRegister xmm_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
__ xor_(reg, 0x80000000);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ Cvtsi2sd(xmm0, Operand(reg));
+ __ Cvtsi2sd(xmm_scratch, Operand(reg));
} else {
__ push(reg);
__ fild_s(Operand(esp, 0));
} else {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ LoadUint32(xmm0, reg,
+ __ LoadUint32(xmm_scratch, reg,
ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
} else {
// There's no fild variant for unsigned values, so zero-extend to a 64-bit
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
if (!reg.is(eax)) __ mov(reg, eax);
- // Done. Put the value in xmm0 into the value of the allocated heap
+ // Done. Put the value in xmm_scratch into the value field of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope feature_scope(masm(), SSE2);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
} else {
__ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(result_reg, xmm_scratch);
__ j(not_zero, &done, Label::kNear);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
} else {
X87Register input_reg = ToX87Register(input);
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
// Heap number
__ bind(&heap_number);
- __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
+ __ movdbl(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, xmm1, input_reg);
__ jmp(&done, Label::kNear);
// smi
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// when there is a mulsd depending on the result
__ movaps(left, left);
break;
- case Token::MOD:
+ case Token::MOD: {
+ XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
- __ movaps(xmm0, left);
+ __ movaps(xmm_scratch, left);
ASSERT(right.is(xmm1));
__ CallCFunction(
ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ movaps(result, xmm0);
+ __ movaps(result, xmm_scratch);
break;
+ }
default:
UNREACHABLE();
break;
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
- __ xorps(xmm0, xmm0);
- __ ucomisd(reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
ASSERT(r.IsTagged());
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
ASSERT(!info()->IsStub());
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+ XMMRegister xmm_scratch = double_scratch0();
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
- XMMRegister scratch = xmm0;
+ XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
void LCodeGen::DoMathRound(LMathRound* instr) {
- const XMMRegister xmm_scratch = xmm0;
+ const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
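// (As above: the 32 random bits fill the low half of the 52-bit mantissa,
// so the double equals 2^20 + r * 2^-32; subtracting 1.0 x 2^20 leaves
// r * 2^-32, a random value in [0, 1).)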
XMMRegister result = ToDoubleRegister(instr->result());
- // We use xmm0 as fixed scratch register here.
- XMMRegister scratch4 = xmm0;
+ XMMRegister scratch4 = double_scratch0();
__ movq(scratch3, V8_INT64_C(0x4130000000000000),
RelocInfo::NONE64); // 1.0 x 2^20 as double
__ movq(scratch4, scratch3);
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
+ XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+ MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
// Load value into xmm1 which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
- __ LoadUint32(xmm1, reg, xmm0);
+ XMMRegister xmm_scratch = double_scratch0();
+ __ LoadUint32(xmm1, reg, xmm_scratch);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
}
if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
+ XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
__ TruncateDoubleToI(result_reg, input_reg);
} else {
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
Register result_reg = ToRegister(result);
Label bailout, done;
- __ DoubleToI(result_reg, input_reg, xmm0,
+ XMMRegister xmm_scratch = double_scratch0();
+ __ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
__ jmp(&done, Label::kNear);
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+ XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
- __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
+ __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+ XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
__ JumpIfSmi(input_reg, &is_smi);
// Heap number
__ bind(&heap_number);
- __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg);
+ __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi