// Resolved from unresolved unified-diff residue: '-' (pre-patch) lines dropped,
// '+' (post-patch) lines kept. The patch adds AVX three-operand scalar-double
// arithmetic next to the SSE2 two-operand (in-place) forms.
XMMRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
  case Token::ADD:
    if (CpuFeatures::IsSupported(AVX)) {
      // AVX form writes `result` directly; no aliasing constraint on inputs.
      CpuFeatureScope scope(masm(), AVX);
      __ vaddsd(result, left, right);
    } else {
      // SSE2 addsd computes in place, so the register allocator must have
      // assigned the result to the left operand's register.
      DCHECK(result.is(left));
      __ addsd(left, right);
    }
    break;
  case Token::SUB:
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm(), AVX);
      __ vsubsd(result, left, right);
    } else {
      DCHECK(result.is(left));
      __ subsd(left, right);
    }
    break;
  case Token::MUL:
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm(), AVX);
      __ vmulsd(result, left, right);
    } else {
      DCHECK(result.is(left));
      __ mulsd(left, right);
    }
    break;
  case Token::DIV:
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm(), AVX);
      __ vdivsd(result, left, right);
    } else {
      DCHECK(result.is(left));
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a mulsd depending on the result
      __ movaps(left, left);
    }
    break;
  case Token::MOD: {
    // NOTE(review): unlike the cases above, this block uses chunk-builder
    // operand-allocation calls (UseRegisterAtStart / Define*) rather than
    // macro-assembler emission — it appears two diff hunks from different
    // functions were fused here. Verify against the original patch.
    // Pass two doubles as arguments on the stack.
    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
    // With AVX the result register is unconstrained (three-operand forms);
    // without AVX the in-place SSE2 forms require result == first input.
    return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
                                         : DefineSameAsFirst(result);
  }
}
// Resolved from unresolved unified-diff residue: '-' (pre-patch) lines dropped
// (including the old "computed in-place" DCHECK, which no longer holds once
// AVX three-operand forms are used), '+' (post-patch) lines kept.
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
  case Token::ADD:
    if (CpuFeatures::IsSupported(AVX)) {
      // AVX form writes `result` directly; no aliasing constraint on inputs.
      CpuFeatureScope scope(masm(), AVX);
      __ vaddsd(result, left, right);
    } else {
      // SSE2 addsd computes in place, so the register allocator must have
      // assigned the result to the left operand's register.
      DCHECK(result.is(left));
      __ addsd(left, right);
    }
    break;
  case Token::SUB:
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm(), AVX);
      __ vsubsd(result, left, right);
    } else {
      DCHECK(result.is(left));
      __ subsd(left, right);
    }
    break;
  case Token::MUL:
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm(), AVX);
      __ vmulsd(result, left, right);
    } else {
      DCHECK(result.is(left));
      __ mulsd(left, right);
    }
    break;
  case Token::DIV:
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm(), AVX);
      __ vdivsd(result, left, right);
    } else {
      DCHECK(result.is(left));
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a mulsd depending on the result
      __ movaps(left, left);
    }
    break;
  case Token::MOD: {
    // NOTE(review): this case mixes a codegen-side scratch register with
    // chunk-builder operand-allocation calls (UseRegisterAtStart / Define*) —
    // it appears two diff hunks from different functions were fused here.
    // Verify against the original patch.
    XMMRegister xmm_scratch = double_scratch0();
    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
    // With AVX the result register is unconstrained (three-operand forms);
    // without AVX the in-place SSE2 forms require result == first input.
    return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
                                         : DefineSameAsFirst(result);
  }
}