"enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_32dregs, true,
"enable use of d16-d31 registers on ARM - this requires VFP3")
-DEFINE_bool(enable_fpu, true,
- "enable use of MIPS FPU instructions if available (MIPS only)")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
int Register::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
return kMaxNumAllocatableRegisters;
- } else {
- return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
- }
}
int DoubleRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumRegisters;
- } else {
- return 1;
- }
}
int DoubleRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumAllocatableRegisters;
- } else {
- return 1;
- }
}
const char* DoubleRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(FPU)) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "f0",
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26"
+ };
+ return names[index];
}
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
#if !defined(__mips__)
- // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
- if (FLAG_enable_fpu) {
- supported_ |= static_cast<uint64_t>(1) << FPU;
- }
+ // For the simulator build, use FPU.
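+ // (The simulator implements the FPU instructions, so no probe is needed.)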
+ supported_ |= static_cast<uint64_t>(1) << FPU;
#else
// Probe for additional features not already known to be available.
if (OS::MipsCpuHasFeature(FPU)) {
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func;
emit(instr);
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
FPUControlRegister fs,
SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- ASSERT(IsEnabled(FPU));
ASSERT(src2 == 0.0);
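+ // Only comparison against 0.0 is supported (asserted above); 0.0 is
+ // materialized in f14 by converting the integer zero register.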
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
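+ // cc (three bits, asserted above) selects one of the eight FPU condition
+ // flags at bits 20..18; bit 16 is the truth sense, 0 to branch when the
+ // flag is false (cf. bc1t below, which uses 1).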
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
static const int kNumRegisters = v8::internal::kNumRegisters;
static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4;
- static const int kGPRsPerNonFPUDouble = 2;
inline static int NumAllocatableRegisters();
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
-const Register sfpd_lo = { kRegister_t6_Code };
-const Register sfpd_hi = { kRegister_t7_Code };
-
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- if (f == FPU && !FLAG_enable_fpu) return false;
return (supported_ & (1u << f)) != 0;
}
Label* rhs_not_nan,
Label* slow,
bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from a0 to a3 and a2 in double format.
- __ mov(scratch1, a0);
- ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
- __ push(ra);
- __ Call(stub1.GetCode(masm->isolate()));
- // Write Smi from a1 to a1 and a0 in double format.
- __ mov(scratch1, a1);
- ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(ra);
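+ // Untag each smi with an arithmetic shift, then convert it to a double
+ // directly in the FPU.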
+ __ sra(scratch1, a0, kSmiTagSize);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ sra(scratch1, a1, kSmiTagSize);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
+ if (destination == kCoreRegisters) {
+ __ Move(a2, a3, f14);
+ __ Move(a0, a1, f12);
}
}
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(FPU) &&
- destination == kFPURegisters) {
- CpuFeatureScope scope(masm, FPU);
+ if (destination == kFPURegisters) {
// Load the double from tagged HeapNumber to double register.
// ARM uses a workaround here because of the unaligned HeapNumber
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, object);
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(ra);
- __ Call(stub.GetCode(masm->isolate()));
- __ pop(ra);
+ // Convert smi to double using FPU instructions.
+ __ mtc1(scratch1, dst);
+ __ cvt_d_w(dst, dst);
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ Move(dst1, dst2, dst);
}
-
__ bind(&done);
}
ASSERT(!int_scratch.is(dst_mantissa));
ASSERT(!int_scratch.is(dst_exponent));
- Label done;
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst_exponent | dst_mantissa |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
- __ Branch(&done, eq, int_scratch, Operand(zero_reg));
-
- // Preload the sign of the value.
- __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
- // Get the absolute value of the object (as an unsigned integer).
- Label skip_sub;
- __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
- __ Subu(int_scratch, zero_reg, int_scratch);
- __ bind(&skip_sub);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ Clz(dst_mantissa, int_scratch);
- __ li(scratch2, 31);
- __ Subu(dst_mantissa, scratch2, dst_mantissa);
-
- // Set the exponent.
- __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Ins(dst_exponent, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst_mantissa);
- __ li(at, -1);
- __ Xor(scratch2, scratch2, at);
- __ And(int_scratch, int_scratch, scratch2);
-
- // Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst_mantissa,
- Operand(HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&fewer_than_20_useful_bits, le, scratch2, Operand(zero_reg));
- // Set the higher 20 bits of the mantissa.
- __ srlv(at, int_scratch, scratch2);
- __ or_(dst_exponent, dst_exponent, at);
- __ li(at, 32);
- __ subu(scratch2, at, scratch2);
- __ sllv(dst_mantissa, int_scratch, scratch2);
- __ Branch(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst_mantissa);
- __ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst_exponent, dst_exponent, scratch2);
- // Set dst_mantissa to 0.
- __ mov(dst_mantissa, zero_reg);
+ __ mtc1(int_scratch, single_scratch);
+ __ cvt_d_w(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
- __ bind(&done);
}
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
-
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- if (object.is(dst_mantissa)) {
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- } else {
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- }
-
- // Check for 0 and -0.
- Label zero;
- __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst_mantissa));
- __ Branch(&zero, eq, scratch1, Operand(zero_reg));
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- if (object.is(dst_mantissa)) {
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- } else {
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- }
-
- __ Branch(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Branch(not_int32);
-
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
+ // Load the double value.
+ __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ scratch1,
+ double_dst,
+ at,
+ double_scratch,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ if (destination == kCoreRegisters) {
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
-
__ bind(&done);
}
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- } else {
- // Load the double value in the destination registers.
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
- __ Or(dst, scratch2, Operand(dst));
- __ Branch(&done, eq, dst, Operand(zero_reg));
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ srlv(dst, dst, scratch3);
- // Set the implicit first bit.
- __ li(at, 32);
- __ subu(scratch3, at, scratch3);
- __ sllv(scratch2, scratch2, scratch3);
- __ Or(dst, dst, scratch2);
- // Set the sign.
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Label skip_sub;
- __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
- __ Subu(dst, zero_reg, dst);
- __ bind(&skip_sub);
- }
+ // Load the double value.
+ __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ dst,
+ double_scratch0,
+ scratch1,
+ double_scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
__ Branch(&done);
__ bind(&maybe_undefined);
}
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ext(scratch,
- src_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ Branch(not_int32, lt, scratch, Operand(zero_reg));
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ srl(at, src_exponent, 31);
- __ subu(tmp, scratch, at);
- __ Branch(not_int32, gt, tmp, Operand(30));
- // - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src_mantissa, 0x3fffff);
- __ Branch(not_int32, ne, tmp, Operand(zero_reg));
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ext(dst,
- src_mantissa,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ or_(dst, dst, at);
-
- // Create the mask and test the lower bits (of the higher bits).
- __ li(at, 32);
- __ subu(scratch, at, scratch);
- __ li(src_mantissa, 1);
- __ sllv(src_exponent, src_mantissa, scratch);
- __ Subu(src_exponent, src_exponent, Operand(1));
- __ And(src_exponent, dst, src_exponent);
- __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
-}
-
-
void FloatingPointHelper::CallCCodeForDoubleOperation(
MacroAssembler* masm,
Token::Value op,
__ push(ra);
__ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
if (!IsMipsSoftFloatABI) {
- CpuFeatureScope scope(masm, FPU);
    // We are not using MIPS FPU instructions, and parameters for the runtime
    // function call are prepared in a0-a3 registers, but the function we are
    // calling is compiled with the hard-float flag and expects the hard-float ABI
}
// Store answer in the overwritable heap number.
if (!IsMipsSoftFloatABI) {
- CpuFeatureScope scope(masm, FPU);
// Double returned in register f0.
__ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
// Rhs is a smi, lhs is a number.
// Convert smi rhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ sra(at, rhs, kSmiTagSize);
- __ mtc1(at, f14);
- __ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- } else {
- // Load lhs to a double in a2, a3.
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-
- // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
- __ mov(t6, rhs);
- ConvertToDoubleStub stub1(a1, a0, t6, t5);
- __ push(ra);
- __ Call(stub1.GetCode(masm->isolate()));
-
- __ pop(ra);
- }
+ __ sra(at, rhs, kSmiTagSize);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
// Lhs is a smi, rhs is a number.
// Convert smi lhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ sra(at, lhs, kSmiTagSize);
- __ mtc1(at, f12);
- __ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- // Convert lhs to a double format. t5 is scratch.
- __ mov(t6, lhs);
- ConvertToDoubleStub stub2(a3, a2, t6, t5);
- __ push(ra);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(ra);
- // Load rhs to a double in a1, a0.
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
+ __ sra(at, lhs, kSmiTagSize);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
-void EmitNanCheck(MacroAssembler* masm, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
- Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = t4;
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ and_(t5, lhs_exponent, exp_mask_reg);
- __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
-
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(t5, rhs_exponent, exp_mask_reg);
-
- __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
-
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
- // f12 and f14 have the two doubles. Neither is a NaN.
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- // We use a call_was and return manually because we need arguments slots to
- // be freed.
-
- Label return_result_not_equal, return_result_equal;
- if (cc == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
-
- __ xor_(v0, rhs_mantissa, lhs_mantissa);
- __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
-
- __ subu(v0, rhs_exponent, lhs_exponent);
- __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
- // 0, -0 case.
- __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
- __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
- __ or_(t4, rhs_exponent, lhs_exponent);
- __ or_(t4, t4, rhs_mantissa);
-
- __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
-
- __ bind(&return_result_equal);
-
- __ li(v0, Operand(EQUAL));
- __ Ret();
- }
-
- __ bind(&return_result_not_equal);
-
- if (!CpuFeatures::IsSupported(FPU)) {
- __ push(ra);
- __ PrepareCallCFunction(0, 2, t4);
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(ra); // Because this function returns int, result is in v0.
- __ Ret();
- } else {
- CpuFeatureScope scope(masm, FPU);
- Label equal, less_than;
- __ BranchF(&equal, NULL, eq, f12, f14);
- __ BranchF(&less_than, NULL, lt, f12, f14);
-
- // Not equal, not less, not NaN, must be greater.
-
- __ li(v0, Operand(GREATER));
- __ Ret();
-
- __ bind(&equal);
- __ li(v0, Operand(EQUAL));
- __ Ret();
-
- __ bind(&less_than);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
-}
-
-
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
__ jmp(both_loaded_as_doubles);
}
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
- } else {
- // Note that there is no cache check for non-FPU case, even though
- // it seems there could be. May be a tiny opimization for non-FPU
- // cores.
- __ Branch(not_found);
- }
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ __ lw(scratch1, MemOperand(scratch1, 0));
+ __ Xor(scratch1, scratch1, Operand(scratch2));
+ __ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ __ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ __ lw(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ __ Branch(not_found);
}
__ bind(&is_smi);
// left hand side and a0, a1 represent right hand side.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- Label nan;
- __ li(t0, Operand(LESS));
- __ li(t1, Operand(GREATER));
- __ li(t2, Operand(EQUAL));
-
- // Check if either rhs or lhs is NaN.
- __ BranchF(NULL, &nan, eq, f12, f14);
-
- // Check if LESS condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
+ Label nan;
+ __ li(t0, Operand(LESS));
+ __ li(t1, Operand(GREATER));
+ __ li(t2, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ BranchF(NULL, &nan, eq, f12, f14);
+
+  // Check if the LESS condition is satisfied. If true, conditionally move
+  // the result to v0.
+  __ c(OLT, D, f12, f14);
+  __ Movt(v0, t0);
+  // Use the previous check to conditionally store the opposite condition
+  // (GREATER) in v0. If rhs is equal to lhs, this will be corrected by the
+  // next check.
+  __ Movf(v0, t1);
+  // Check if the EQUAL condition is satisfied. If true, conditionally move
+  // the result to v0.
+  __ c(EQ, D, f12, f14);
+  __ Movt(v0, t2);
- __ Ret();
+ __ Ret();
- __ bind(&nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
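+  // A GREATER result makes lt/le comparisons fail; a LESS result makes
+  // gt/ge comparisons fail.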
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
} else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc);
-
- // Compares two doubles that are not NaNs. Returns the answer.
- // Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ __ li(v0, Operand(LESS));
}
+ __ Ret();
__ bind(¬_smis);
// At this point we know we are dealing with two different objects,
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub uses FPU instructions.
- CpuFeatureScope scope(masm, FPU);
-
Label patch;
const Register map = t5.is(tos_) ? t3 : t5;
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
__ MultiPopFPU(kCallerSavedFPU);
}
__ mov(v0, a2); // Move newly allocated heap number to v0.
}
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(a1, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
+ // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+ __ mtc1(a1, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
__ bind(&impossible);
if (FLAG_debug_code) {
void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
+ platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
case Token::DIV:
case Token::MOD: {
// Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on whether FPU is available or not.
+ // depending on the operation.
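+ // Token::MOD still uses core registers, since it is implemented with a
+ // C call (see CallCCodeForDoubleOperation).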
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(FPU) &&
op != Token::MOD ?
FloatingPointHelper::kFPURegisters :
FloatingPointHelper::kCoreRegisters;
// Using FPU registers:
// f12: Left value.
// f14: Right value.
- CpuFeatureScope scope(masm, FPU);
switch (op) {
case Token::ADD:
__ add_d(f10, f12, f14);
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- } else {
- __ Branch(not_numbers, lt, a2, Operand(zero_reg));
- }
+ __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
// Nothing can go wrong now, so move the heap number to v0, which is the
// result.
__ mov(v0, t1);
-
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a2 to the heap number in a0. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
+ // Convert the int32 in a2 to the heap number in a0. As
+ // mentioned above SHR needs to always produce a positive result.
+ __ mtc1(a2, f0);
+ if (op == Token::SHR) {
+ __ Cvt_d_uw(f0, f0, f22);
} else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
+ __ cvt_d_w(f0, f0);
}
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
break;
}
default:
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
+ FloatingPointHelper::Destination destination = (op_ != Token::MOD)
? FloatingPointHelper::kFPURegisters
: FloatingPointHelper::kCoreRegisters;
&transition);
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatureScope scope(masm, FPU);
Label return_heap_number;
switch (op_) {
case Token::ADD:
// We only get a negative result if the shift value (a2) is 0.
// This result cannot be represented as a signed 32-bit integer, try
// to return a heap number if we can.
- // The non FPU code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- } else {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime,
- lt,
- a2,
- Operand(zero_reg));
- }
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number,
+ lt,
+ a2,
+ Operand(zero_reg));
break;
case Token::SHL:
__ And(a2, a2, Operand(0x1f));
&call_runtime,
mode_);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ mov(v0, heap_number_result);
- __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ mtc1(a2, double_scratch);
+ __ cvt_d_w(double_scratch, double_scratch);
} else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- __ mov(v0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ mtc1(a2, double_scratch);
+ __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
}
+ // Store the result.
+ __ mov(v0, heap_number_result);
+ __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+
break;
}
const Register cache_entry = a0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+ if (tagged) {
+ // Argument is a number and is on stack and in a0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(a0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into a2, a3.
+ __ sra(t0, a0, kSmiTagSize);
+ __ mtc1(t0, f4);
+ __ cvt_d_w(f4, f4);
+ __ Move(a2, a3, f4);
+ __ Branch(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(a0,
+ a1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Store the
+ // low and high words into a2, a3.
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+ } else {
+ // Input is untagged double in f4. Output goes to f4.
+ __ Move(a2, a3, f4);
+ }
+ __ bind(&loaded);
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ Xor(a1, a2, a3);
+ __ sra(t0, a1, 16);
+ __ Xor(a1, a1, t0);
+ __ sra(t0, a1, 8);
+ __ Xor(a1, a1, t0);
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
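+  // The xor/shift folds above mix all 64 bits of the double into the low
+  // bits before masking to the cache-index range.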
+
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // a1 = TranscendentalCache::hash(double value).
+ __ li(cache_entry, Operand(
+ ExternalReference::transcendental_cache_array_address(
+ masm->isolate())));
+ // a0 points to cache array.
+ __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
+ // a0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
+ // Check that the layout of cache elements match expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+  CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
#endif
- // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(FPU))
+  // Find the address of the a1-th entry in the cache, i.e., &a0[a1 * 12].
+ __ sll(t0, a1, 1);
+ __ Addu(a1, a1, t0);
+ __ sll(t0, a1, 2);
+ __ Addu(cache_entry, cache_entry, t0);
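+  // (The multiply by 12 above is strength-reduced to (a1 * 3) << 2.)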
+
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ lw(t0, MemOperand(cache_entry, 0));
+ __ lw(t1, MemOperand(cache_entry, 4));
+ __ lw(t2, MemOperand(cache_entry, 8));
+ __ Branch(&calculate, ne, a2, Operand(t0));
+ __ Branch(&calculate, ne, a3, Operand(t1));
+ // Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
+ if (tagged) {
+ // Pop input value from stack and load result into v0.
+ __ Drop(1);
+ __ mov(v0, t2);
+ } else {
+ // Load result into f4.
+ __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+ }
+ __ Ret();
__ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
1,
1);
} else {
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatureScope scope(masm, FPU);
-
Label no_update;
Label skip_cache;
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatureScope fpu_scope(masm, FPU);
const Register base = a1;
const Register exponent = a2;
const Register heapnumbermap = t1;
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
+ SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub save_doubles(1, mode);
StoreBufferOverflowStub stub(mode);
// These stubs might already be in the snapshot, detect that and don't
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved | ra.bit());
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
- }
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(FPU)) {
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
- }
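+ // The callee-saved FPU registers are now always pushed above, so the
+ // offset to argv always includes them.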
+ offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
__ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
// Reset the stack to the callee saved registers.
__ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Restore callee-saved fpu registers.
- __ MultiPopFPU(kCalleeSavedFPU);
- }
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
__ MultiPop(kCalleeSaved | ra.bit());
}
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or FPU is unsupported.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(a0, &right_smi);
- __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&left);
- __ bind(&right_smi);
- __ SmiUntag(a2, a0); // Can't clobber a0 yet.
- FPURegister single_scratch = f6;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f2, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(a1, &left_smi);
- __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&done);
- __ bind(&left_smi);
- __ SmiUntag(a2, a1); // Can't clobber a1 yet.
- single_scratch = f8;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f0, single_scratch);
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
- __ bind(&done);
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
- // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
- Label fpu_eq, fpu_lt;
- // Test if equal, and also handle the unordered/NaN case.
- __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+ __ bind(&done);
- // Test if less (unordered case is already handled).
- __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+ // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+ Label fpu_eq, fpu_lt;
+ // Test if equal, and also handle the unordered/NaN case.
+ __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
- // Otherwise it's greater, so just fall thru, and return.
- __ li(v0, Operand(GREATER));
- __ Ret();
+ // Test if less (unordered case is already handled).
+ __ BranchF(&fpu_lt, NULL, lt, f0, f2);
- __ bind(&fpu_eq);
- __ li(v0, Operand(EQUAL));
- __ Ret();
+ // Otherwise it's greater, so just fall thru, and return.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
- __ bind(&fpu_lt);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
+ __ bind(&fpu_eq);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&fpu_lt);
+ __ li(v0, Operand(LESS));
+ __ Ret();
__ bind(&unordered);
__ bind(&generic_stub);
bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(FPU);
+ return true; // FPU is a base requirement for V8.
}
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) {
- ASSERT(CpuFeatures::IsSafeForSnapshot(FPU) || save_fp == kDontSaveFPRegs);
- }
+ : save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
masm->MultiPushFPU(kCallerSavedFPU);
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
masm->MultiPopFPU(kCallerSavedFPU);
}
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
FPURegister double_scratch1,
Label* not_int32);
- // Generate non FPU code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
- // src1 and src2 will be cloberred.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
// Generates code to call a C function to do a double operation using core
- // registers. (Used when FPU is not supported.)
+ // registers. (Used for operations such as Token::MOD that are implemented
+ // with a C call.)
// This code never falls through, but returns with a heap number containing
UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(FPU)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
- CpuFeatureScope use_fpu(&masm, FPU);
DoubleRegister input = f12;
DoubleRegister result = f0;
DoubleRegister double_scratch1 = f4;
// -- t0 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool fpu_supported = CpuFeatures::IsSupported(FPU);
Register scratch = t6;
// t2: end of destination FixedDoubleArray, not tagged
// t3: begin of FixedDoubleArray element fields, not tagged
- if (!fpu_supported) __ Push(a1, a0);
-
__ Branch(&entry);
__ bind(&only_change_map);
__ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
// Normal smi, convert to double and store.
- if (fpu_supported) {
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(t5, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(t3));
- __ Addu(t3, t3, kDoubleSize);
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- t5,
- FloatingPointHelper::kCoreRegisters,
- f0,
- a0,
- a1,
- t7,
- f0);
- __ sw(a0, MemOperand(t3)); // mantissa
- __ sw(a1, MemOperand(t3, kIntSize)); // exponent
- __ Addu(t3, t3, kDoubleSize);
- }
+ __ mtc1(t5, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(t3));
+ __ Addu(t3, t3, kDoubleSize);
+
__ Branch(&entry);
// Hole found, store the-hole NaN.
__ bind(&entry);
__ Branch(&loop, lt, t3, Operand(t2));
- if (!fpu_supported) __ Pop(a1, a0);
__ pop(ra);
__ bind(&done);
}
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
+// This flag is raised when -msoft-float is passed to the compiler.
+// Although an FPU is a base requirement for V8, the soft-float ABI is used
+// on soft-float systems that rely on kernel FPU emulation.
const bool IsMipsSoftFloatABI = true;
#else
const bool IsMipsSoftFloatABI = true;
const int kDoubleRegsSize =
kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
- }
- } else {
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
}
int double_regs_offset = FrameDescription::double_registers_offset();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
- }
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id, eventually return address, and the saved registers
__ bind(&outer_loop_header);
__ Branch(&outer_push_loop, lt, t0, Operand(a1));
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
-
- __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(FPU)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
- __ mov(at, zero_reg);
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- }
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub, condition->test_id());
+ __ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
// Convert 32 random bits in v0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
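+ // That is, (2^20 + r * 2^-32) - 2^20 = r * 2^-32, where r is the 32-bit
+ // random integer, giving a value in [0, 1).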
- if (CpuFeatures::IsSupported(FPU)) {
- __ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatureScope scope(masm(), FPU);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a1, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a1);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a1);
- // Subtract and store the result in the heap number.
- __ sub_d(f0, f12, f14);
- __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
- __ mov(v0, s0);
- } else {
- __ PrepareCallCFunction(2, a0);
- __ mov(a0, s0);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
+ __ PrepareCallCFunction(1, a0);
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ __ li(a1, Operand(0x41300000));
+ // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+ __ Move(f12, v0, a1);
+ // Move 0x4130000000000000 to FPU.
+ __ Move(f14, zero_reg, a1);
+ // Subtract and store the result in the heap number.
+ __ sub_d(f0, f12, f14);
+ __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
+ __ mov(v0, s0);
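The 0x41300000 trick deserves a note: with that high word the double's value is 2^20 + random_bits * 2^-32, so subtracting 1.0 * 2^20 leaves exactly 0.(32 random bits). A host-side sketch of the same bit manipulation, assuming IEEE-754 doubles (the helper name is illustrative):

  #include <stdint.h>
  #include <string.h>

  double RandomBitsToDouble(uint32_t random_bits) {
    // High word 0x41300000: sign 0, biased exponent 0x413 (i.e. 2^20), top 20
    // mantissa bits zero; the low word carries the 32 random payload bits.
    uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
    double d;
    memcpy(&d, &bits, sizeof d);
    return d - 1048576.0;  // subtract 1.0 * 2^20, leaving random_bits * 2^-32
  }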
context()->Plug(v0);
}
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(FPU)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(v0);
}
}
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
+ if (info()->saves_caller_doubles()) {
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
// This is computed in-place.
ASSERT(addend.is(ToDoubleRegister(instr->result())));
- CpuFeatureScope scope(masm(), FPU);
__ madd_d(addend, addend, multiplier, multiplicand);
}
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
- CpuFeatureScope scope(masm(), FPU);
double v = instr->value();
__ Move(result, v);
}
__ bind(&done);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- CpuFeatureScope scope(masm(), FPU);
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero);
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- CpuFeatureScope scope(masm(), FPU);
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), FPU);
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
FPURegister left_reg = ToDoubleRegister(left);
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
+ if (info()->saves_caller_doubles()) {
ASSERT(NeedsEagerFrame());
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Register value = external_pointer;
- __ lw(value, MemOperand(scratch0(), additional_offset));
- __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
- __ srl(scratch0(), value, kBinary32MantissaBits);
- __ And(scratch0(), scratch0(),
- Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ Xor(at, scratch0(), Operand(0x00));
- __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
-
- __ Xor(at, scratch0(), Operand(0xff));
- Label skip;
- __ Branch(&skip, ne, at, Operand(zero_reg));
- __ li(scratch0(), Operand(0x7ff));
- __ bind(&skip);
- __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
-
- // Rebias exponent.
- __ Addu(scratch0(),
- scratch0(),
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ And(sfpd_hi, value, Operand(kBinary32SignMask));
- __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
- __ Or(sfpd_hi, sfpd_hi, at);
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
- __ Or(sfpd_hi, sfpd_hi, at);
- __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
-
- } else {
- __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
- __ lw(sfpd_hi, MemOperand(scratch0(),
- additional_offset + kPointerSize));
- }
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
}
} else {
Register result = ToRegister(instr->result());
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, scratch);
}
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ Addu(elements, elements, Operand(base_offset));
- __ ldc1(result, MemOperand(elements));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
- }
- } else {
- __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
- __ lw(sfpd_lo, MemOperand(elements, base_offset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- ASSERT(kPointerSize == sizeof(kHoleNanLower32));
- DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
- }
+ __ Addu(elements, elements, Operand(base_offset));
+ __ ldc1(result, MemOperand(elements));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
- CpuFeatureScope scope(masm(), FPU);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
void LCodeGen::DoMathFloor(LMathFloor* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch1 = scratch0();
void LCodeGen::DoMathRound(LMathRound* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->temp());
void LCodeGen::DoPower(LPower* instr) {
- CpuFeatureScope scope(masm(), FPU);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatureScope scope(masm(), FPU);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), FPU);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- CpuFeatureScope scope(masm(), FPU);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), FPU);
LOperand* input = instr->value();
LOperand* output = instr->result();
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register src.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register src,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
- masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- masm->sll(loword, src, mantissa_shift_for_lo_word);
- masm->srl(hiword, src, mantissa_shift_for_hi_word);
- masm->Or(hiword, scratch, hiword);
- } else {
- masm->mov(loword, zero_reg);
- masm->sll(hiword, src, mantissa_shift_for_hi_word);
- masm->Or(hiword, scratch, hiword);
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- masm->li(scratch, 1 << HeapNumber::kExponentShift);
- masm->nor(scratch, scratch, scratch);
- masm->and_(hiword, hiword, scratch);
- }
-}
-
-
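For reference, the double that the deleted helper assembled into its hiword:loword pair can be written compactly on the host side. A sketch for nonzero src (the helper name and the clz builtin are illustrative; like the removed code, zero is not handled):

  #include <stdint.h>

  uint64_t UInt2DoubleBits(uint32_t src) {
    int msb = 31 - __builtin_clz(src);  // GCC/Clang builtin; requires src != 0
    uint64_t mantissa = static_cast<uint64_t>(src) << (52 - msb);  // implicit 1 at bit 52
    mantissa &= (static_cast<uint64_t>(1) << 52) - 1;  // drop the implicit bit
    uint64_t exponent = static_cast<uint64_t>(1023 + msb) << 52;  // biased exponent
    return exponent | mantissa;  // sign bit stays 0
  }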
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
__ SmiUntag(src, dst);
__ Xor(src, src, Operand(0x80000000));
}
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
- } else {
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
- sfpd_lo, sfpd_hi,
- scratch0(), f2);
- }
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
- } else {
- Label no_leading_zero, convert_done;
- __ And(at, src, Operand(0x80000000));
- __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
-
- // Integer has one leading zeros.
- GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, t0, 1);
- __ Branch(&convert_done);
-
- __ bind(&no_leading_zero);
- GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, t0, 0);
- __ bind(&convert_done);
- }
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
}
if (FLAG_inline_new) {
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
- } else {
- __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
- __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
- }
+ __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
__ Addu(dst, dst, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(dst, dst);
}
Label no_special_nan_handling;
Label done;
if (convert_hole) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
- __ Move(reg, scratch0(), input_reg);
- Label canonicalize;
- __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&canonicalize);
- __ Move(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- } else {
- Label not_hole;
- __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&not_hole);
- __ And(scratch, sfpd_hi, Operand(0x7ff00000));
- __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
- Label special_nan_handling;
- __ And(at, sfpd_hi, Operand(0x000FFFFF));
- __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
- __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
- __ bind(&special_nan_handling);
- double canonical_nan =
- FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
- __ li(sfpd_lo,
- Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
- __ li(sfpd_hi,
- Operand(static_cast<uint32_t>(casted_nan >> 32)));
- }
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+ __ Move(reg, scratch0(), input_reg);
+ Label canonicalize;
+ __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+ __ li(reg, factory()->the_hole_value());
+ __ Branch(&done);
+ __ bind(&canonicalize);
+ __ Move(input_reg,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
}
__ bind(&no_special_nan_handling);
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
- } else {
- __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
- __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
- }
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
// Now that we have finished with the object's real address, tag it.
__ Addu(reg, reg, kHeapObjectTag);
__ bind(&done);
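The convert_hole path works because the array hole is stored as one specific NaN bit pattern whose upper 32 bits equal kHoleNanUpper32; every other NaN is canonicalized before being stored, so the upper-word comparison is the only test needed. A sketch of the predicate, taking the constant as a parameter since its value is not shown here:

  #include <stdint.h>
  #include <string.h>

  bool IsTheHoleNan(double d, uint32_t hole_nan_upper32) {
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);
    return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
  }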
LEnvironment* env,
NumberUntagDMode mode) {
Register scratch = scratch0();
- CpuFeatureScope scope(masm(), FPU);
Label load_smi, heap_number, done;
// This 'at' value and scratch1 map value are used for tests in both clauses
// of the if.
- CpuFeatureScope scope(masm(), FPU);
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
FPURegister single_scratch = double_scratch.low();
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), FPU);
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), FPU);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), FPU);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
}
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
}
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- // float->double conversion on soft float requires an extra scratch
- // register. For convenience, just mark the elements register as "UseTemp"
- // so that it can be used as a temp during the float->double conversion
- // after it's no longer needed after the float load.
- bool needs_temp =
- !CpuFeatures::IsSupported(FPU) &&
- (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
- LOperand* external_pointer = needs_temp
- ? UseTempRegister(instr->elements())
- : UseRegister(instr->elements());
+ LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
void MacroAssembler::MultiPushFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
void MacroAssembler::MultiPopFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
Subu(scratch2, scratch2, Operand(zero_exponent));
// Dest already has a Smi zero.
Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!CpuFeatures::IsSupported(FPU)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- srl(dest, scratch2, HeapNumber::kExponentShift);
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- li(at, Operand(30));
- subu(dest, at, dest);
- }
bind(&right_exponent);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(this, FPU);
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
- } else {
- // On entry, dest has final downshift, scratch has original sign/exp/mant.
- // Save sign bit in top bit of dest.
- And(scratch2, scratch, Operand(0x80000000));
- Or(dest, dest, Operand(scratch2));
- // Put back the implicit 1, just above mantissa field.
- Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
-
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance. But we want to clear the sign-bit so shift one more bit
- // left, then shift right one bit.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- sll(scratch, scratch, shift_distance + 1);
- srl(scratch, scratch, 1);
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
- // The width of the field here is the same as the shift amount above.
- const int field_width = shift_distance;
- Ext(scratch2, scratch2, 32-shift_distance, field_width);
- Ins(scratch, scratch2, 0, field_width);
- // Move down according to the exponent.
- srlv(scratch, scratch, dest);
- // Prepare the negative version of our integer.
- subu(scratch2, zero_reg, scratch);
- // Trick to check sign bit (msb) held in dest, count leading zero.
- // 0 indicates negative, save negative version with conditional move.
- Clz(dest, dest);
- Movz(scratch, scratch2, dest);
- mov(dest, scratch);
- }
+
+ // MIPS FPU instructions implementing double precision to integer
+ // conversion using round to zero. Since the FP value was qualified
+ // above, the resulting integer should be a legal int32.
+ // The original 'Exponent' word is still in scratch.
+ lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
+ trunc_w_d(double_scratch, double_scratch);
+ mfc1(dest, double_scratch);
+
bind(&done);
}
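trunc_w_d rounds toward zero, so for values already qualified to fit in an int32 (as the exponent checks above guarantee) the sequence matches a plain C++ cast; this is a sketch of the semantics only, since the cast is undefined for out-of-range inputs:

  int32_t TruncateDouble(double d) {
    return static_cast<int32_t>(d);  // round toward zero: 2.9 -> 2, -2.9 -> -2
  }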
ASSERT(!double_input.is(double_scratch));
ASSERT(!except_flag.is(scratch));
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatureScope scope(this, FPU);
Label done;
// Clear the except flag (0 = no exception)
Register scratch,
Register scratch2,
Register scratch3) {
- CpuFeatureScope scope(this, FPU);
ASSERT(!scratch2.is(result));
ASSERT(!scratch3.is(result));
ASSERT(!scratch3.is(scratch2));
// scratch1 is now effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
+ FloatingPointHelper::Destination destination =
+ FloatingPointHelper::kFPURegisters;
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
scratch4,
f2);
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatureScope scope(this, FPU);
sdc1(f0, MemOperand(scratch1, 0));
} else {
sw(mantissa_reg, MemOperand(scratch1, 0));
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- CpuFeatureScope scope(this, FPU);
if (IsMipsSoftFloatABI) {
Move(dst, v0, v1);
} else {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
} else {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
- CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
if (dreg2.is(f12)) {
ASSERT(!dreg1.is(f14));
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
- CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
Move(a2, reg);
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
- SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub stub(1, mode);
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
- CpuFeatureScope scope(this, FPU);
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
ASSERT(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
- CpuFeatureScope scope(this, FPU);
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
Register dst,
Register wordoffset,
Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(ival, f0);
- __ cvt_s_w(f0, f0);
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ swc1(f0, MemOperand(scratch1, 0));
- } else {
- // FPU is not available, do manual conversions.
-
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ And(fval, ival, Operand(kBinary32SignMask));
- // Negate value if it is negative.
- __ subu(scratch1, zero_reg, ival);
- __ Movn(ival, scratch1, fval);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, ival, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ Xor(scratch1, ival, Operand(1));
- __ li(scratch2, exponent_word_for_1);
- __ or_(scratch2, fval, scratch2);
- __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
- __ Branch(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ Clz(zeros, ival);
-
- // Compute exponent and or it into the exponent register.
- __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
- __ subu(scratch1, scratch1, zeros);
-
- __ sll(scratch1, scratch1, kBinary32ExponentShift);
- __ or_(fval, fval, scratch1);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ sllv(ival, ival, zeros);
- // And the top (top 20 bits).
- __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
- __ or_(fval, fval, scratch1);
-
- __ bind(&done);
-
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ sw(fval, MemOperand(scratch1, 0));
- }
+ Register scratch1) {
+ __ mtc1(ival, f0);
+ __ cvt_s_w(f0, f0);
+ __ sll(scratch1, wordoffset, 2);
+ __ addu(scratch1, dst, scratch1);
+ __ swc1(f0, MemOperand(scratch1, 0));
}
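The deleted branch packed the binary32 representation by hand; cvt_s_w now performs the same conversion in one instruction. A host-side sketch of the manual packing for the general case it handled (nonzero, magnitude greater than 1, mantissa truncated; the helper name and clz builtin are illustrative):

  #include <stdint.h>

  uint32_t PackBinary32(int32_t ival) {
    uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
    uint32_t mag = sign ? 0u - static_cast<uint32_t>(ival)
                        : static_cast<uint32_t>(ival);
    int zeros = __builtin_clz(mag);  // GCC/Clang builtin; requires mag > 1 here
    uint32_t exponent = static_cast<uint32_t>(31 - zeros + 127) << 23;
    uint32_t mantissa = (mag << (zeros + 1)) >> 9;  // chop implicit bit, keep top 23
    return sign | exponent | mantissa;
  }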
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(FPU)) {
- return Handle<Code>::null();
- }
- CpuFeatureScope scope_fpu(masm(), FPU);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
}
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
Register scratch1,
FPURegister double_scratch0,
FPURegister double_scratch1,
Label* fail) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
- __ EmitFPUTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- at,
- double_scratch1,
- scratch1,
- kCheckForInexactConversion);
-
- __ Branch(fail, ne, scratch1, Operand(zero_reg));
-
- __ SmiTagCheckOverflow(key, scratch0, scratch1);
- __ BranchOnOverflow(fail, scratch1);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
+ Label key_ok;
+ // Check for smi or a smi inside a heap number. We convert the heap
+ // number and check if the conversion is exact and fits into the smi
+ // range.
+ __ JumpIfSmi(key, &key_ok);
+ __ CheckMap(key,
+ scratch0,
+ Heap::kHeapNumberMapRootIndex,
+ fail,
+ DONT_DO_SMI_CHECK);
+ __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
+ __ EmitFPUTruncate(kRoundToZero,
+ scratch0,
+ double_scratch0,
+ at,
+ double_scratch1,
+ scratch1,
+ kCheckForInexactConversion);
+
+ __ Branch(fail, ne, scratch1, Operand(zero_reg));
+
+ __ SmiTagCheckOverflow(key, scratch0, scratch1);
+ __ BranchOnOverflow(fail, scratch1);
+ __ bind(&key_ok);
}
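In effect the check accepts a smi directly, or a heap number whose value converts exactly to an int32 that still fits the 31-bit smi payload of 32-bit V8. A host-side sketch of the predicate (assumptions: 31-bit smi range; the generated code detects inexact and out-of-range conversions via FCSR flags rather than the cast used here):

  #include <stdint.h>

  bool KeyFitsSmi(double key, int32_t* out) {
    int32_t truncated = static_cast<int32_t>(key);            // round toward zero
    if (static_cast<double>(truncated) != key) return false;  // inexact
    if (truncated < -(1 << 30) || truncated > (1 << 30) - 1) return false;
    *out = truncated;
    return true;
  }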
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(t0, key);
- StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+ StoreIntAsFloat(masm, a3, t0, t1, t2);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ sll(t8, key, 2);
__ addu(a3, a3, t8);
// a3: effective address of the double element
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
+ FloatingPointHelper::Destination destination =
+ FloatingPointHelper::kFPURegisters;
FloatingPointHelper::ConvertIntToDouble(
masm, t1, destination,
f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
t0, f2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatureScope scope(masm, FPU);
- __ sdc1(f0, MemOperand(a3, 0));
- } else {
- __ sw(t2, MemOperand(a3, 0));
- __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
- }
+ __ sdc1(f0, MemOperand(a3, 0));
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
// Storing +/-Infinity into integer arrays is basically undefined. For
// more reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ swc1(f0, MemOperand(t8, 0));
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- __ sdc1(f0, MemOperand(t8, 0));
- } else {
- __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, a0 holds the value
- // which is the return value.
- __ mov(v0, a0);
- __ Ret();
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvt_s_d(f0, f0);
+ __ sll(t8, key, 1);
+ __ addu(t8, a3, t8);
+ __ swc1(f0, MemOperand(t8, 0));
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sll(t8, key, 2);
+ __ addu(t8, a3, t8);
+ __ sdc1(f0, MemOperand(t8, 0));
} else {
- // FPU is not available, do manual conversions.
-
- __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
-
- __ xor_(t1, t6, t5);
- __ li(t2, kBinary32ExponentMask);
- __ Movz(t6, t2, t1); // Only if t6 is equal to t5.
- __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Addu(t6,
- t6,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ li(t1, Operand(kBinary32MaxExponent));
- __ Slt(t1, t1, t6);
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Or(t2, t2, Operand(kBinary32ExponentMask));
- __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
- __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
-
- __ Slt(t1, t6, Operand(kBinary32MinExponent));
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
- __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
-
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t7, t7, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t7, t7, t4);
- __ sll(t6, t6, kBinary32ExponentShift);
- __ or_(t3, t7, t6);
-
- __ bind(&done);
- __ sll(t9, key, 1);
- __ addu(t9, a3, t9);
- __ sw(t3, MemOperand(t9, 0));
-
- // Entry registers are intact, a0 holds the value which is the return
- // value.
- __ mov(v0, a0);
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ or_(t6, t6, t7);
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t6, t6, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t3, t6, t4);
- __ Branch(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- // t8: effective address of destination element.
- __ sw(t4, MemOperand(t8, 0));
- __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
- __ mov(v0, a0);
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
- __ Branch(&done, eq, t6, Operand(zero_reg));
-
- __ xor_(t2, t6, t5);
- __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
- __ Branch(&done, eq, t6, Operand(t5));
-
- // Unbias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
- // If exponent is negative then result is 0.
- __ slt(t2, t6, zero_reg);
- __ Movn(t3, zero_reg, t2); // Only if exponent is negative.
- __ Branch(&done, lt, t6, Operand(zero_reg));
-
- // If exponent is too big then result is minimal value.
- __ slti(t1, t6, meaningfull_bits - 1);
- __ li(t2, min_value);
- __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
- __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
-
- __ And(t5, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ li(t9, HeapNumber::kMantissaBitsInTopWord);
- __ subu(t6, t9, t6);
- __ slt(t1, t6, zero_reg);
- __ srlv(t2, t3, t6);
- __ Movz(t3, t2, t1); // Only if t6 is positive.
- __ Branch(&sign, ge, t6, Operand(zero_reg));
-
- __ subu(t6, zero_reg, t6);
- __ sllv(t3, t3, t6);
- __ li(t9, meaningfull_bits);
- __ subu(t6, t9, t6);
- __ srlv(t4, t4, t6);
- __ or_(t3, t3, t4);
-
- __ bind(&sign);
- __ subu(t2, t3, zero_reg);
- __ Movz(t3, t2, t5); // Only if t5 is zero.
-
- __ bind(&done);
-
- // Result is in t3.
- // This switch block should be exactly the same as above (FPU mode).
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+ __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
+
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ addu(t8, a3, key);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, key, 1);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
}
+
+ // Entry registers are intact; a0 holds the value,
+ // which is the return value.
+ __ mov(v0, a0);
+ __ Ret();
}
// Slow case, key and receiver still in a0 and a1.
MacroAssembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ add_d(f8, f4, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ add_d(f8, f4, f6);
- __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
+ __ mov_d(f10, f8); // c
+ __ neg_d(f12, f6); // -b
+ __ sub_d(f10, f10, f12);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
- __ mov_d(f10, f8); // c
- __ neg_d(f12, f6); // -b
- __ sub_d(f10, f10, f12);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
- __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
+ __ li(t0, 120);
+ __ mtc1(t0, f14);
+ __ cvt_d_w(f14, f14); // f14 = 120.0.
+ __ mul_d(f10, f10, f14);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
- __ li(t0, 120);
- __ mtc1(t0, f14);
- __ cvt_d_w(f14, f14); // f14 = 120.0.
- __ mul_d(f10, f10, f14);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
+ __ div_d(f12, f10, f4);
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
- __ div_d(f12, f10, f4);
- __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
-
- __ sqrt_d(f14, f12);
- __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
- // g = sqrt(f) = 10.97451593465515908537
+ __ sqrt_d(f14, f12);
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
+ // g = sqrt(f) = 10.97451593465515908537
if (kArchVariant == kMips32r2) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
__ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
}
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e14;
- t.b = 2.75e11;
- t.c = 0.0;
- t.d = 0.0;
- t.e = 0.0;
- t.f = 0.0;
- t.h = 1.5;
- t.i = 2.75;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ(1.5e14, t.a);
- CHECK_EQ(1.5e14, t.b);
- CHECK_EQ(1.50275e14, t.c);
- CHECK_EQ(1.50550e14, t.d);
- CHECK_EQ(1.8066e16, t.e);
- CHECK_EQ(120.44, t.f);
- CHECK_EQ(10.97451593465515908537, t.g);
- CHECK_EQ(6.875, t.h);
- }
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 0.0;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.h = 1.5;
+ t.i = 2.75;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(1.5e14, t.b);
+ CHECK_EQ(1.50275e14, t.c);
+ CHECK_EQ(1.50550e14, t.d);
+ CHECK_EQ(1.8066e16, t.e);
+ CHECK_EQ(120.44, t.f);
+ CHECK_EQ(10.97451593465515908537, t.g);
+ CHECK_EQ(6.875, t.h);
}
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
-
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- // Swap f4 and f6, by using four integer registers, t0-t3.
- __ mfc1(t0, f4);
- __ mfc1(t1, f5);
- __ mfc1(t2, f6);
- __ mfc1(t3, f7);
+ // Swap f4 and f6, by using four integer registers, t0-t3.
+ __ mfc1(t0, f4);
+ __ mfc1(t1, f5);
+ __ mfc1(t2, f6);
+ __ mfc1(t3, f7);
- __ mtc1(t0, f6);
- __ mtc1(t1, f7);
- __ mtc1(t2, f4);
- __ mtc1(t3, f5);
+ __ mtc1(t0, f6);
+ __ mtc1(t1, f7);
+ __ mtc1(t2, f4);
+ __ mtc1(t3, f5);
- // Store the swapped f4 and f5 back to memory.
- __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
+ // Store the swapped f4 and f5 back to memory.
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e22;
- t.b = 2.75e11;
- t.c = 17.17;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e22;
+ t.b = 2.75e11;
+ t.c = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(2.75e11, t.a);
- CHECK_EQ(2.75e11, t.b);
- CHECK_EQ(1.5e22, t.c);
- }
+ CHECK_EQ(2.75e11, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1.5e22, t.c);
}
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
+ // Load all structure elements to registers.
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(T, i)) );
+ __ lw(t1, MemOperand(a0, OFFSET_OF(T, j)) );
- // Load all structure elements to registers.
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ lw(t0, MemOperand(a0, OFFSET_OF(T, i)) );
- __ lw(t1, MemOperand(a0, OFFSET_OF(T, j)) );
-
- // Convert double in f4 to int in element i.
- __ cvt_w_d(f8, f4);
- __ mfc1(t2, f8);
- __ sw(t2, MemOperand(a0, OFFSET_OF(T, i)) );
-
- // Convert double in f6 to int in element j.
- __ cvt_w_d(f10, f6);
- __ mfc1(t3, f10);
- __ sw(t3, MemOperand(a0, OFFSET_OF(T, j)) );
-
- // Convert int in original i (t0) to double in a.
- __ mtc1(t0, f12);
- __ cvt_d_w(f0, f12);
- __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
-
- // Convert int in original j (t1) to double in b.
- __ mtc1(t1, f14);
- __ cvt_d_w(f2, f14);
- __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
+ // Convert double in f4 to int in element i.
+ __ cvt_w_d(f8, f4);
+ __ mfc1(t2, f8);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, i)) );
- __ jr(ra);
- __ nop();
+ // Convert double in f6 to int in element j.
+ __ cvt_w_d(f10, f6);
+ __ mfc1(t3, f10);
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, j)) );
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e4;
- t.b = 2.75e8;
- t.i = 12345678;
- t.j = -100000;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ // Convert int in original i (t0) to double in a.
+ __ mtc1(t0, f12);
+ __ cvt_d_w(f0, f12);
+ __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
- CHECK_EQ(12345678.0, t.a);
- CHECK_EQ(-100000.0, t.b);
- CHECK_EQ(15000, t.i);
- CHECK_EQ(275000000, t.j);
- }
+ // Convert int in original j (t1) to double in b.
+ __ mtc1(t1, f14);
+ __ cvt_d_w(f2, f14);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e4;
+ t.b = 2.75e8;
+ t.i = 12345678;
+ t.j = -100000;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(12345678.0, t.a);
+ CHECK_EQ(-100000.0, t.b);
+ CHECK_EQ(15000, t.i);
+ CHECK_EQ(275000000, t.j);
}
MacroAssembler assm(isolate, NULL, 0);
Label neither_is_nan, less_than, outa_here;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ c(UN, D, f4, f6);
+ __ bc1f(&neither_is_nan);
+ __ nop();
+ __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+ __ Branch(&outa_here);
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ c(UN, D, f4, f6);
- __ bc1f(&neither_is_nan);
- __ nop();
- __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
- __ Branch(&outa_here);
-
- __ bind(&neither_is_nan);
-
- if (kArchVariant == kLoongson) {
- __ c(OLT, D, f6, f4);
- __ bc1t(&less_than);
- } else {
- __ c(OLT, D, f6, f4, 2);
- __ bc1t(&less_than, 2);
- }
- __ nop();
- __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
- __ Branch(&outa_here);
+ __ bind(&neither_is_nan);
- __ bind(&less_than);
- __ Addu(t0, zero_reg, Operand(1));
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
+ if (kArchVariant == kLoongson) {
+ __ c(OLT, D, f6, f4);
+ __ bc1t(&less_than);
+ } else {
+ __ c(OLT, D, f6, f4, 2);
+ __ bc1t(&less_than, 2);
+ }
+ __ nop();
+ __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+ __ Branch(&outa_here);
+ __ bind(&less_than);
+ __ Addu(t0, zero_reg, Operand(1));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
- // This test-case should have additional tests.
- __ bind(&outa_here);
+ // This test-case should have additional tests.
- __ jr(ra);
- __ nop();
+ __ bind(&outa_here);
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e14;
- t.b = 2.75e11;
- t.c = 2.0;
- t.d = -4.0;
- t.e = 0.0;
- t.f = 0.0;
- t.result = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ(1.5e14, t.a);
- CHECK_EQ(2.75e11, t.b);
- CHECK_EQ(1, t.result);
- }
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 2.0;
+ t.d = -4.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.result = 0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1, t.result);
}
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU) && kArchVariant == kMips32r2) {
- CpuFeatureScope scope(&assm, FPU);
-
+ if (kArchVariant == kMips32r2) {
// Load all structure elements to registers.
__ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
MacroAssembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
-
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
- __ Cvt_d_uw(f10, t0, f22);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
+ __ Cvt_d_uw(f10, t0, f22);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
- __ Trunc_uw_d(f10, f10, f22);
- __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
+ __ Trunc_uw_d(f10, f10, f22);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
- __ Cvt_d_uw(f8, t0, f22);
- __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
+ __ Cvt_d_uw(f8, t0, f22);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
- __ Trunc_uw_d(f8, f8, f22);
- __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
+ __ Trunc_uw_d(f8, f8, f22);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.cvt_big_in = 0xFFFFFFFF;
- t.cvt_small_in = 333;
+ t.cvt_big_in = 0xFFFFFFFF;
+ t.cvt_small_in = 333;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
- CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
+ CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
+ CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
- CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
- CHECK_EQ(static_cast<int>(t.trunc_small_out),
- static_cast<int>(t.cvt_small_in));
- }
+ CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
+ CHECK_EQ(static_cast<int>(t.trunc_small_out),
+ static_cast<int>(t.cvt_small_in));
}
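These conversions need macro support because the MIPS32 FPU has no single unsigned-word-to-double instruction; Cvt_d_uw must special-case inputs with the top bit set, which is why it takes the extra f22 scratch register. A host-side sketch of one standard approach, illustrating the intent rather than the macro's exact instruction sequence:

  #include <stdint.h>

  double UintToDouble(uint32_t u) {
    if (static_cast<int32_t>(u) >= 0) {
      return static_cast<double>(static_cast<int32_t>(u));  // signed path suffices
    }
    // Top bit set: convert the halved value and rescale; exact, since a double
    // carries 53 mantissa bits.
    return static_cast<double>(static_cast<int32_t>(u >> 1)) * 2.0 +
           static_cast<double>(u & 1);
  }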
MacroAssembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
-
- // Save FCSR.
- __ cfc1(a1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
#define RUN_ROUND_TEST(x) \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
-
- RUN_ROUND_TEST(round)
- RUN_ROUND_TEST(floor)
- RUN_ROUND_TEST(ceil)
- RUN_ROUND_TEST(trunc)
- RUN_ROUND_TEST(cvt)
-
- // Restore FCSR.
- __ ctc1(a1, FCSR);
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
+
+ RUN_ROUND_TEST(round)
+ RUN_ROUND_TEST(floor)
+ RUN_ROUND_TEST(ceil)
+ RUN_ROUND_TEST(trunc)
+ RUN_ROUND_TEST(cvt)
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.round_up_in = 123.51;
- t.round_down_in = 123.49;
- t.neg_round_up_in = -123.5;
- t.neg_round_down_in = -123.49;
- t.err1_in = 123.51;
- t.err2_in = 1;
- t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
- t.err4_in = NAN;
+ t.round_up_in = 123.51;
+ t.round_down_in = 123.49;
+ t.neg_round_up_in = -123.5;
+ t.neg_round_down_in = -123.49;
+ t.err1_in = 123.51;
+ t.err2_in = 1;
+ t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
+ t.err4_in = NAN;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
#define CHECK_ROUND_RESULT(type) \
CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
CHECK_EQ(kFPUInvalidResult, t.type##_invalid_result);
- CHECK_ROUND_RESULT(round);
- CHECK_ROUND_RESULT(floor);
- CHECK_ROUND_RESULT(ceil);
- CHECK_ROUND_RESULT(cvt);
- }
+ CHECK_ROUND_RESULT(round);
+ CHECK_ROUND_RESULT(floor);
+ CHECK_ROUND_RESULT(ceil);
+ CHECK_ROUND_RESULT(cvt);
}