'v8_can_use_vfp2_instructions%': 'false',
'v8_can_use_vfp3_instructions%': 'false',
+ # Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
+ # registers d16-d31 in the generated code, both in the snapshot and for the
+ # ARM target. Leaving the default value of 'false' will avoid the use of
+ # these registers in the snapshot and use CPU feature probing when running
+ # on the target.
+ 'v8_can_use_vfp32dregs%': 'false',
+
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
'USE_EABI_HARDFLOAT=0',
],
}],
+ [ 'v8_can_use_vfp32dregs=="true"', {
+ 'defines': [
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
int DwVfpRegister::NumRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
- return DwVfpRegister::kNumRegisters;
+ return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
} else {
return 1;
}
int DwVfpRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
- return DwVfpRegister::kMaxNumAllocatableRegisters;
+ return NumRegisters() - kNumReservedRegisters;
} else {
return 1;
}
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
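+  // d14 (kDoubleRegZero) and d15 (kScratchDoubleReg) are never allocated,
+  // so d0-d13 map to indices 0-13 and d16-d31 to indices 14-29.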
+ if (reg.code() > kDoubleRegZero.code()) {
+ return reg.code() - kNumReservedRegisters;
+ }
return reg.code();
}
+DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
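+  // Inverse of ToAllocationIndex: index 14 and up skips the two reserved
+  // registers and maps to d16-d31.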
+ if (index >= kDoubleRegZero.code()) {
+ return from_code(index + kNumReservedRegisters);
+ }
+ return from_code(index);
+}
+
+
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
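+// Expose the feature bitmask to generated code so that it can be queried at
+// run-time, e.g. by MacroAssembler::CheckFor32DRegs.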
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
#ifdef CAN_USE_VFP2_INSTRUCTIONS
answer |= 1u << VFP2;
#endif // CAN_USE_VFP2_INSTRUCTIONS
+#ifdef CAN_USE_VFP32DREGS
+ answer |= 1u << VFP32DREGS;
+#endif // CAN_USE_VFP32DREGS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
const char* DwVfpRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(VFP2)) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
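+    // Mirror ToAllocationIndex: indices at or beyond the reserved d14/d15
+    // slot correspond to d16-d31.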
+ if (index >= kDoubleRegZero.code())
+ index += kNumReservedRegisters;
+
+ // TODO(hans): Maybe this could just use VFPRegisters::Name()?
const char* const names[] = {
"d0",
"d1",
"d11",
"d12",
"d13"
+ "d14",
+ "d15",
+ "d16",
+ "d17",
+ "d18",
+ "d19",
+ "d20",
+ "d21",
+ "d22",
+ "d23",
+ "d24",
+ "d25",
+ "d26",
+ "d27",
+ "d28",
+ "d29",
+ "d30",
+ "d31"
};
return names[index];
} else {
if (FLAG_enable_movw_movt) {
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
+
+ if (FLAG_enable_32dregs) {
+ supported_ |= 1u << VFP32DREGS;
+ }
+
#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
+ if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
+ found_by_runtime_probing_ |= 1u << VFP32DREGS;
+ }
+
supported_ |= found_by_runtime_probing_;
#endif
int offset,
const Condition cond) {
// Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
+ // Instruction details available in ARM DDI 0406C.b, A8-924.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
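+  // For d16-d31 the register code no longer fits in the 4-bit Vd field;
+  // split_code() returns the low four bits (vd) and the top bit (d).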
+ int vd, d;
+ dst.split_code(&vd, &d);
ASSERT(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
int offset,
const Condition cond) {
// MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
+ // Instruction details available in ARM DDI 0406C.b, A8-1082.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
u = 0;
}
ASSERT(offset >= 0);
+ int vd, d;
+ src.split_code(&vd, &d);
+
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
+ ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-626.
+ // Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count * 2)
+ // first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-784.
+ // Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP2));
double imm,
const Register scratch,
const Condition cond) {
- // Dd = immediate
- // Instruction details available in ARM DDI 0406B, A8-640.
ASSERT(CpuFeatures::IsEnabled(VFP2));
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+ //
+ // Dd = immediate
+ // Instruction details available in ARM DDI 0406C.b, A8-936.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
} else if (FLAG_enable_vldr_imm) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
- mov(ip, Operand(lo));
if (scratch.is(no_reg)) {
- // Move the low part of the double into the lower of the corresponsing S
- // registers of D register dst.
- vmov(dst.low(), ip, cond);
-
- // Move the high part of the double into the higher of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip, cond);
+ if (dst.code() < 16) {
+      // Move the low part of the double into the lower of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(lo));
+ vmov(dst.low(), ip, cond);
+
+ // Move the high part of the double into the higher of the
+      // corresponding S registers of D register dst.
+ mov(ip, Operand(hi));
+ vmov(dst.high(), ip, cond);
+ } else {
+ // D16-D31 does not have S registers, so move the low and high parts
+ // directly to the D register using vmov.32.
+ // Note: This may be slower, so we only do this when we have to.
+ mov(ip, Operand(lo));
+ vmov(dst, 0, ip, cond);
+ mov(ip, Operand(hi));
+ vmov(dst, 1, ip, cond);
+ }
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
+ mov(ip, Operand(lo));
mov(scratch, Operand(hi));
vmov(dst, ip, scratch, cond);
}
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
- // Instruction details available in ARM DDI 0406B, A8-642.
+ // Instruction details available in ARM DDI 0406C.b, A8-938.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
+ int index,
+ const Register src,
+ const Condition cond) {
+ // Dd[index] = Rt
+ // Instruction details available in ARM DDI 0406C.b, A8-940.
+ // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
+ // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
+ ASSERT(index == 0 || index == 1);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(cond | 0xE*B24 | index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 | d*B7 |
+ B4);
}
const Register src2,
const Condition cond) {
// Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
+ int vm, m;
+ dst.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+ src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
const DwVfpRegister src,
const Condition cond) {
// <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ int vm, m;
+ src.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+ dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-968.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
- 0x5*B9 | B8 | B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-524.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
- 0x5*B9 | B8 | 0x3*B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
+ m*B5 | vm);
}
const Condition cond) {
// Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-830.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
}
const Condition cond) {
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-1086.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | B6 | m*B5 | vm);
}
const Condition cond) {
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-960.
+ // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
}
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-892.
- // cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) |
- // Vm(3-0)
- unsigned x = (cond | 0x1C*B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
- emit(x);
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
}
const Condition cond) {
// Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-882.
+ // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
}
const DwVfpRegister src2,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ int vd, d;
+ src1.split_code(&vd, &d);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+ // vcmp(Dd, #0.0) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(src2 == 0.0);
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
- src1.code()*B12 | 0x5*B9 | B8 | B6);
+ int vd, d;
+ src1.split_code(&vd, &d);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-1058.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
- dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
+ m*B5 | vm);
}
namespace v8 {
namespace internal {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ if (f == VFP2 && !FLAG_enable_vfp2) return false;
+ if (f == SUDIV && !FLAG_enable_sudiv) return false;
+ if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
+ return false;
+ }
+ if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+#ifdef DEBUG
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
+ }
+#endif
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+
+ public:
+ explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
+ // VFP2 and ARMv7 are implied by VFP3.
+ if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ unsigned old_enabled_;
+#else
+
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
+ private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_;
+
+ friend class ExternalReference;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// Double word VFP register.
struct DwVfpRegister {
- static const int kNumRegisters = 16;
+ static const int kNumRegisters = 32;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
static const int kMaxNumAllocatableRegisters = kNumRegisters -
kNumReservedRegisters;
+ // Note: the number of registers can be different at snapshot and run-time.
+ // Any code included in the snapshot must be able to run both with 16 or 32
+ // registers.
inline static int NumRegisters();
inline static int NumAllocatableRegisters();
+
inline static int ToAllocationIndex(DwVfpRegister reg);
static const char* AllocationIndexToString(int index);
-
- static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- return from_code(index);
- }
+ inline static DwVfpRegister FromAllocationIndex(int index);
static DwVfpRegister from_code(int code) {
DwVfpRegister r = { code };
return r;
}
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kNumRegisters;
+ }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
SwVfpRegister low() const {
+ ASSERT(code_ < 16);
SwVfpRegister reg;
reg.code_ = code_ * 2;
return reg;
}
SwVfpRegister high() const {
+ ASSERT(code_ < 16);
SwVfpRegister reg;
reg.code_ = (code_ * 2) + 1;
const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
+const DwVfpRegister d16 = { 16 };
+const DwVfpRegister d17 = { 17 };
+const DwVfpRegister d18 = { 18 };
+const DwVfpRegister d19 = { 19 };
+const DwVfpRegister d20 = { 20 };
+const DwVfpRegister d21 = { 21 };
+const DwVfpRegister d22 = { 22 };
+const DwVfpRegister d23 = { 23 };
+const DwVfpRegister d24 = { 24 };
+const DwVfpRegister d25 = { 25 };
+const DwVfpRegister d26 = { 26 };
+const DwVfpRegister d27 = { 27 };
+const DwVfpRegister d28 = { 28 };
+const DwVfpRegister d29 = { 29 };
+const DwVfpRegister d30 = { 30 };
+const DwVfpRegister d31 = { 31 };
const Register sfpd_lo = { kRegister_r6_Code };
const Register sfpd_hi = { kRegister_r7_Code };
friend class Assembler;
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- if (f == VFP2 && !FLAG_enable_vfp2) return false;
- if (f == SUDIV && !FLAG_enable_sudiv) return false;
- if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
- return false;
- }
- return (supported_ & (1u << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- // VFP2 and ARMv7 are implied by VFP3.
- if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
LFlag l = Short); // v5 and above
// Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e, D16 to D31.
- // However, some simple modifications can allow
- // these APIs to support D16 to D31.
+ // All these APIs support S0 to S31 and D0 to D31.
void vldr(const DwVfpRegister dst,
const Register base,
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
+ void vmov(const DwVfpRegister dst,
+ int index,
+ const Register src,
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
+
+ const Register scratch = r1;
+
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(scratch);
+
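+  // Slots are reserved for all 32 registers either way, keeping the offsets
+  // fixed; d16-d31 themselves are stored only when present (ne).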
__ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
}
}
const int argument_count = 1;
const int fp_argument_count = 0;
- const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
argument_count);
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(scratch);
+
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
}
__ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
}
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
CpuFeatures::Scope scope(VFP2);
masm->sub(sp,
sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
// Save all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ // TODO(hans): We should probably save d0 too. And maybe use vstm.
+ for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
CpuFeatures::Scope scope(VFP2);
// Restore all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ // TODO(hans): We should probably restore d0 too. And maybe use vldm.
+ for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
}
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2"
+// Support for VFP registers s0 to s31 (d0 to d15) and d16 to d31.
+// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
};
// VFP support.
const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPDoubleRegisters = 32;
const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- // Save all VFP registers before messing with them.
- DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
- DwVfpRegister last =
- DwVfpRegister::FromAllocationIndex(
- DwVfpRegister::kMaxNumAllocatableRegisters - 1);
- ASSERT(last.code() > first.code());
- ASSERT((last.code() - first.code()) ==
- (DwVfpRegister::kMaxNumAllocatableRegisters - 1));
-#ifdef DEBUG
- int max = DwVfpRegister::kMaxNumAllocatableRegisters - 1;
- for (int i = 0; i <= max; i++) {
- ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
- (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
- }
-#endif
- __ vstm(db_w, sp, first, last);
+ // Save all allocatable VFP registers before messing with them.
+ ASSERT(kDoubleRegZero.code() == 14);
+ ASSERT(kScratchDoubleReg.code() == 15);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ // Push registers d0-d13, and possibly d16-d31, on the stack.
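+  // When d16-d31 are absent, the eq-predicated sub still reserves their
+  // sixteen slots, so the frame layout is identical in both cases.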
+ __ vstm(db_w, sp, d16, d31, ne);
+ __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ __ vstm(db_w, sp, d0, d13);
} else {
__ sub(sp, sp, Operand(kDoubleRegsSize));
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); ++i) {
+ for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
}
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
+ // TODO(hans): Change the code below to not clobber r0, so that it can be
+ // used in the "restore the d registers" code further down, making this mov
+ // redundant.
+ __ mov(r4, r0);
+
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
__ cmp(r0, r1);
__ b(lt, &outer_push_loop);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // In case of OSR, we have to restore the d registers.
+ if (type() == OSR) {
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ __ ldr(r1, MemOperand(r4, Deoptimizer::input_offset()));
+ int src_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kNumRegisters; ++i) {
+ if (i == kDoubleRegZero.code()) continue;
+ if (i == kScratchDoubleReg.code()) continue;
+
+ const DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
+ src_offset += kDoubleSize;
+ }
+ }
+ }
+
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
Print(VFPRegisters::Name(reg, false));
}
// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(VFPRegisters::Name(reg, true));
}
} else if (format[1] == 'm') {
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- reg = instr->VFPDRegValue(precision);
+ if ((instr->TypeValue() == 7) &&
+ (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x1)) {
+ // vmov.32 has Vd in a different place.
+ reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ } else {
+ reg = instr->VFPDRegValue(precision);
+ }
+
if (format[2] == '+') {
int immed8 = instr->Immed8Value();
if (format[0] == 'S') reg += immed8 - 1;
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov.32'cond 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov.32'cond 'Dd[1], 'rt");
+ }
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
Unknown(instr); // Not used by V8.
} else if (instr->HasL()) {
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
}
break;
case 0x8:
+ case 0xA:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
}
break;
case 0xC:
+ case 0xE:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
break;
case 0x4:
case 0x5:
- case 0x9: {
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, result.low());
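+      // 'result' may be one of d16-d31, which have no s-register halves, so
+      // do the f32 load and widening through kScratchDoubleReg instead.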
+ __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, kScratchDoubleReg.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
PushSafepointRegisters();
sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
void MacroAssembler::PopSafepointRegistersAndDoubles() {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
// General purpose registers are pushed last on the stack.
int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
// Optionally save all double registers.
if (save_doubles) {
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vstm(db_w, sp, first, last);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ CheckFor32DRegs(ip);
+
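+    // Push d16-d31 if available; otherwise reserve their sixteen slots with
+    // a plain sp adjustment so the frame size is the same either way.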
+ vstm(db_w, sp, d16, d31, ne);
+ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ vstm(db_w, sp, d0, d15);
// Note that d0 will be accessible at
// fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
// Calculate the stack location of the saved doubles and restore them.
const int offset = 2 * kPointerSize;
sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vldm(ia, r3, first, last);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ CheckFor32DRegs(ip);
+
+ vldm(ia_w, r3, d0, d15);
+ vldm(ia_w, r3, d16, d31, ne);
+ add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
}
// Clear top frame.
}
+void MacroAssembler::CheckFor32DRegs(Register scratch) {
+ mov(scratch, Operand(ExternalReference::cpu_features()));
+ ldr(scratch, MemOperand(scratch));
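+  // tst leaves Z clear iff the VFP32DREGS bit is set, letting callers
+  // predicate d16-d31 accesses on ne (available) or eq (d0-d15 only).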
+ tst(scratch, Operand(1u << VFP32DREGS));
+}
+
+
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first,
Register second,
Register source,
Register scratch);
+ // Check whether d16-d31 are available on the CPU. The result is given by the
+ // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
+ void CheckFor32DRegs(Register scratch);
+
+
// ---------------------------------------------------------------------------
// Runtime calls
#include "disasm.h"
#include "assembler.h"
+#include "codegen.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
// All registers are initialized to zero to start with
// even though s_registers_ & d_registers_ share the same
// physical registers in the target.
- for (int i = 0; i < num_s_registers; i++) {
+ for (int i = 0; i < num_d_registers * 2; i++) {
vfp_register[i] = 0;
}
n_flag_FPSCR_ = false;
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < num_d_registers);
+ if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_register[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < num_d_registers);
+ if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_register[0])];
address += 1;
} else {
if (load) {
- set_s_register_from_sinteger(
- 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
- set_s_register_from_sinteger(
- 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
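+      // d16-d31 have no s-register aliases, so transfer the value as a whole
+      // double via memcpy instead of as two s-register words.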
+ int32_t data[] = {
+ ReadW(reinterpret_cast<int32_t>(address), instr),
+ ReadW(reinterpret_cast<int32_t>(address + 1), instr)
+ };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(reg, d);
} else {
- WriteW(reinterpret_cast<int32_t>(address),
- get_sinteger_from_s_register(2 * reg), instr);
- WriteW(reinterpret_cast<int32_t>(address + 1),
- get_sinteger_from_s_register(2 * reg + 1), instr);
+ int32_t data[2];
+ double d = get_double_from_d_register(reg);
+ memcpy(data, &d, 8);
+ WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
+ WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
address += 2;
}
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (ARM core register to scalar)
+ int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dd_value = get_double_from_d_register(vd);
+ int32_t data[2];
+ memcpy(data, &dd_value, 8);
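+          // Bit 21 (the low bit of opc1) selects which 32-bit half of Dd
+          // receives Rt.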
+ data[instr->Bit(21)] = get_register(instr->RtValue());
+ memcpy(&dd_value, data, 8);
+ set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
int rt = instr->RtValue();
int rn = instr->RnValue();
- int vm = instr->VmValue();
+ int vm = instr->VFPMRegValue(kDoublePrecision);
if (instr->HasL()) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ int32_t data[2];
+ double d = get_double_from_d_register(vm);
+ memcpy(data, &d, 8);
+ set_register(rt, data[0]);
+ set_register(rn, data[1]);
} else {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
+ int32_t data[] = { get_register(rt), get_register(rn) };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(vm, d);
}
}
break;
case 0x8:
- case 0xC: { // Load and store double to memory.
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store double to memory.
int rn = instr->RnValue();
- int vd = instr->VdValue();
+ int vd = instr->VFPDRegValue(kDoublePrecision);
int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
int32_t address = get_register(rn) + 4 * offset;
if (instr->HasL()) {
// Load double from memory: vldr.
- set_s_register_from_sinteger(2*vd, ReadW(address, instr));
- set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ int32_t data[] = {
+ ReadW(address, instr),
+ ReadW(address + 4, instr)
+ };
+ double val;
+ memcpy(&val, data, 8);
+ set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(2*vd), instr);
- WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ int32_t data[2];
+ double val = get_double_from_d_register(vd);
+ memcpy(data, &val, 8);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
}
break;
}
case 0x4:
case 0x5:
+ case 0x6:
+ case 0x7:
case 0x9:
+ case 0xB:
// Load/store multiple double from memory: vldm/vstm.
HandleVList(instr);
break;
num_s_registers = 32,
d0 = 0, d1, d2, d3, d4, d5, d6, d7,
d8, d9, d10, d11, d12, d13, d14, d15,
- num_d_registers = 16
+ d16, d17, d18, d19, d20, d21, d22, d23,
+ d24, d25, d26, d27, d28, d29, d30, d31,
+ num_d_registers = 32
};
explicit Simulator(Isolate* isolate);
bool v_flag_;
// VFP architecture state.
- unsigned int vfp_register[num_s_registers];
+ // TODO(hans): Rename vfp_register to vfp_registers_.
+ unsigned int vfp_register[num_d_registers * 2];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;
static ExternalReference ForDeoptEntry(Address entry);
+ static ExternalReference cpu_features();
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
"instruction pairs (ARM only)")
DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
+DEFINE_bool(enable_32dregs, true,
+ "enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
DEFINE_bool(enable_vldr_imm, false,
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
+ // Number of doubles not known at snapshot time.
+ ASSERT(!Serializer::enabled());
parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
int IntelDoubleRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(SSE2)) {
return XMMRegister::kNumAllocatableRegisters;
static uint64_t supported_;
static uint64_t found_by_runtime_probing_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
ASSERT(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
static unsigned supported_;
static unsigned found_by_runtime_probing_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
case SUDIV:
search_string = "idiva";
break;
+ case VFP32DREGS:
+ // This case is handled specially below.
+ break;
default:
UNREACHABLE();
}
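+  // Linux reports "vfpv3d16" in /proc/cpuinfo for cores with only sixteen
+  // D registers, so VFP3 without the "d16" marker implies d16-d31 exist.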
+ if (feature == VFP32DREGS) {
+ return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16");
+ }
+
if (CPUInfoContainsString(search_string)) {
return true;
}
UNCLASSIFIED,
51,
"Code::MakeCodeYoung");
+ Add(ExternalReference::cpu_features().address(),
+ UNCLASSIFIED,
+ 52,
+ "cpu_features");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 52 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 53 + entry, "lazy_deopt");
}
}
SUDIV = 4, // ARM
UNALIGNED_ACCESSES = 5, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
+ VFP32DREGS = 7, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
void CpuFeatures::Probe() {
ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
#ifdef DEBUG
static uint64_t supported_;
static uint64_t found_by_runtime_probing_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
__ nop();
}
+
+TEST(13) {
+ // Test VFP instructions using registers d16-d31.
+ InitializeVM();
+ v8::HandleScope scope;
+
+ if (!CpuFeatures::IsSupported(VFP32DREGS)) {
+ return;
+ }
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double x;
+ double y;
+ double z;
+ double i;
+ double j;
+ double k;
+ } T;
+ T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the doubles in it.
+ Assembler assm(Isolate::Current(), NULL, 0);
+ Label L, C;
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ __ stm(db_w, sp, r4.bit() | lr.bit());
+
+ // Load a, b, c into d16, d17, d18.
+ __ mov(r4, Operand(r0));
+ __ vldr(d16, r4, OFFSET_OF(T, a));
+ __ vldr(d17, r4, OFFSET_OF(T, b));
+ __ vldr(d18, r4, OFFSET_OF(T, c));
+
+ __ vneg(d25, d16);
+ __ vadd(d25, d25, d17);
+ __ vsub(d25, d25, d18);
+ __ vmul(d25, d25, d25);
+ __ vdiv(d25, d25, d18);
+
+ __ vmov(d16, d25);
+ __ vsqrt(d17, d25);
+ __ vneg(d17, d17);
+ __ vabs(d17, d17);
+ __ vmla(d18, d16, d17);
+
+ // Store d16, d17, d18 into a, b, c.
+ __ mov(r4, Operand(r0));
+ __ vstr(d16, r4, OFFSET_OF(T, a));
+ __ vstr(d17, r4, OFFSET_OF(T, b));
+ __ vstr(d18, r4, OFFSET_OF(T, c));
+
+ // Load x, y, z into d29-d31.
+ __ add(r4, r0, Operand(OFFSET_OF(T, x)));
+ __ vldm(ia_w, r4, d29, d31);
+
+ // Swap d29 and d30 via r registers.
+ __ vmov(r1, r2, d29);
+ __ vmov(d29, d30);
+ __ vmov(d30, r1, r2);
+
+ // Convert to and from integer.
+ __ vcvt_s32_f64(s1, d31);
+ __ vcvt_f64_u32(d31, s1);
+
+ // Store d29-d31 into x, y, z.
+ __ add(r4, r0, Operand(OFFSET_OF(T, x)));
+ __ vstm(ia_w, r4, d29, d31);
+
+ // Move constants into d20, d21, d22 and store into i, j, k.
+ __ vmov(d20, 14.7610017472335499);
+ __ vmov(d21, 16.0);
+ __ mov(r1, Operand(372106121));
+ __ mov(r2, Operand(1079146608));
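+    // Build d22 from its two raw words with the new vmov.32 instruction;
+    // the value is not representable as a vmov immediate.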
+ __ vmov(d22, 0, r1);
+ __ vmov(d22, 1, r2);
+ __ add(r4, r0, Operand(OFFSET_OF(T, i)));
+ __ vstm(ia_w, r4, d20, d22);
+
+ __ ldm(ia_w, sp, r4.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5;
+ t.b = 2.75;
+ t.c = 17.17;
+ t.x = 1.5;
+ t.y = 2.75;
+ t.z = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(14.7610017472335499, t.a);
+ CHECK_EQ(3.84200491244266251, t.b);
+ CHECK_EQ(73.8818412254460241, t.c);
+ CHECK_EQ(2.75, t.x);
+ CHECK_EQ(1.5, t.y);
+ CHECK_EQ(17.0, t.z);
+ CHECK_EQ(14.7610017472335499, t.i);
+ CHECK_EQ(16.0, t.j);
+ CHECK_EQ(73.8818412254460241, t.k);
+ }
+}
+
#undef __
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
+ COMPARE(vmov(d0, r2, r3),
+ "ec432b10 vmov d0, r2, r3");
+ COMPARE(vmov(r2, r3, d0),
+ "ec532b10 vmov r2, r3, d0");
COMPARE(vmov(d0, d1),
"eeb00b41 vmov.f64 d0, d1");
COMPARE(vmov(d3, d3, eq),
COMPARE(vdiv(d6, d7, d7, hi),
"8e876b07 vdiv.f64hi d6, d7, d7");
+ COMPARE(vcmp(d0, d1),
+ "eeb40b41 vcmp.f64 d0, d1");
+ COMPARE(vcmp(d0, 0.0),
+ "eeb50b40 vcmp.f64 d0, #0.0");
+
COMPARE(vsqrt(d0, d0),
"eeb10bc0 vsqrt.f64 d0, d0");
COMPARE(vsqrt(d2, d3, ne),
COMPARE(vmov(d2, -13.0),
"eeba2b0a vmov.f64 d2, #-13");
+ COMPARE(vmov(d0, 0, r0),
+ "ee000b10 vmov.32 d0[0], r0");
+ COMPARE(vmov(d0, 1, r0),
+ "ee200b10 vmov.32 d0[1], r0");
+
COMPARE(vldr(s0, r0, 0),
"ed900a00 vldr s0, [r0 + 4*0]");
COMPARE(vldr(s1, r1, 4),
"ee012b00 vmla.f64 d2, d1, d0");
COMPARE(vmla(d6, d4, d5, cc),
"3e046b05 vmla.f64cc d6, d4, d5");
+
+ COMPARE(vcvt_u32_f64(s0, d0),
+ "eebc0bc0 vcvt.u32.f64 s0, d0");
+ COMPARE(vcvt_s32_f64(s0, d0),
+ "eebd0bc0 vcvt.s32.f64 s0, d0");
+ COMPARE(vcvt_f64_u32(d0, s1),
+ "eeb80b60 vcvt.f64.u32 d0, s1");
+ COMPARE(vcvt_f64_s32(d0, s1),
+ "eeb80be0 vcvt.f64.s32 d0, s1");
+ COMPARE(vcvt_f32_s32(s0, s2),
+ "eeb80ac1 vcvt.f32.s32 s0, s2");
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ COMPARE(vmov(d3, d27),
+ "eeb03b6b vmov.f64 d3, d27");
+ COMPARE(vmov(d18, d7),
+ "eef02b47 vmov.f64 d18, d7");
+ COMPARE(vmov(d18, r2, r3),
+ "ec432b32 vmov d18, r2, r3");
+ COMPARE(vmov(r2, r3, d18),
+ "ec532b32 vmov r2, r3, d18");
+ COMPARE(vmov(d20, d31),
+ "eef04b6f vmov.f64 d20, d31");
+
+ COMPARE(vabs(d16, d31),
+ "eef00bef vabs.f64 d16, d31");
+
+ COMPARE(vneg(d16, d31),
+ "eef10b6f vneg.f64 d16, d31");
+
+ COMPARE(vadd(d16, d17, d18),
+ "ee710ba2 vadd.f64 d16, d17, d18");
+
+ COMPARE(vsub(d16, d17, d18),
+ "ee710be2 vsub.f64 d16, d17, d18");
+
+ COMPARE(vmul(d16, d17, d18),
+ "ee610ba2 vmul.f64 d16, d17, d18");
+
+ COMPARE(vdiv(d16, d17, d18),
+ "eec10ba2 vdiv.f64 d16, d17, d18");
+
+ COMPARE(vcmp(d16, d17),
+ "eef40b61 vcmp.f64 d16, d17");
+ COMPARE(vcmp(d16, 0.0),
+ "eef50b40 vcmp.f64 d16, #0.0");
+
+ COMPARE(vsqrt(d16, d17),
+ "eef10be1 vsqrt.f64 d16, d17");
+
+ COMPARE(vmov(d30, 16.0),
+ "eef3eb00 vmov.f64 d30, #16");
+
+ COMPARE(vmov(d31, 0, r7),
+ "ee0f7b90 vmov.32 d31[0], r7");
+ COMPARE(vmov(d31, 1, r7),
+ "ee2f7b90 vmov.32 d31[1], r7");
+
+ COMPARE(vldr(d25, r0, 0),
+ "edd09b00 vldr d25, [r0 + 4*0]");
+ COMPARE(vldr(d26, r1, 4),
+ "edd1ab01 vldr d26, [r1 + 4*1]");
+ COMPARE(vldr(d31, r10, 1020),
+ "eddafbff vldr d31, [r10 + 4*255]");
+
+ COMPARE(vstr(d16, r0, 0),
+ "edc00b00 vstr d16, [r0 + 4*0]");
+ COMPARE(vstr(d17, r1, 4),
+ "edc11b01 vstr d17, [r1 + 4*1]");
+ COMPARE(vstr(d31, r10, 1020),
+ "edcafbff vstr d31, [r10 + 4*255]");
+
+ COMPARE(vstm(ia, r0, d16, d31),
+ "ecc00b20 vstmia r0, {d16-d31}");
+ COMPARE(vldm(ia, r3, d16, d31),
+ "ecd30b20 vldmia r3, {d16-d31}");
+ COMPARE(vstm(ia, r0, d23, d27),
+ "ecc07b0a vstmia r0, {d23-d27}");
+ COMPARE(vldm(ia, r3, d23, d27),
+ "ecd37b0a vldmia r3, {d23-d27}");
+
+ COMPARE(vmla(d16, d17, d18),
+ "ee410ba2 vmla.f64 d16, d17, d18");
+
+ COMPARE(vcvt_u32_f64(s0, d16),
+ "eebc0be0 vcvt.u32.f64 s0, d16");
+ COMPARE(vcvt_s32_f64(s0, d16),
+ "eebd0be0 vcvt.s32.f64 s0, d16");
+ COMPARE(vcvt_f64_u32(d16, s1),
+ "eef80b60 vcvt.f64.u32 d16, s1");
+ }
}
VERIFY_RUN();
CHECK_EQ(make_code(UNCLASSIFIED, 3),
encoder.Encode(
ExternalReference::roots_array_start(isolate).address()));
+ CHECK_EQ(make_code(UNCLASSIFIED, 52),
+ encoder.Encode(ExternalReference::cpu_features().address()));
}
['armv7==1', {
# The ARM Architecture Manual mandates VFPv3 if NEON is
# available.
- # The current V8 doesn't use d16-d31, so for vfpv3-d16, we can
- # also enable vfp3 for the better performance.
+ # V8 does not use d16-d31 unless explicitly enabled
+ # (--enable_32dregs) or detected at run-time, so for vfpv3-d16,
+          # we can also enable vfp3 for better performance.
'conditions': [
['arm_neon!=1 and arm_fpu!="vfpv3" and arm_fpu!="vfpv3-d16"', {
'variables': {