// can.
class CustomArguments : public Relocatable {
public:
- inline CustomArguments(Object *data,
- JSObject *self,
- JSObject *holder) {
+ inline CustomArguments(Object* data,
+ JSObject* self,
+ JSObject* holder) {
values_[3] = self;
values_[2] = holder;
values_[1] = Smi::FromInt(0);
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}
namespace internal {
// Safe default is no features.
-uint64_t CpuFeatures::supported_ = 0;
-uint64_t CpuFeatures::enabled_ = 0;
+unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
void CpuFeatures::Probe() {
- // Perform runtime detection of VFP.
- static const char* descriptive_file_linux = "/proc/cpuinfo";
-
- #if !defined(__arm__) || (defined(__VFP_FP__) && !defined(__SOFTFP__))
- // The supported & enabled flags for VFP are set to true for the following
- // conditions, even without runtime detection of VFP:
- // (1) For the simulator=arm build, always use VFP since
- // the arm simulator has VFP support.
- // (2) If V8 is being compiled with GCC with the vfp option turned on,
- // always use VFP since the build system assumes that V8 will run on
- // a platform that has VFP hardware.
- supported_ |= static_cast<uint64_t>(1) << VFP3;
- enabled_ |= static_cast<uint64_t>(1) << VFP3;
- #endif
-
- if (OS::fgrep_vfp(descriptive_file_linux, "vfp")) {
+ // If the compiler is allowed to use vfp then we can use vfp too in our
+ // code generation.
+#if !defined(__arm__) || (defined(__VFP_FP__) && !defined(__SOFTFP__))
+ // The supported flags for VFP are set to true for the following
+ // conditions, even without runtime detection of VFP:
+ // (1) For the simulator=arm build, always use VFP since
+ // the arm simulator has VFP support.
+ // (2) If V8 is being compiled with GCC with the vfp option turned on,
+ // always use VFP since the build system assumes that V8 will run on
+ // a platform that has VFP hardware.
+ supported_ |= 1u << VFP3;
+#else
+ if (Serializer::enabled()) return; // No features if we might serialize.
+
+ if (OS::ArmCpuHasFeature(OS::VFP)) {
// This implementation also sets the VFP flags if
// runtime detection of VFP returns true.
- supported_ |= static_cast<uint64_t>(1) << VFP3;
- enabled_ |= static_cast<uint64_t>(1) << VFP3;
+ supported_ |= 1u << VFP3;
}
+#endif
}
// -----------------------------------------------------------------------------
static bool IsSupported(Feature f) {
if (f == VFP3 && !FLAG_enable_vfp3) return false;
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (supported_ & (1u << f)) != 0;
}
// Check whether a feature is currently enabled.
static bool IsEnabled(Feature f) {
- return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (enabled_ & (1u << f)) != 0;
}
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(Feature f) {
+ ASSERT(CpuFeatures::IsSupported(f));
+ old_enabled_ = CpuFeatures::enabled_;
+ CpuFeatures::enabled_ |= 1u << f;
+ }
+ ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+ private:
+ unsigned old_enabled_;
+#else
+ public:
+ explicit Scope(Feature f) {}
+#endif
+ };
private:
- static uint64_t supported_;
- static uint64_t enabled_;
+ static unsigned supported_;
+ static unsigned enabled_;
};
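// Illustrative sketch, not part of this change: the intended usage pattern for
// CpuFeatures::Scope, mirroring the VFP3 call sites later in this patch.
// EmitVFP3Sequence and EmitGenericSequence are hypothetical placeholders for
// whatever code generation the caller performs.
static void GenerateWithOptionalVFP3() {
  if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
    // In DEBUG builds the Scope constructor asserts that VFP3 is supported and
    // marks it enabled for the lifetime of this block; in release builds the
    // object compiles away entirely.
    CpuFeatures::Scope scope(CpuFeatures::VFP3);
    EmitVFP3Sequence();      // Hypothetical: emits fmdrr/faddd/etc.
  } else {
    EmitGenericSequence();   // Hypothetical fallback that avoids VFP3.
  }
}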
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) {
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
// the_int_ has the answer which is a signed int32 but not a Smi.
// We test for the special value that has a different exponent. This test
}
-static void IntegerToDoubleConversionWithVFP3(MacroAssembler* masm,
- Register inReg,
- Register outHighReg,
- Register outLowReg) {
- // ARMv7 VFP3 instructions to implement integer to double conversion.
- // This VFP3 implementation is known to work
- // on ARMv7-VFP3 Snapdragon processor.
-
- __ mov(r7, Operand(inReg, ASR, kSmiTagSize));
- __ fmsr(s15, r7);
- __ fsitod(d7, s15);
- __ fmrrd(outLowReg, outHighReg, d7);
-}
-
-
// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* rhs_not_nan,
__ push(lr);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r1, r3, r2);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
} else {
__ mov(r7, Operand(r1));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
__ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r0, r1, r0);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
} else {
__ mov(r7, Operand(r0));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
EmitNanCheck(masm, &rhs_not_nan, cc_);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
// ARMv7 VFP3 instructions to implement double precision comparison.
- // This VFP3 implementation is known to work on
- // ARMv7-VFP3 Snapdragon processor.
-
__ fmdrr(d6, r0, r1);
__ fmdrr(d7, r2, r3);
AllocateHeapNumber(masm, &slow, r5, r6, r7);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2);
- IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r0, r3, r2);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
}
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
- IntegerToDoubleConversionWithVFP3(masm, r1, r1, r0);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
(Token::DIV == operation) ||
(Token::ADD == operation) ||
(Token::SUB == operation))) {
- // ARMv7 VFP3 instructions to implement
- // double precision, add, subtract, multiply, divide.
- // This VFP3 implementation is known to work on
- // ARMv7-VFP3 Snapdragon processor
-
- __ fmdrr(d6, r0, r1);
- __ fmdrr(d7, r2, r3);
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
+ // ARMv7 VFP3 instructions to implement
+ // double precision, add, subtract, multiply, divide.
+ __ fmdrr(d6, r0, r1);
+ __ fmdrr(d7, r2, r3);
- if (Token::MUL == operation) __ fmuld(d5, d6, d7);
- else if (Token::DIV == operation) __ fdivd(d5, d6, d7);
- else if (Token::ADD == operation) __ faddd(d5, d6, d7);
- else if (Token::SUB == operation) __ fsubd(d5, d6, d7);
+ if (Token::MUL == operation) {
+ __ fmuld(d5, d6, d7);
+ } else if (Token::DIV == operation) {
+ __ fdivd(d5, d6, d7);
+ } else if (Token::ADD == operation) {
+ __ faddd(d5, d6, d7);
+ } else if (Token::SUB == operation) {
+ __ fsubd(d5, d6, d7);
+ } else {
+ UNREACHABLE();
+ }
- __ fmrrd(r0, r1, d5);
+ __ fmrrd(r0, r1, d5);
- __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
- __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
- __ mov(r0, Operand(r5));
- __ mov(pc, lr);
- return;
+ __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
+ __ mov(r0, Operand(r5));
+ __ mov(pc, lr);
+ return;
}
__ push(lr); // For later.
__ push(r5); // Address of heap number that is answer.
}
__ bind(&right_exponent);
if (CpuFeatures::IsSupported(CpuFeatures::VFP3)) {
+ CpuFeatures::Scope scope(CpuFeatures::VFP3);
// ARMv7 VFP3 instructions implementing double precision to integer
// conversion using round to zero.
- // This VFP3 implementation is known to work on
- // ARMv7-VFP3 Snapdragon processor.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ fmdrr(d7, scratch2, scratch);
__ ftosid(s15, d7);
// Shift up the mantissa bits to take up the space the exponent used to
// take. We just orred in the implicit bit so that took care of one and
// we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance.
+ // distance.
const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
__ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag.
return result;
}
+
// Support for VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2"
// These register names are defined in a way to match the native disassembler
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
};
+
const char* VFPRegisters::Name(int reg) {
- const char* result;
- if ((0 <= reg) && (reg < kNumVFPRegisters)) {
- result = names_[reg];
- } else {
- result = "no_vfp_reg";
- }
- return result;
+ ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+ return names_[reg];
}
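// Illustrative sketch, not part of this change: spot checks of the mapping the
// table above implies, assuming it starts at "s0" and lists the 32 single
// registers followed by the 16 double registers (so kNumVFPRegisters is taken
// to be 48 here).
static void CheckVFPRegisterNames() {
  ASSERT(strcmp(VFPRegisters::Name(0), "s0") == 0);    // First single register.
  ASSERT(strcmp(VFPRegisters::Name(31), "s31") == 0);  // Last single register.
  ASSERT(strcmp(VFPRegisters::Name(32), "d0") == 0);   // "s0:s1" is d0.
  ASSERT(strcmp(VFPRegisters::Name(47), "d15") == 0);  // "s30:s31" is d15.
}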
+
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumRegisters; i++) {
struct RegisterAlias {
int reg;
- const char *name;
+ const char* name;
};
private:
return -1;
}
+
// Handle all VFP register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatVFPRegister(Instr* instr, const char* format) {
return -1;
}
+
int Decoder::FormatVFPinstruction(Instr* instr, const char* format) {
Print(format);
return 0;
}
-
-
// FormatOption takes a formatting string and interprets it based on
// the current instructions. The format string points to the first
// character of the option string (the option escape has already been
// void Decoder::DecodeTypeVFP(Instr* instr)
-// Implements the following
-// VFP instructions
-// fmsr :Sn = Rt
-// fmrs :Rt = Sn
+// Implements the following VFP instructions:
+// fmsr: Sn = Rt
+// fmrs: Rt = Sn
// fsitod: Dd = Sm
// ftosid: Sd = Dm
// Dd = faddd(Dn, Dm)
if (instr->Bits(15, 12) == 0xF)
Format(instr, "vmrs'cond APSR, FPSCR");
else
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
} else {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
}
} else if (instr->Bit(21) == 1) {
if ((instr->Bit(20) == 0x1) &&
(instr->Bit(4) == 0)) {
Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
} else {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
}
} else {
if ((instr->Bit(20) == 0x0) &&
(instr->Bits(3, 0) == 0x0)) {
Format(instr, "vmov'cond 'rt, 'Sn");
} else {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
}
}
}
-
-// Decode Type 6 coprocessor instructions
+// Decode Type 6 coprocessor instructions.
// Dm = fmdrr(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm)
void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
ASSERT((instr->TypeField() == 6));
if (instr->Bit(23) == 1) {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
} else if (instr->Bit(22) == 1) {
if ((instr->Bits(27, 24) == 0xC) &&
(instr->Bit(22) == 1) &&
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
}
} else {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
}
} else if (instr->Bit(21) == 1) {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
} else {
- Unknown(instr); // not used by V8
+ Unknown(instr); // Not used by V8.
}
}
}
+void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg) {
+ // ARMv7 VFP3 instructions to implement integer to double conversion.
+ mov(r7, Operand(inReg, ASR, kSmiTagSize));
+ fmsr(s15, r7);
+ fsitod(d7, s15);
+ fmrrd(outLowReg, outHighReg, d7);
+}
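// Illustrative sketch, not part of this change: a host-side reference for the
// value the helper above produces, assuming the usual Smi encoding in which
// the tag occupies the low kSmiTagSize bits. The ASR untags the Smi,
// fmsr/fsitod perform the signed-int-to-double conversion, and fmrrd merely
// splits the 64-bit result across the two output core registers.
static double SmiToDoubleReference(int32_t smi_encoded) {
  int32_t untagged = smi_encoded >> kSmiTagSize;  // mov(r7, Operand(inReg, ASR, kSmiTagSize))
  return static_cast<double>(untagged);           // fmsr(s15, r7); fsitod(d7, s15)
}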
+
+
void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
// All parameters are on the stack. r0 has the return value after call.
// occurred.
void IllegalOperation(int num_arguments);
+ // Uses VFP instructions to convert a Smi to a double.
+ void IntegerToDoubleConversionWithVFP3(Register inReg,
+ Register outHighReg,
+ Register outLowReg);
+
// ---------------------------------------------------------------------------
// Runtime calls
v_flag_ = false;
// Initializing VFP registers.
- // All registers are initialized to zero to start with.
+ // All registers are initialized to zero to start with
// even though s_registers_ & d_registers_ share the same
- // physical registers in the target
+ // physical registers in the target.
for (int i = 0; i < num_s_registers; i++) {
vfp_register[i] = 0;
}
return registers_[pc];
}
+
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
vfp_register[sreg] = value;
}
+
unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
return vfp_register[sreg];
}
+
void Simulator::set_s_register_from_float(int sreg, const float flt) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
// Read the bits from the single precision floating point value
memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
}
+
void Simulator::set_s_register_from_sinteger(int sreg, const int sint) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
- // Read the bits from the integer value
- // into the unsigned integer element of vfp_register[] given by index=sreg.
+ // Read the bits from the integer value into the unsigned integer element of
+ // vfp_register[] given by index=sreg.
char buffer[sizeof(vfp_register[0])];
memcpy(buffer, &sint, sizeof(vfp_register[0]));
memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0]));
}
+
void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
- // Read the bits from the double precision floating point value
- // into the two consecutive unsigned integer elements of vfp_register[]
- // given by index 2*sreg and 2*sreg+1.
+ // Read the bits from the double precision floating point value into the two
+ // consecutive unsigned integer elements of vfp_register[] given by index
+ // 2*sreg and 2*sreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
#ifndef BIG_ENDIAN_FLOATING_POINT
#endif
}
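// Illustrative sketch, not part of this change: the bit copy performed above,
// reduced to a round trip. A double occupies two consecutive 32-bit words of
// vfp_register, and using memcpy (here directly, above via a char buffer)
// avoids casting between unrelated pointer types. RoundTripDouble is a
// hypothetical helper.
static double RoundTripDouble(double dbl) {
  unsigned int words[2];                    // Two 32-bit halves, as in vfp_register.
  memcpy(words, &dbl, sizeof(words));       // As in set_d_register_from_double().
  double result;
  memcpy(&result, words, sizeof(result));   // As in get_double_from_d_register().
  return result;
}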
+
float Simulator::get_float_from_s_register(int sreg) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
return(sm_val);
}
+
int Simulator::get_sinteger_from_s_register(int sreg) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
return(sm_val);
}
+
double Simulator::get_double_from_d_register(int dreg) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
-#ifndef BIG_ENDIAN_FLOATING_POINT
- memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#else
+#ifdef BIG_ENDIAN_FLOATING_POINT
memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
+#else
+ memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
#endif
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
return overflow;
}
+
// Support for VFP comparisons.
void Simulator::Compute_FPSCR_Flags(double val1, double val2) {
- // All Non-Nan cases
+ // All non-NaN cases.
if (val1 == val2) {
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = true;
}
-
// Addressing Mode 1 - Data-processing operands:
// Get the value based on the shifter_operand with register.
int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
}
}
} else {
- UNIMPLEMENTED(); // not used by V8
+ UNIMPLEMENTED(); // Not used by V8.
}
} else {
// extra load/store instructions
if (instr->Bits(15, 12) == 0xF)
Copy_FPSCR_to_APSR();
else
- UNIMPLEMENTED(); // not used by V8 now
+ UNIMPLEMENTED(); // Not used by V8.
} else {
- UNIMPLEMENTED(); // not used by V8 now
+ UNIMPLEMENTED(); // Not used by V8.
}
} else if (instr->Bit(21) == 1) {
if ((instr->Bit(20) == 0x1) &&
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
} else {
- UNIMPLEMENTED(); // not used by V8 now
+ UNIMPLEMENTED(); // Not used by V8.
}
} else {
if ((instr->Bit(20) == 0x0) &&
instr->NField()));
set_register(rt, int_value);
} else {
- UNIMPLEMENTED(); // not used by V8 now
+ UNIMPLEMENTED(); // Not used by V8.
}
}
}
-
// void Simulator::DecodeType6CoprocessorIns(Instr* instr)
-// Decode Type 6 coprocessor instructions
+// Decode Type 6 coprocessor instructions.
// Dm = fmdrr(Rt, Rt2)
// <Rt, Rt2> = fmrrd(Dm)
void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
bool v_flag_;
// VFP architecture state.
- unsigned int vfp_register[32/*num_s_registers*/];
+ unsigned int vfp_register[num_s_registers];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
- "enable use of VFP3 instructions if available")
+ "enable use of VFP3 instructions if available (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
}
-Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Memory::Object_Handle_at(pc_);
}
// be preserved.
static void ArrayNativeCode(MacroAssembler* masm,
bool construct_call,
- Label *call_generic_code) {
+ Label* call_generic_code) {
Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call;
// Push the constructor and argc. No need to tag argc as a smi, as there will
}
-bool OS::fgrep_vfp(const char* file_name, const char* string) {
+bool OS::ArmCpuHasFeature(OS::CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
// Simple detection of VFP at runtime for Linux.
// It is based on /proc/cpuinfo, which reveals hardware configuration
// to user-space applications. According to ARM (mid 2009), no similar
// and not using STL string and ifstream because,
// on Linux, it's reading from a (non-mmap-able)
// character special device.
+ switch (feature) {
+ case VFP:
+ search_string = "vfp";
+ break;
+ default:
+ UNREACHABLE();
+ }
FILE* f = NULL;
+ const char* what = search_string;
if (NULL == (f = fopen(file_name, "r")))
return false;
- const char* what = string;
int k;
while (EOF != (k = fgetc(f))) {
if (k == *what) {
fclose(f);
return true;
} else {
- what = string;
+ what = search_string;
}
}
}
fclose(f);
- // Did not find string in the file file_name.
+ // Did not find string in the proc file.
return false;
}
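// Illustrative sketch, not part of this change: the character-by-character
// scan used above, reduced to a standalone helper. On a mismatch it restarts
// at the beginning of the search string without re-examining the mismatching
// character, the same simplification as above, which is adequate for a short
// needle such as "vfp". StreamContains is a hypothetical name.
static bool StreamContains(FILE* f, const char* needle) {
  const char* what = needle;
  int k;
  while (EOF != (k = fgetc(f))) {
    if (k == *what) {
      what++;
      if (*what == '\0') return true;  // Every character of needle matched.
    } else {
      what = needle;  // Restart the match.
    }
  }
  return false;
}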
// This function assumes that the layout of the file is as follows:
// hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
// If we encounter an unexpected situation we abort scanning further entries.
- FILE *fp = fopen("/proc/self/maps", "r");
+ FILE* fp = fopen("/proc/self/maps", "r");
if (fp == NULL) return;
// Allocate enough room to be able to store a full file name.
typedef struct sigcontext mcontext_t;
typedef struct ucontext {
uint32_t uc_flags;
- struct ucontext *uc_link;
+ struct ucontext* uc_link;
stack_t uc_stack;
mcontext_t uc_mcontext;
__sigset_t uc_sigmask;
return 0;
}
+
+bool OS::ArmCpuHasFeature(OS::CpuFeature feature) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
bool OS::IsOutsideAllocatedSpace(void* address) {
UNIMPLEMENTED();
return false;
// Returns the double constant NAN
static double nan_value();
- // Support runtime detection of VFP3 on linux platforms.
- static bool fgrep_vfp(const char * file_name, const char * string);
+ // Support runtime detection of VFP3 on ARM CPUs.
+ enum CpuFeature { VFP };
+ static bool ArmCpuHasFeature(CpuFeature feature);
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.