}
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l,
- Condition cond) {
- addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l,
- Condition cond) {
- // Unindexed addressing.
- ASSERT(is_uint8(option));
- emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
- coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- const MemOperand& dst,
- LFlag l) { // v5 and above
- stc(coproc, crd, dst, l, kSpecialCondition);
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
- CRegister crd,
- Register rn,
- int option,
- LFlag l) { // v5 and above
- stc(coproc, crd, rn, option, l, kSpecialCondition);
-}
-
-
// Support for VFP.
void Assembler::vldr(const DwVfpRegister dst,
void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
- void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short, Condition cond = al);
- void stc(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short, Condition cond = al);
-
- void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
- LFlag l = Short); // v5 and above
- void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
- LFlag l = Short); // v5 and above
-
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D15.
// Currently these APIs do not support extended D registers, i.e, D16 to D31.
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-#ifndef BIG_ENDIAN_FLOATING_POINT
- Register exponent = result1_;
- Register mantissa = result2_;
-#else
Register exponent = result2_;
Register mantissa = result1_;
-#endif
+
Label not_special;
// Convert from Smi to integer.
__ mov(source_, Operand(source_, ASR, kSmiTagSize));
// Call C routine that may not cause GC or other trouble.
__ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
4);
- // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from heap_number_result.
- __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
-#else
- // Double returned in registers 0 and 1.
+ // Store answer in the overwritable heap number. Double returned in
+ // registers r0 and r1.
__ Strd(r0, r1, FieldMemOperand(heap_number_result,
HeapNumber::kValueOffset));
-#endif
// Place heap_number_result in r0 and return to the pushed return address.
__ mov(r0, Operand(heap_number_result));
__ pop(pc);
// save.
__ CallCFunction(
ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
- // Store answer in the overwritable heap number.
- #if !defined(USE_ARM_EABI)
- // Double returned in fp coprocessor register 0 and 1, encoded as
- // register cr8. Offsets must be divisible by 4 for coprocessor so we
- // need to substract the tag from r5.
- __ sub(r4, r5, Operand(kHeapObjectTag));
- __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
- #else
- // Double returned in registers 0 and 1.
+ // Store answer in the overwritable heap number. Double
+ // returned in registers r0 and r1.
__ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
- #endif
__ mov(r0, Operand(r5));
// And we are done.
__ pop(pc);
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
-// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
-// are not running on real ARM hardware. One reason for this is that the
-// old ABI uses fp registers in the calling convention and the simulator does
-// not simulate fp registers or coroutine instructions.
-#if defined(__ARM_EABI__) || !defined(__arm__)
-# define USE_ARM_EABI 1
+// ARM EABI is required.
+#if defined(__arm__) && !defined(__ARM_EABI__)
+#error ARM EABI support is required.
#endif
// This means that interwork-compatible jump instructions are generated. We
register uint32_t end asm("a2") =
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
- #ifdef __ARM_EABI__
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
- asm volatile(
- "svc 0x0"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno));
- #else
- // r7 is reserved by the EABI in thumb mode.
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: push {r7} \n\t"
- "mov r7, %4 \n\t"
- "svc 0x0 \n\t"
- "pop {r7} \n\t"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
- : "r3");
- #endif
+ #if defined (__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+ asm volatile(
+ "svc 0x0"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
- #if defined (__arm__) && !defined(__thumb__)
- // __arm__ may be defined in thumb mode.
- asm volatile(
- "svc %1"
- : "=r" (beg)
- : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
- #else
- // Do not use the value of __ARM_NR_cacheflush in the inline assembly
- // below, because the thumb mode value would be used, which would be
- // wrong, since we switch to ARM mode before executing the svc instruction
- asm volatile(
- "@ Enter ARM Mode \n\t"
- "adr r3, 1f \n\t"
- "bx r3 \n\t"
- ".ALIGN 4 \n\t"
- ".ARM \n"
- "1: svc 0x9f0002 \n"
- "@ Enter THUMB Mode\n\t"
- "adr r3, 2f+1 \n\t"
- "bx r3 \n\t"
- ".THUMB \n"
- "2: \n\t"
- : "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg)
- : "r3");
- #endif
+ // r7 is reserved by the EABI in thumb mode.
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r3, 1f \n\t"
+ "bx r3 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: push {r7} \n\t"
+ "mov r7, %4 \n\t"
+ "svc 0x0 \n\t"
+ "pop {r7} \n\t"
+ "@ Enter THUMB Mode\n\t"
+ "adr r3, 2f+1 \n\t"
+ "bx r3 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
+ : "r3");
#endif
#endif
}
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
-#if !defined(USE_ARM_EABI)
- UNREACHABLE();
-#else
vmov(dst, r0, r1);
-#endif
}
// 2*sreg and 2*sreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
-#ifndef BIG_ENDIAN_FLOATING_POINT
memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
-#else
- memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
- memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
-#endif
}
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
-#ifdef BIG_ENDIAN_FLOATING_POINT
- memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
- memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
-#else
memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
-#endif
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
}
if (x < k2Pow52) {
x += k2Pow52;
uint32_t result;
-#ifdef BIG_ENDIAN_FLOATING_POINT
- Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
-#else
Address mantissa_ptr = reinterpret_cast<Address>(&x);
-#endif
// Copy least significant 32 bits of mantissa.
memcpy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
// is a mixture of sign, exponent and mantissa. Our current platforms are all
// little endian apart from non-EABI arm which is little endian with big
// endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
static const int kMantissaOffset = kValueOffset;
static const int kExponentOffset = kValueOffset + 4;
-#else
- static const int kMantissaOffset = kValueOffset + 4;
- static const int kExponentOffset = kValueOffset;
-# define BIG_ENDIAN_FLOATING_POINT 1
-#endif
+
static const int kSize = kValueOffset + kDoubleSize;
static const uint32_t kSignMask = 0x80000000u;
static const uint32_t kExponentMask = 0x7ff00000u;
// Setup the platform OS support.
OS::Setup();
-#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
- use_crankshaft_ = false;
-#else
use_crankshaft_ = FLAG_crankshaft;
-#endif
if (Serializer::enabled()) {
use_crankshaft_ = false;
static double DoubleFromBits(uint64_t value) {
double target;
-#ifdef BIG_ENDIAN_FLOATING_POINT
- const int kIntSize = 4;
- // Somebody swapped the lower and higher half of doubles.
- memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
- memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
-#else
memcpy(&target, &value, sizeof(target));
-#endif
return target;
}
static uint64_t DoubleToBits(double value) {
uint64_t target;
-#ifdef BIG_ENDIAN_FLOATING_POINT
- const int kIntSize = 4;
- // Somebody swapped the lower and higher half of doubles.
- memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
- memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
-#else
memcpy(&target, &value, sizeof(target));
-#endif
return target;
}