if (scratch.is(no_reg)) {
if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
// Move the low part of the double into the lower of the corresponding S
// registers of D register dst.
mov(ip, Operand(lo));
- vmov(dst.low(), ip);
+ vmov(loc.low(), ip);
// Move the high part of the double into the higher of the
// corresponding S registers of D register dst.
mov(ip, Operand(hi));
- vmov(dst.high(), ip);
+ vmov(loc.high(), ip);
} else {
// D16-D31 does not have S registers, so move the low and high parts
// directly to the D register using vmov.32.
}
+// Emits VMOV (scalar to ARM core register): moves the 32-bit scalar
+// src[index] out of double register src into core register dst.
+void Assembler::vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond) {
+ // Rt = Dd[index]
+ // Instruction details available in ARM DDI 0406C.b, A8.8.342.
+ // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
+ // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
+ ASSERT(index.index == 0 || index.index == 1);
+ int vn, n;
+ src.split_code(&vn, &n);
+ emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
+ 0xB*B8 | n*B7 | B4);
+}
+
+
void Assembler::vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
return 0 <= code_ && code_ < kMaxNumRegisters;
}
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
- SwVfpRegister low() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = code_ * 2;
-
- ASSERT(reg.is_valid());
- return reg;
- }
- SwVfpRegister high() const {
- ASSERT(code_ < 16);
- SwVfpRegister reg;
- reg.code_ = (code_ * 2) + 1;
-
- ASSERT(reg.is_valid());
- return reg;
- }
int code() const {
ASSERT(is_valid());
return code_;
typedef DwVfpRegister DoubleRegister;
+// Double word VFP register d0-d15. These registers overlap the single
+// precision registers (d<N> aliases s<2N>/s<2N+1>), so unlike d16-d31 they
+// always have valid low()/high() S-register halves.
+struct LowDwVfpRegister {
+ public:
+ static const int kMaxNumLowRegisters = 16;
+ // Implicit widening conversion: every low D register is a DwVfpRegister.
+ operator DwVfpRegister() const {
+ DwVfpRegister r = { code_ };
+ return r;
+ }
+ static LowDwVfpRegister from_code(int code) {
+ LowDwVfpRegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumLowRegisters;
+ }
+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
+ bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ // S register aliasing the low 32 bits of this D register (s<2*code_>).
+ SwVfpRegister low() const {
+ SwVfpRegister reg;
+ reg.code_ = code_ * 2;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+ // S register aliasing the high 32 bits of this D register (s<2*code_+1>).
+ SwVfpRegister high() const {
+ SwVfpRegister reg;
+ reg.code_ = (code_ * 2) + 1;
+
+ ASSERT(reg.is_valid());
+ return reg;
+ }
+
+ int code_;
+};
+
+
// Quad word NEON register.
struct QwNeonRegister {
static const int kMaxNumRegisters = 16;
const SwVfpRegister s31 = { 31 };
const DwVfpRegister no_dreg = { -1 };
-const DwVfpRegister d0 = { 0 };
-const DwVfpRegister d1 = { 1 };
-const DwVfpRegister d2 = { 2 };
-const DwVfpRegister d3 = { 3 };
-const DwVfpRegister d4 = { 4 };
-const DwVfpRegister d5 = { 5 };
-const DwVfpRegister d6 = { 6 };
-const DwVfpRegister d7 = { 7 };
-const DwVfpRegister d8 = { 8 };
-const DwVfpRegister d9 = { 9 };
-const DwVfpRegister d10 = { 10 };
-const DwVfpRegister d11 = { 11 };
-const DwVfpRegister d12 = { 12 };
-const DwVfpRegister d13 = { 13 };
-const DwVfpRegister d14 = { 14 };
-const DwVfpRegister d15 = { 15 };
+const LowDwVfpRegister d0 = { 0 };
+const LowDwVfpRegister d1 = { 1 };
+const LowDwVfpRegister d2 = { 2 };
+const LowDwVfpRegister d3 = { 3 };
+const LowDwVfpRegister d4 = { 4 };
+const LowDwVfpRegister d5 = { 5 };
+const LowDwVfpRegister d6 = { 6 };
+const LowDwVfpRegister d7 = { 7 };
+const LowDwVfpRegister d8 = { 8 };
+const LowDwVfpRegister d9 = { 9 };
+const LowDwVfpRegister d10 = { 10 };
+const LowDwVfpRegister d11 = { 11 };
+const LowDwVfpRegister d12 = { 12 };
+const LowDwVfpRegister d13 = { 13 };
+const LowDwVfpRegister d14 = { 14 };
+const LowDwVfpRegister d15 = { 15 };
const DwVfpRegister d16 = { 16 };
const DwVfpRegister d17 = { 17 };
const DwVfpRegister d18 = { 18 };
const QwNeonRegister q14 = { 14 };
const QwNeonRegister q15 = { 15 };
+
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
const VmovIndex index,
const Register src,
const Condition cond = al);
+ void vmov(const Register dst,
+ const VmovIndex index,
+ const DwVfpRegister src,
+ const Condition cond = al);
void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
Register right = r0;
Register scratch1 = r7;
Register scratch2 = r9;
- DwVfpRegister double_scratch = d0;
+ LowDwVfpRegister double_scratch = d0;
Register heap_number_result = no_reg;
Register heap_number_map = r6;
Label not_zero;
ASSERT(kSmiTag == 0);
__ b(ne, ¬_zero);
- __ vmov(scratch2, d5.high());
+ __ VmovHigh(scratch2, d5);
__ tst(scratch2, Operand(HeapNumber::kSignMask));
__ b(ne, &transition);
__ bind(¬_zero);
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
- __ CopyFields(r0, r4, d0, s0, JSObject::kHeaderSize / kPointerSize);
+ __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements);
+ __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
__ Ret();
}
} else {
Format(instr, "vmov'cond.32 'Dd[1], 'rt");
}
+ } else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
// r0: Newly allocated regexp.
// r5: Materialized regexp.
// r2: temp.
- __ CopyFields(r0, r5, d0, s0, size / kPointerSize);
+ __ CopyFields(r0, r5, d0, size / kPointerSize);
context()->Plug(r0);
}
__ b(ne, slow);
}
__ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, r3,
+ __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new(zone()) LClampDToUint8(reg, FixedTemp(d11)));
+ return DefineAsRegister(new(zone()) LClampDToUint8(reg));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new(zone()) LClampIToUint8(reg));
} else {
};
-class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8: public LTemplateInstruction<1, 1, 0> {
public:
- LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+ explicit LClampDToUint8(LOperand* unclamped) {
inputs_[0] = unclamped;
- temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
DwVfpRegister dividend = ToDoubleRegister(instr->temp());
DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
ASSERT(!divisor.is(dividend));
- DwVfpRegister quotient = double_scratch0();
+ LowDwVfpRegister quotient = double_scratch0();
ASSERT(!quotient.is(dividend));
ASSERT(!quotient.is(divisor));
// Load the arguments in VFP registers. The divisor value is preloaded
// before. Be careful that 'right_reg' is only live on entry.
// TODO(svenpanne) The last comments seems to be wrong nowadays.
- __ vmov(dividend.low(), left_reg);
- __ vmov(divisor.low(), right_reg);
-
- __ vcvt_f64_s32(dividend, dividend.low());
- __ vcvt_f64_s32(divisor, divisor.low());
+ __ vmov(double_scratch0().low(), left_reg);
+ __ vcvt_f64_s32(dividend, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right_reg);
+ __ vcvt_f64_s32(divisor, double_scratch0().low());
// We do not care about the sign of the divisor. Note that we still handle
// the kMinInt % -1 case correctly, though.
__ vcvt_f64_s32(quotient, quotient.low());
// Compute the remainder in result.
- DwVfpRegister double_scratch = dividend;
- __ vmul(double_scratch, divisor, quotient);
- __ vcvt_s32_f64(double_scratch.low(), double_scratch);
- __ vmov(scratch, double_scratch.low());
+ __ vmul(double_scratch0(), divisor, quotient);
+ __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
+ __ vmov(scratch, double_scratch0().low());
__ sub(result_reg, left_reg, scratch, SetCC);
// If we care about -0, test if the dividend is <0 and the result is 0.
} else {
const DoubleRegister vleft = ToDoubleRegister(instr->temp());
const DoubleRegister vright = double_scratch0();
- __ vmov(vleft.low(), left);
- __ vmov(vright.low(), right);
- __ vcvt_f64_s32(vleft, vleft.low());
- __ vcvt_f64_s32(vright, vright.low());
+ __ vmov(double_scratch0().low(), left);
+ __ vcvt_f64_s32(vleft, double_scratch0().low());
+ __ vmov(double_scratch0().low(), right);
+ __ vcvt_f64_s32(vright, double_scratch0().low());
__ vdiv(vleft, vleft, vright); // vleft now contains the result.
- __ vcvt_s32_f64(vright.low(), vleft);
- __ vmov(result, vright.low());
+ __ vcvt_s32_f64(double_scratch0().low(), vleft);
+ __ vmov(result, double_scratch0().low());
if (!instr->hydrogen()->CheckFlag(
HInstruction::kAllUsesTruncatingToInt32)) {
// Deopt if exact conversion to integer was not possible.
// Use vright as scratch register.
- __ vcvt_f64_s32(vright, vright.low());
- __ VFPCompareAndSetFlags(vleft, vright);
+ __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
+ __ VFPCompareAndSetFlags(vleft, double_scratch0());
DeoptimizeIf(ne, instr->environment());
}
}
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, kScratchDoubleReg.low());
+ __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, double_scratch0().low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
Register input_high = scratch0();
Label done, exact;
- __ vmov(input_high, input.high());
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
DeoptimizeIf(al, instr->environment());
// If the input is +0.5, the result is 1.
__ b(hi, &convert); // Out of [-0.5, +0.5].
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ vmov(input_high, input.high());
+ __ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
}
__ bind(&convert);
__ vadd(input_plus_dot_five, input, dot_five);
- __ vmov(input_high, input_plus_dot_five.high());
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DwVfpRegister dbl_scratch = double_scratch0();
- SwVfpRegister flt_scratch = dbl_scratch.low();
+ LowDwVfpRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
} else {
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ __ vmov(dbl_scratch.low(), src);
+ __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
}
if (FLAG_inline_new) {
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
- __ vmov(scratch, input_reg.high());
+ __ VmovHigh(scratch, input_reg);
__ cmp(scratch, Operand(kHoleNanUpper32));
// If not the hole NaN, force the NaN to be canonical.
__ VFPCanonicalizeNaN(input_reg, ne);
DeoptimizeIf(ne, env);
__ bind(&convert);
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+ __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
__ bind(&heap_number);
}
// Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand::Zero());
+ __ VmovLow(scratch, result_reg);
+ __ cmp(scratch, Operand::Zero());
__ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
+ __ VmovHigh(scratch, result_reg);
+ __ cmp(scratch, Operand(HeapNumber::kSignMask));
DeoptimizeIf(eq, env);
}
__ jmp(&done);
Register input_reg = ToRegister(instr->value());
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
- DwVfpRegister double_scratch = double_scratch0();
+ LowDwVfpRegister double_scratch = double_scratch0();
DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
DeoptimizeIf(ne, instr->environment());
__ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(input_reg, double_scratch, double_scratch2);
+ __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
DeoptimizeIf(ne, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_scratch.high());
+ __ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
}
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
+ LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_input.high());
+ __ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
+ LowDwVfpRegister double_scratch = double_scratch0();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
- __ vmov(scratch1, double_input.high());
+ __ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
- __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+ // The fixed low double scratch register replaces the allocator-provided
+ // temp, so the lithium instruction no longer needs a temp operand.
+ __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}
// Heap number
__ bind(&heap_number);
- __ vldr(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
- __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+ __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
__ jmp(&done);
// smi
__ bind(&allocated);
// Copy the content into the newly allocated memory.
- __ CopyFields(r0, r1, double_scratch0(), double_scratch0().low(),
- size / kPointerSize);
+ __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
HGraph* graph() const { return chunk()->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return kScratchDoubleReg; }
+ LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
int GetNextEmittedBlock() const;
LInstruction* GetNextInstruction();
}
+// Moves the high 32 bits of double register src into core register dst.
+// d0-d15 go through the overlapping S register; d16-d31 have no S aliases,
+// so those use vmov.32 with the high scalar index.
+void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.high());
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+// Moves core register src into the high 32 bits of double register dst,
+// leaving the low 32 bits unchanged.
+void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.high(), src);
+ } else {
+ vmov(dst, VmovIndexHi, src);
+ }
+}
+
+
+// Moves the low 32 bits of double register src into core register dst.
+// d0-d15 go through the overlapping S register; d16-d31 have no S aliases,
+// so those use vmov.32 with the low scalar index.
+void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
+ if (src.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
+ vmov(dst, loc.low());
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
+// Moves core register src into the low 32 bits of double register dst,
+// leaving the high 32 bits unchanged.
+void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
+ if (dst.code() < 16) {
+ const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
+ vmov(loc.low(), src);
+ } else {
+ vmov(dst, VmovIndexLo, src);
+ }
+}
+
+
void MacroAssembler::ConvertNumberToInt32(Register object,
Register dst,
Register heap_number_map,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
+ LowDwVfpRegister double_scratch2,
Label* not_number) {
Label done;
UntagAndJumpIfSmi(dst, object, &done);
void MacroAssembler::LoadNumber(Register object,
- DwVfpRegister dst,
+ LowDwVfpRegister dst,
Register heap_number_map,
Register scratch,
Label* not_number) {
DwVfpRegister double_dst,
Register heap_number_map,
Register scratch,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* not_int32) {
ASSERT(!scratch.is(object));
ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
+ LowDwVfpRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch.is(object));
}
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Label* fail,
- int elements_offset) {
+void MacroAssembler::StoreNumberToDoubleElements(
+ Register value_reg,
+ Register key_reg,
+ Register elements_reg,
+ Register scratch1,
+ LowDwVfpRegister double_scratch,
+ Label* fail,
+ int elements_offset) {
Label smi_value, store;
// Handle smi values specially.
fail,
DONT_DO_SMI_CHECK);
- vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Force a canonical NaN.
if (emit_debug_code()) {
vmrs(ip);
tst(ip, Operand(kVFPDefaultNaNModeControlBit));
Assert(ne, "Default NaN mode not set");
}
- VFPCanonicalizeNaN(d0);
+ VFPCanonicalizeNaN(double_scratch);
b(&store);
bind(&smi_value);
- SmiToDouble(d0, value_reg);
+ SmiToDouble(double_scratch, value_reg);
bind(&store);
add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
- vstr(d0, FieldMemOperand(scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
+ vstr(double_scratch,
+ FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
}
-void MacroAssembler::SmiToDouble(DwVfpRegister value, Register smi) {
- ASSERT(value.code() < 16);
+void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
if (CpuFeatures::IsSupported(VFP3)) {
vmov(value.low(), smi);
vcvt_f64_s32(value, 1);
void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
- DwVfpRegister double_scratch) {
+ LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vcvt_f64_s32(double_scratch, double_scratch.low());
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
- DwVfpRegister double_scratch) {
+ LowDwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
void MacroAssembler::TryInt32Floor(Register result,
DwVfpRegister double_input,
Register input_high,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* done,
Label* exact) {
ASSERT(!result.is(input_high));
ASSERT(!double_input.is(double_scratch));
Label negative, exception;
+ VmovHigh(input_high, double_input);
+
// Test for NaN and infinities.
Sbfx(result, input_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
Register scratch,
Register scratch_high,
Register scratch_low,
- DwVfpRegister double_scratch) {
+ LowDwVfpRegister double_scratch) {
ASSERT(!scratch_high.is(result));
ASSERT(!scratch_low.is(result));
ASSERT(!scratch_low.is(scratch_high));
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
- DwVfpRegister double_scratch,
- SwVfpRegister single_scratch,
+ LowDwVfpRegister double_scratch,
int field_count) {
int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
for (int i = 0; i < double_count; i++) {
int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
if (remain != 0) {
- vldr(single_scratch,
+ vldr(double_scratch.low(),
FieldMemOperand(src, (field_count - 1) * kPointerSize));
- vstr(single_scratch,
+ vstr(double_scratch.low(),
FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
}
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg) {
+ LowDwVfpRegister double_scratch) {
Label above_zero;
Label done;
Label in_bounds;
- Vmov(temp_double_reg, 0.0);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ VFPCompareAndSetFlags(input_reg, 0.0);
b(gt, &above_zero);
// Double value is less than zero, NaN or Inf, return 0.
// Double value is >= 255, return 255.
bind(&above_zero);
- Vmov(temp_double_reg, 255.0, result_reg);
- VFPCompareAndSetFlags(input_reg, temp_double_reg);
+ Vmov(double_scratch, 255.0, result_reg);
+ VFPCompareAndSetFlags(input_reg, double_scratch);
b(le, &in_bounds);
mov(result_reg, Operand(255));
b(al, &done);
// Set rounding mode to round to the nearest integer by clearing bits[23:22].
bic(result_reg, ip, Operand(kVFPRoundingModeMask));
vmsr(result_reg);
- vcvt_s32_f64(input_reg.low(), input_reg, kFPSCRRounding);
- vmov(result_reg, input_reg.low());
+ vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
+ vmov(result_reg, double_scratch.low());
// Restore FPSCR.
vmsr(ip);
bind(&done);
const double imm,
const Register scratch = no_reg);
+ void VmovHigh(Register dst, DwVfpRegister src);
+ void VmovHigh(DwVfpRegister dst, Register src);
+ void VmovLow(Register dst, DwVfpRegister src);
+ void VmovLow(DwVfpRegister dst, Register src);
+
// Converts the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch1,
- DwVfpRegister double_scratch2,
+ LowDwVfpRegister double_scratch2,
Label* not_int32);
// Loads the number from object into dst register.
// If |object| is neither smi nor heap number, |not_number| is jumped to
// with |object| still intact.
void LoadNumber(Register object,
- DwVfpRegister dst,
+ LowDwVfpRegister dst,
Register heap_number_map,
Register scratch,
Label* not_number);
DwVfpRegister double_dst,
Register heap_number_map,
Register scratch,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
Register heap_number_map,
Register scratch,
DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
+ LowDwVfpRegister double_scratch1,
Label* not_int32);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst,
Register src,
- DwVfpRegister double_scratch,
- SwVfpRegister single_scratch,
+ LowDwVfpRegister double_scratch,
int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
Register key_reg,
Register elements_reg,
Register scratch1,
+ LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset = 0);
// Load the value of a smi object into a double register.
// The register value must be between d0 and d15.
- void SmiToDouble(DwVfpRegister value, Register smi);
+ void SmiToDouble(LowDwVfpRegister value, Register smi);
// Check if a double can be exactly represented as a signed 32-bit integer.
// Z flag set to one if true.
void TestDoubleIsInt32(DwVfpRegister double_input,
- DwVfpRegister double_scratch);
+ LowDwVfpRegister double_scratch);
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
- DwVfpRegister double_scratch);
+ LowDwVfpRegister double_scratch);
// Floor a double and writes the value to the result register.
// Go to exact if the conversion is exact (to be able to test -0),
// fall through calling code if an overflow occurred, else go to done.
+ // On return, input_high contains the high 32 bits of double_input.
void TryInt32Floor(Register result,
DwVfpRegister double_input,
Register input_high,
- DwVfpRegister double_scratch,
+ LowDwVfpRegister double_scratch,
Label* done,
Label* exact);
Register scratch,
Register scratch_high,
Register scratch_low,
- DwVfpRegister double_scratch);
+ LowDwVfpRegister double_scratch);
// Check whether d16-d31 are available on the CPU. The result is given by the
// Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
void ClampDoubleToUint8(Register result_reg,
DwVfpRegister input_reg,
- DwVfpRegister temp_double_reg);
+ LowDwVfpRegister double_scratch);
void LoadInstanceDescriptors(Register map, Register descriptors);
OS::MemCopy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (scalar to ARM core register)
+ int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dn_value = get_double_from_d_register(vn);
+ int32_t data[2];
+ OS::MemCopy(data, &dn_value, 8);
+ set_register(instr->RtValue(), data[instr->Bit(21)]);
+ } else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
__ b(gt, &call_builtin);
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(r4, r0, elements, r5,
+ __ StoreNumberToDoubleElements(r4, r0, elements, r5, d0,
&call_builtin, argc * kDoubleSize);
// Save new length.
Register key,
Register scratch0,
DwVfpRegister double_scratch0,
- DwVfpRegister double_scratch1,
+ LowDwVfpRegister double_scratch1,
Label* fail) {
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
- scratch1, &transition_elements_kind);
+ scratch1, d0, &transition_elements_kind);
__ Ret();
// Handle store cache miss, replacing the ic with the generic stub.
__ mov(scratch1, elements_reg);
__ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
- scratch2, &transition_elements_kind);
+ scratch2, d0, &transition_elements_kind);
__ mov(scratch1, Operand(kHoleNanLower32));
__ mov(scratch2, Operand(kHoleNanUpper32));
double i;
double j;
double k;
+ uint32_t low;
+ uint32_t high;
} T;
T t;
__ vmov(d22, VmovIndexHi, r2);
__ add(r4, r0, Operand(OFFSET_OF(T, i)));
__ vstm(ia_w, r4, d20, d22);
+ // Move d22 into low and high.
+ __ vmov(r4, VmovIndexLo, d22);
+ __ str(r4, MemOperand(r0, OFFSET_OF(T, low)));
+ __ vmov(r4, VmovIndexHi, d22);
+ __ str(r4, MemOperand(r0, OFFSET_OF(T, high)));
__ ldm(ia_w, sp, r4.bit() | pc.bit());
CHECK_EQ(14.7610017472335499, t.i);
CHECK_EQ(16.0, t.j);
CHECK_EQ(73.8818412254460241, t.k);
+ CHECK_EQ(372106121, t.low);
+ CHECK_EQ(1079146608, t.high);
}
}
COMPARE(vmov(d0, VmovIndexHi, r0),
"ee200b10 vmov.32 d0[1], r0");
+ COMPARE(vmov(r2, VmovIndexLo, d15),
+ "ee1f2b10 vmov.32 r2, d15[0]");
+ COMPARE(vmov(r3, VmovIndexHi, d14),
+ "ee3e3b10 vmov.32 r3, d14[1]");
+
COMPARE(vldr(s0, r0, 0),
"ed900a00 vldr s0, [r0 + 4*0]");
COMPARE(vldr(s1, r1, 4),