// Adds Arm-specific methods to convert InstructionOperands.
-class ArmOperandConverter : public InstructionOperandConverter {
+class ArmOperandConverter FINAL : public InstructionOperandConverter {
public:
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
+ SwVfpRegister OutputFloat32Register(int index = 0) {
+ return ToFloat32Register(instr_->OutputAt(index));
+ }
+
+ SwVfpRegister InputFloat32Register(int index) {
+ return ToFloat32Register(instr_->InputAt(index));
+ }
+
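+ // VFP aliases each single-precision register s<2n> onto the low half of
+ // double register d<n> (for d0 through d15), so a float32 value allocated
+ // to a double register can be read through the corresponding low S register.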
+ SwVfpRegister ToFloat32Register(InstructionOperand* op) {
+ return ToFloat64Register(op).low();
+ }
+
+ LowDwVfpRegister OutputFloat64Register(int index = 0) {
+ return ToFloat64Register(instr_->OutputAt(index));
+ }
+
+ LowDwVfpRegister InputFloat64Register(int index) {
+ return ToFloat64Register(instr_->InputAt(index));
+ }
+
+ LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
+ return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
+ }
+
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
break;
}
case kArchJmp:
- __ b(code_->GetLabel(i.InputBlock(0)));
+ __ b(code_->GetLabel(i.InputRpo(0)));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchNop:
// Don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArchRet:
AssembleReturn();
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchStackPointer:
+ __ mov(i.OutputRegister(), sp);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArchTruncateDoubleToI:
- __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmAdd:
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmSmmul:
+ __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmSmmla:
+ __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputRegister(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
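+ // umull produces a 64-bit unsigned product as a register pair: the low
+ // word in OutputRegister(0) and the high word in OutputRegister(1).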
+ case kArmUmull:
+ __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
+ i.InputRegister(1), i.OutputSBit());
+ break;
case kArmSdiv: {
CpuFeatureScope scope(masm(), SUDIV);
__ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVcmpF64:
- __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF64:
- __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF64:
- __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF64:
- __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF64:
- __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
+ i.InputFloat64Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF64:
- __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
+ i.InputFloat64Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF64:
- __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmodF64: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
__ PrepareCallCFunction(0, 2, kScratchReg);
- __ MovToFloatParameters(i.InputDoubleRegister(0),
- i.InputDoubleRegister(1));
+ __ MovToFloatParameters(i.InputFloat64Register(0),
+ i.InputFloat64Register(1));
__ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
0, 2);
// Move the result into the double result register.
- __ MovFromFloatResult(i.OutputDoubleRegister());
+ __ MovFromFloatResult(i.OutputFloat64Register());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVsqrtF64:
+ __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
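+ // vrintm, vrintp, vrintz and vrinta round towards -infinity, +infinity,
+ // zero, and to nearest with ties away from zero, respectively.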
+ case kArmVfloorF64:
+ __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVceilF64:
+ __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVroundTruncateF64:
+ __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
+ case kArmVroundTiesAwayF64:
+ __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVnegF64:
- __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVsqrtF64:
- __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArmVcvtF32F64: {
+ __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtF64F32: {
+ __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ }
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+ __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
- __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+ __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtS32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+ __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtU32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+ __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
__ vmov(i.OutputRegister(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmVldr32: {
- SwVfpRegister scratch = kScratchDoubleReg.low();
- __ vldr(scratch, i.InputOffset());
- __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+ case kArmVldrF32: {
+ __ vldr(i.OutputFloat32Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmVstr32: {
+ case kArmVstrF32: {
int index = 0;
- SwVfpRegister scratch = kScratchDoubleReg.low();
MemOperand operand = i.InputOffset(&index);
- __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
- __ vstr(scratch, operand);
+ __ vstr(i.InputFloat32Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
- case kArmVldr64:
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+ case kArmVldrF64:
+ __ vldr(i.OutputFloat64Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArmVstr64: {
+ case kArmVstrF64: {
int index = 0;
MemOperand operand = i.InputOffset(&index);
- __ vstr(i.InputDoubleRegister(index), operand);
+ __ vstr(i.InputFloat64Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
// Emit a branch. The true and false targets are always the last two inputs
// to the instruction.
- BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
- BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+ BasicBlock::RpoNumber tblock =
+ i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+ BasicBlock::RpoNumber fblock =
+ i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
bool fallthru = IsNextInAssemblyOrder(fblock);
Label* tlabel = code()->GetLabel(tblock);
Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
__ stm(db_w, sp, saves);
}
} else if (descriptor->IsJSFunctionCall()) {
- CompilationInfo* info = linkage()->info();
+ CompilationInfo* info = this->info();
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
__ str(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
- Constant src = g.ToConstant(source);
switch (src.type()) {
case Constant::kInt32:
__ mov(dst, Operand(src.ToInt32()));
break;
case Constant::kInt64:
UNREACHABLE();
break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
case Constant::kFloat64:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
break;
case Constant::kExternalReference:
__ mov(dst, Operand(src.ToExternalReference()));
break;
case Constant::kHeapObject:
__ Move(dst, src.ToHeapObject());
break;
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
- } else if (destination->IsDoubleRegister()) {
- DwVfpRegister result = g.ToDoubleRegister(destination);
- __ vmov(result, g.ToDouble(source));
+ } else if (src.type() == Constant::kFloat32) {
+ SwVfpRegister dst = destination->IsDoubleRegister()
+ ? g.ToFloat32Register(destination)
+ : kScratchDoubleReg.low();
+ // TODO(turbofan): Can we do better here?
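+ // Only a limited set of float32 immediates can be encoded directly in a
+ // vmov, so the raw bits are materialized in a core register and moved over.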
+ __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ vmov(dst, ip);
+ if (destination->IsDoubleStackSlot()) {
+ __ vstr(dst, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsDoubleStackSlot());
- DwVfpRegister temp = kScratchDoubleReg;
- __ vmov(temp, g.ToDouble(source));
- __ vstr(temp, g.ToMemOperand(destination));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DwVfpRegister dst = destination->IsDoubleRegister()
+ ? g.ToFloat64Register(destination)
+ : kScratchDoubleReg;
+ __ vmov(dst, src.ToFloat64());
+ if (destination->IsDoubleStackSlot()) {
+ __ vstr(dst, g.ToMemOperand(destination));
+ }
}
} else if (source->IsDoubleRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
void CodeGenerator::EnsureSpaceForLazyDeopt() {
int space_needed = Deoptimizer::patch_size();
- if (!linkage()->info()->IsStub()) {
+ if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();