__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
i.InputInt8(2));
break;
+ case kArm64Bfi:
+ __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
+ i.InputInt6(3));
+ break;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
// Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
break;
case kArm64Float64ExtractLowWord32:
+ // TODO(arm64): This should use MOV (to general) when NEON is supported.
__ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
break;
case kArm64Float64ExtractHighWord32:
+ // TODO(arm64): This should use MOV (to general) when NEON is supported.
__ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
__ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
break;
case kArm64Float64InsertLowWord32: {
+ // TODO(arm64): This should use MOV (from general) when NEON is supported.
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
__ Fmov(tmp, i.InputFloat64Register(0));
__ Bfi(tmp, i.InputRegister(1), 0, 32);
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
case kArm64Float64InsertHighWord32: {
+ // TODO(arm64): This should use MOV (from general) when NEON is supported.
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
__ Fmov(tmp.W(), i.InputFloat32Register(0));
__ Bfi(tmp, i.InputRegister(1), 32, 32);
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
+ case kArm64Float64MoveU64: {
+ __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
+ break;
+ }
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
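For reference, AArch64 BFI (the instruction behind the new kArm64Bfi opcode) copies the low `width` bits of the source register into the destination at bit position `lsb`, leaving the destination's other bits untouched. A minimal sketch of that semantics, using a hypothetical helper name bfi64 (illustration only, not code from this change):

#include <cstdint>

// Models BFI dst, src, #lsb, #width on a 64-bit register: the low `width`
// bits of `src` replace bits [lsb, lsb + width) of `dst`.
uint64_t bfi64(uint64_t dst, uint64_t src, unsigned lsb, unsigned width) {
  const uint64_t field = (width == 64) ? ~uint64_t{0}
                                       : (uint64_t{1} << width) - 1;
  const uint64_t mask = field << lsb;
  return (dst & ~mask) | ((src & field) << lsb);
}
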
void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
- // TODO(arm64): Some AArch64 specialist should be able to improve this.
Arm64OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ Node* right_of_left = left->InputAt(1);
+ Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
+ g.UseRegister(right_of_left), g.TempImmediate(32),
+ g.TempImmediate(32));
+ Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
+ return;
+ }
Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
g.UseRegister(left), g.UseRegister(right));
}
void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
- // TODO(arm64): Some AArch64 specialist should be able to improve this.
Arm64OperandGenerator g(this);
Node* left = node->InputAt(0);
Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ Node* right_of_left = left->InputAt(1);
+ Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
+ g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
+ Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
+ return;
+ }
Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
g.UseRegister(left), g.UseRegister(right));
}
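
Taken together, the covered pattern turns a nested pair of word inserts into a single Bfi followed by one Float64MoveU64, instead of two scratch-register round-trips. A minimal sketch of the value the fused sequence produces, under hypothetical names (make_float64_from_words is not part of V8):

#include <cstdint>
#include <cstring>

// `lo` plays the role of `right` (low word) and `hi` of `right_of_left`
// (high word) in VisitFloat64InsertLowWord32 above.
double make_float64_from_words(uint32_t lo, uint32_t hi) {
  // kArm64Bfi with TempImmediate(32), TempImmediate(32): insert `hi` into
  // bits [32, 64) of the register that already holds `lo`.
  uint64_t bits = (uint64_t{hi} << 32) | lo;
  // kArm64Float64MoveU64: Fmov the combined bits from the general-purpose
  // register into the destination floating-point register.
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}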