getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
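+  // G_ROTR is directly selectable when the rotated value is s32 or s64 and
+  // the rotate amount is a 64-bit scalar. Narrower scalar amounts are
+  // custom-legalized below by extending them to 64 bits; all remaining
+  // forms, including every G_ROTL and all vector rotates, are expanded by
+  // lower().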
+  getActionDefinitionsBuilder(G_ROTR)
+      .legalFor({{s32, s64}, {s64, s64}})
+      .customIf([=](const LegalityQuery &Q) {
+        return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;
+      })
+      .lower();
+  getActionDefinitionsBuilder(G_ROTL).lower();
+
getActionDefinitionsBuilder({G_SBFX, G_UBFX}).customFor({s32, s64});
computeTables();
case TargetOpcode::G_SBFX:
case TargetOpcode::G_UBFX:
return legalizeBitfieldExtract(MI, MRI, Helper);
+  case TargetOpcode::G_ROTR:
+    return legalizeRotate(MI, MRI, Helper);
}
llvm_unreachable("expected switch to return");
}
+bool AArch64LegalizerInfo::legalizeRotate(MachineInstr &MI,
+                                          MachineRegisterInfo &MRI,
+                                          LegalizerHelper &Helper) const {
+  // To allow for imported patterns to match, we ensure that the rotate amount
+  // is 64b with an extension.
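+  // For example (a sketch mirroring the rotr_s32 test; %val, %amt and %ext
+  // are illustrative names, not actual test registers):
+  //   %rot:_(s32) = G_ROTR %val(s32), %amt(s32)
+  // becomes
+  //   %ext:_(s64) = G_SEXT %amt(s32)
+  //   %rot:_(s32) = G_ROTR %val, %ext(s64)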
+  Register AmtReg = MI.getOperand(2).getReg();
+  LLT AmtTy = MRI.getType(AmtReg);
+  (void)AmtTy; // Only used in asserts; silence unused-variable warnings.
+  assert(AmtTy.isScalar() && "Expected a scalar rotate");
+  assert(AmtTy.getSizeInBits() < 64 && "Expected this rotate to be legal");
+  auto NewAmt = Helper.MIRBuilder.buildSExt(LLT::scalar(64), AmtReg);
+  Helper.Observer.changingInstr(MI);
+  MI.getOperand(2).setReg(NewAmt.getReg(0));
+  Helper.Observer.changedInstr(MI);
+  return true;
+}
+
static void extractParts(Register Reg, MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder, LLT Ty, int NumParts,
SmallVectorImpl<Register> &VRegs) {
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=arm64-unknown-unknown -global-isel -run-pass=legalizer -verify-machineinstrs -global-isel-abort=1 %s -o - | FileCheck %s
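+#
+# Test how rotates are legalized: scalar rotate amounts narrower than 64
+# bits should be sign-extended to s64, G_ROTL should be rewritten as G_ROTR
+# with a negated amount, and vector rotates should be fully expanded into
+# shift-and-or sequences.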
+---
+name: rotr_s32
+alignment: 4
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: rotr_s32
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
+    ; CHECK: %rot:_(s32) = G_ROTR [[COPY]], [[SEXT]](s64)
+    ; CHECK: $w0 = COPY %rot(s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %rot:_(s32) = G_ROTR %0(s32), %1(s32)
+    $w0 = COPY %rot(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name: rotr_s64
+alignment: 4
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: rotr_s64
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: %rot:_(s64) = G_ROTR [[COPY]], [[COPY1]](s64)
+    ; CHECK: $x0 = COPY %rot(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %rot:_(s64) = G_ROTR %0(s64), %1(s64)
+    $x0 = COPY %rot(s64)
+    RET_ReallyLR implicit $x0
+
+...
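+# G_ROTL is never legal here; lower() rewrites it as a rotate right by the
+# negated (0 - amount) value, after which the s32 amount is extended just
+# like in the plain G_ROTR case above.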
+---
+name: rotl_s32
+alignment: 4
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $w0, $w1
+    ; CHECK-LABEL: name: rotl_s32
+    ; CHECK: liveins: $w0, $w1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+    ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SUB]](s32)
+    ; CHECK: %rot:_(s32) = G_ROTR [[COPY]], [[SEXT]](s64)
+    ; CHECK: $w0 = COPY %rot(s32)
+    ; CHECK: RET_ReallyLR implicit $w0
+    %0:_(s32) = COPY $w0
+    %1:_(s32) = COPY $w1
+    %rot:_(s32) = G_ROTL %0(s32), %1(s32)
+    $w0 = COPY %rot(s32)
+    RET_ReallyLR implicit $w0
+
+...
+---
+name: rotl_s64
+alignment: 4
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1
+    ; CHECK-LABEL: name: rotl_s64
+    ; CHECK: liveins: $x0, $x1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
+    ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[COPY1]]
+    ; CHECK: %rot:_(s64) = G_ROTR [[COPY]], [[SUB]](s64)
+    ; CHECK: $x0 = COPY %rot(s64)
+    ; CHECK: RET_ReallyLR implicit $x0
+    %0:_(s64) = COPY $x0
+    %1:_(s64) = COPY $x1
+    %rot:_(s64) = G_ROTL %0(s64), %1(s64)
+    $x0 = COPY %rot(s64)
+    RET_ReallyLR implicit $x0
+
+...
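+# Neither rotate direction is legal for vectors, so lower() expands the
+# rotate completely: the amount is masked to the element width (AND with 31
+# for s32 elements), the value is shifted in both directions, and the two
+# halves are combined with G_OR.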
+---
+name: test_rotl_v4s32
+alignment: 4
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: test_rotl_v4s32
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
+    ; CHECK: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND]](<4 x s32>)
+    ; CHECK: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
+    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND1]](<4 x s32>)
+    ; CHECK: %rot:_(<4 x s32>) = G_OR [[SHL]], [[LSHR]]
+    ; CHECK: $q0 = COPY %rot(<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %rot:_(<4 x s32>) = G_ROTL %0(<4 x s32>), %1(<4 x s32>)
+    $q0 = COPY %rot(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...
+---
+name: test_rotr_v4s32
+alignment: 4
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $q0, $q1
+
+    ; CHECK-LABEL: name: test_rotr_v4s32
+    ; CHECK: liveins: $q0, $q1
+    ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $q1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+    ; CHECK: [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32), [[C1]](s32), [[C1]](s32)
+    ; CHECK: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY1]]
+    ; CHECK: [[AND:%[0-9]+]]:_(<4 x s32>) = G_AND [[COPY1]], [[BUILD_VECTOR1]]
+    ; CHECK: [[LSHR:%[0-9]+]]:_(<4 x s32>) = G_LSHR [[COPY]], [[AND]](<4 x s32>)
+    ; CHECK: [[AND1:%[0-9]+]]:_(<4 x s32>) = G_AND [[SUB]], [[BUILD_VECTOR1]]
+    ; CHECK: [[SHL:%[0-9]+]]:_(<4 x s32>) = G_SHL [[COPY]], [[AND1]](<4 x s32>)
+    ; CHECK: %rot:_(<4 x s32>) = G_OR [[LSHR]], [[SHL]]
+    ; CHECK: $q0 = COPY %rot(<4 x s32>)
+    ; CHECK: RET_ReallyLR implicit $q0
+    %0:_(<4 x s32>) = COPY $q0
+    %1:_(<4 x s32>) = COPY $q1
+    %rot:_(<4 x s32>) = G_ROTR %0(<4 x s32>), %1(<4 x s32>)
+    $q0 = COPY %rot(<4 x s32>)
+    RET_ReallyLR implicit $q0
+
+...