bool expandLPMWELPMW(Block &MBB, BlockIt MBBI, bool IsELPM);
// Common implementation of LPMBRdZ and ELPMBRdZ.
bool expandLPMBELPMB(Block &MBB, BlockIt MBBI, bool IsELPM);
+ // Common implementation of ROLBRdR1 and ROLBRdR17.
+ bool expandROLBRd(Block &MBB, BlockIt MBBI);
};
char AVRExpandPseudo::ID = 0;
return true;
}
-template <>
-bool AVRExpandPseudo::expand<AVR::ROLBRd>(Block &MBB, BlockIt MBBI) {
+bool AVRExpandPseudo::expandROLBRd(Block &MBB, BlockIt MBBI) {
// In AVR, the rotate instructions behave quite unintuitively. They rotate
// bits through the carry bit in SREG, effectively rotating over 9 bits,
// instead of 8. This is useful when we are dealing with numbers over
// multiple registers, but when we actually need to rotate stuff, we have
// to explicitly add the carry bit.
- const AVRSubtarget &STI = MBB.getParent()->getSubtarget<AVRSubtarget>();
-
MachineInstr &MI = *MBBI;
unsigned OpShift, OpCarry;
Register DstReg = MI.getOperand(0).getReg();
- Register ZeroReg = STI.getZeroRegister();
+ // The pseudo now carries its zero register as an implicit-use operand (see
+ // the ROLBRdR1/ROLBRdR17 definitions: R1 on regular AVR, R17 on AVRTiny),
+ // so read it from operand 3 of the instruction instead of querying the
+ // subtarget.
+ Register ZeroReg = MI.getOperand(3).getReg();
bool DstIsDead = MI.getOperand(0).isDead();
bool DstIsKill = MI.getOperand(1).isKill();
OpShift = AVR::ADDRdRr;
return true;
}
+// Both zero-register variants share the expansion above; the variants differ
+// only in which register the pseudo implicitly uses.
+template <>
+bool AVRExpandPseudo::expand<AVR::ROLBRdR1>(Block &MBB, BlockIt MBBI) {
+  return expandROLBRd(MBB, MBBI);
+}
+
+template <>
+bool AVRExpandPseudo::expand<AVR::ROLBRdR17>(Block &MBB, BlockIt MBBI) {
+  return expandROLBRd(MBB, MBBI);
+}
+
template <>
bool AVRExpandPseudo::expand<AVR::RORBRd>(Block &MBB, BlockIt MBBI) {
// In AVR, the rotate instructions behave quite unintuitively. They rotate
EXPAND(AVR::OUTWARr);
EXPAND(AVR::PUSHWRr);
EXPAND(AVR::POPWRd);
- EXPAND(AVR::ROLBRd);
+ EXPAND(AVR::ROLBRdR1);
+ EXPAND(AVR::ROLBRdR17);
EXPAND(AVR::RORBRd);
EXPAND(AVR::LSLWRd);
EXPAND(AVR::LSRWRd);
//===----------------------------------------------------------------------===//
MachineBasicBlock *AVRTargetLowering::insertShift(MachineInstr &MI,
- MachineBasicBlock *BB) const {
+ MachineBasicBlock *BB,
+ bool Tiny) const {
unsigned Opc;
const TargetRegisterClass *RC;
bool HasRepeatedOperand = false;
RC = &AVR::DREGSRegClass;
break;
case AVR::Rol8:
- Opc = AVR::ROLBRd;
+ Opc = Tiny ? AVR::ROLBRdR17 : AVR::ROLBRdR1;
RC = &AVR::GPR8RegClass;
break;
case AVR::Rol16:
AVRTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
int Opc = MI.getOpcode();
+ const AVRSubtarget &STI = MBB->getParent()->getSubtarget<AVRSubtarget>();
// Pseudo shift instructions with a non constant shift amount are expanded
// into a loop.
case AVR::Ror16:
case AVR::Asr8:
case AVR::Asr16:
- return insertShift(MI, MBB);
+ return insertShift(MI, MBB, STI.hasTinyEncoding());
case AVR::Lsl32:
case AVR::Lsr32:
case AVR::Asr32:
const AVRSubtarget &Subtarget;
private:
- MachineBasicBlock *insertShift(MachineInstr &MI, MachineBasicBlock *BB) const;
+ MachineBasicBlock *insertShift(MachineInstr &MI, MachineBasicBlock *BB,
+ bool Tiny) const;
MachineBasicBlock *insertWideShift(MachineInstr &MI,
MachineBasicBlock *BB) const;
MachineBasicBlock *insertMul(MachineInstr &MI, MachineBasicBlock *BB) const;
def ASRWLoRd : Pseudo<(outs DREGS:$rd), (ins DREGS:$src), "asrwlo\t$rd",
[(set i16:$rd, (AVRasrlo i16:$src)), (implicit SREG)]>;
-
- def ROLBRd : Pseudo<(outs GPR8
- : $rd),
- (ins GPR8
- : $src),
- "rolb\t$rd",
- [(set i8
- : $rd, (AVRrol i8
- : $src)),
- (implicit SREG)]>;
+ // Rotate-left-through-carry pseudos. The expansion adds the carry bit back
+ // into the result via a known-zero register, so each variant implicitly
+ // uses that register: R1 on regular AVR, R17 on AVRTiny (selected by the
+ // Requires predicates below).
+ let Uses = [R1] in
+ def ROLBRdR1 : Pseudo<(outs GPR8:$rd),
+ (ins GPR8:$src),
+ "rolb\t$rd",
+ [(set i8:$rd, (AVRrol i8:$src)),
+ (implicit SREG)]>,
+ Requires<[HasNonTinyEncoding]>;
+
+ let Uses = [R17] in
+ def ROLBRdR17 : Pseudo<(outs GPR8:$rd),
+ (ins GPR8:$src),
+ "rolb\t$rd",
+ [(set i8:$rd, (AVRrol i8:$src)),
+ (implicit SREG)]>,
+ Requires<[HasTinyEncoding]>;
def RORBRd : Pseudo<(outs GPR8
: $rd),
--- /dev/null
+# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
+
+# This test checks the expansion of the 8-bit ROLBRdR1 (rotate left through
+# carry) pseudo instruction on regular (non-Tiny) AVR, where R1 serves as the
+# zero register.
+
+--- |
+ target triple = "avr--"
+ define void @test_rolbrd() {
+ entry:
+ ret void
+ }
+...
+
+---
+name: test_rolbrd
+body: |
+ bb.0.entry:
+ liveins: $r14
+
+ ; CHECK-LABEL: test_rolbrd
+ ; CHECK: $r14 = ADDRdRr killed $r14, killed $r14, implicit-def $sreg
+ ; CHECK-NEXT: $r14 = ADCRdRr $r14, $r1, implicit-def dead $sreg, implicit killed $sreg
+
+ ; The trailing implicit $r1 is the zero register read by the expansion.
+ $r14 = ROLBRdR1 $r14, implicit-def $sreg, implicit $r1
+...
--- /dev/null
+# RUN: llc -O0 -run-pass=avr-expand-pseudo -mattr=+avrtiny %s -o - | FileCheck %s
+
+# This test checks the expansion of the 8-bit ROLBRdR17 (rotate left through
+# carry) pseudo instruction on AVRTiny, where R17 serves as the zero register.
+
+--- |
+ target triple = "avr--"
+ define void @test_rolbrd() {
+ entry:
+ ret void
+ }
+...
+
+---
+name: test_rolbrd
+body: |
+ bb.0.entry:
+ liveins: $r24
+
+ ; CHECK-LABEL: test_rolbrd
+ ; CHECK: $r24 = ADDRdRr killed $r24, killed $r24, implicit-def $sreg
+ ; CHECK-NEXT: $r24 = ADCRdRr $r24, $r17, implicit-def dead $sreg, implicit killed $sreg
+ ; The trailing implicit $r17 is the zero register read by the expansion.
+ $r24 = ROLBRdR17 $r24, implicit-def $sreg, implicit $r17
+...
+++ /dev/null
-# RUN: llc -O0 -run-pass=avr-expand-pseudo %s -o - | FileCheck %s
-
-# This test checks the expansion of the 8-bit ROLB (rotate) pseudo instruction.
-
---- |
- target triple = "avr--"
- define void @test_rolbrd() {
- entry:
- ret void
- }
-...
-
----
-name: test_rolbrd
-body: |
- bb.0.entry:
- liveins: $r14
-
- ; CHECK-LABEL: test_rolbrd
-
- ; CHECK: $r14 = ADDRdRr killed $r14, killed $r14, implicit-def $sreg
- ; CHECK-NEXT: $r14 = ADCRdRr $r14, $r1, implicit-def dead $sreg, implicit killed $sreg
- $r14 = ROLBRd $r14, implicit-def $sreg
-...
-; RUN: llc < %s -march=avr | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=avr | FileCheck %s
+; RUN: llc < %s -mtriple=avr -mattr=+avrtiny | FileCheck --check-prefix=TINY %s
-; Bit rotation tests.
-
-; CHECK-LABEL: rol8:
+; Bit rotation tests. rol8 is lowered to an lsl/adc loop; the adc reads the
+; zero register, which is r1 on regular AVR and r17 on AVRTiny.
define i8 @rol8(i8 %val, i8 %amt) {
- ; CHECK: andi r22, 7
-
- ; CHECK-NEXT: dec r22
- ; CHECK-NEXT: brmi .LBB0_2
-
-; CHECK-NEXT: .LBB0_1:
- ; CHECK-NEXT: lsl r24
- ; CHECK-NEXT: adc r24, r1
- ; CHECK-NEXT: dec r22
- ; CHECK-NEXT: brpl .LBB0_1
-
-; CHECK-NEXT: .LBB0_2:
- ; CHECK-NEXT: ret
+; CHECK-LABEL: rol8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: andi r22, 7
+; CHECK-NEXT: dec r22
+; CHECK-NEXT: brmi .LBB0_2
+; CHECK-NEXT: .LBB0_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lsl r24
+; CHECK-NEXT: adc r24, r1
+; CHECK-NEXT: dec r22
+; CHECK-NEXT: brpl .LBB0_1
+; CHECK-NEXT: .LBB0_2:
+; CHECK-NEXT: ret
+;
+; TINY-LABEL: rol8:
+; TINY: ; %bb.0:
+; TINY-NEXT: andi r22, 7
+; TINY-NEXT: dec r22
+; TINY-NEXT: brmi .LBB0_2
+; TINY-NEXT: .LBB0_1: ; =>This Inner Loop Header: Depth=1
+; TINY-NEXT: lsl r24
+; TINY-NEXT: adc r24, r17
+; TINY-NEXT: dec r22
+; TINY-NEXT: brpl .LBB0_1
+; TINY-NEXT: .LBB0_2:
+; TINY-NEXT: ret
%mod = urem i8 %amt, 8
-
%inv = sub i8 8, %mod
%parta = shl i8 %val, %mod
%partb = lshr i8 %val, %inv
-
%rotl = or i8 %parta, %partb
-
ret i8 %rotl
}
-; CHECK-LABEL: ror8:
+; Rotate-right needs no zero register, so the bst/ror/bld sequence is
+; identical on regular AVR and AVRTiny.
define i8 @ror8(i8 %val, i8 %amt) {
- ; CHECK: andi r22, 7
-
- ; CHECK-NEXT: dec r22
- ; CHECK-NEXT: brmi .LBB1_2
-
-; CHECK-NEXT: .LBB1_1:
- ; CHECK-NEXT: bst r24, 0
- ; CHECK-NEXT: ror r24
- ; CHECK-NEXT: bld r24, 7
- ; CHECK-NEXT: dec r22
- ; CHECK-NEXT: brpl .LBB1_1
-
-; CHECK-NEXT: .LBB1_2:
- ; CHECK-NEXT: ret
+; CHECK-LABEL: ror8:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: andi r22, 7
+; CHECK-NEXT: dec r22
+; CHECK-NEXT: brmi .LBB1_2
+; CHECK-NEXT: .LBB1_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: bst r24, 0
+; CHECK-NEXT: ror r24
+; CHECK-NEXT: bld r24, 7
+; CHECK-NEXT: dec r22
+; CHECK-NEXT: brpl .LBB1_1
+; CHECK-NEXT: .LBB1_2:
+; CHECK-NEXT: ret
+;
+; TINY-LABEL: ror8:
+; TINY: ; %bb.0:
+; TINY-NEXT: andi r22, 7
+; TINY-NEXT: dec r22
+; TINY-NEXT: brmi .LBB1_2
+; TINY-NEXT: .LBB1_1: ; =>This Inner Loop Header: Depth=1
+; TINY-NEXT: bst r24, 0
+; TINY-NEXT: ror r24
+; TINY-NEXT: bld r24, 7
+; TINY-NEXT: dec r22
+; TINY-NEXT: brpl .LBB1_1
+; TINY-NEXT: .LBB1_2:
+; TINY-NEXT: ret
%mod = urem i8 %amt, 8
-
%inv = sub i8 8, %mod
%parta = lshr i8 %val, %mod
%partb = shl i8 %val, %inv
-
%rotr = or i8 %parta, %partb
-
ret i8 %rotr
}