def G_REV16 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  // Pure data reordering: marking it side-effect free lets the generic
  // optimizers CSE/DCE the pseudo.
  let hasSideEffects = 0;
}
// Pseudo for a rev32 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_REV32 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  // Pure data reordering; safe to CSE/DCE.
  let hasSideEffects = 0;
}
// Pseudo for a rev64 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_REV64 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$src);
  // Pure data reordering; safe to CSE/DCE.
  let hasSideEffects = 0;
}
// Represents an uzp1 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_UZP1 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$v1, type0:$v2);
  // Pure lane shuffle of the two inputs; no side effects.
  let hasSideEffects = 0;
}
// Represents an uzp2 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_UZP2 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$v1, type0:$v2);
  // Pure lane shuffle of the two inputs; no side effects.
  let hasSideEffects = 0;
}
// Represents a zip1 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_ZIP1 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$v1, type0:$v2);
  // Pure lane shuffle of the two inputs; no side effects.
  let hasSideEffects = 0;
}
// Represents a zip2 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_ZIP2 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$v1, type0:$v2);
  // Pure lane shuffle of the two inputs; no side effects.
  let hasSideEffects = 0;
}
// Represents a dup instruction. Produced post-legalization from
// a single-element source broadcast into every lane of $dst.
// NOTE(review): the original comment is truncated here — confirm the
// exact producer (e.g. build-vector lowering) against the combiner code.
def G_DUP : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  // Unlike the shuffle pseudos above, the input is a scalar/lane value
  // (type1), not a vector of the result type.
  let InOperandList = (ins type1:$lane);
  // Pure broadcast; no side effects.
  let hasSideEffects = 0;
}
// Represents a trn1 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_TRN1 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$v1, type0:$v2);
  // Pure lane shuffle of the two inputs; no side effects.
  let hasSideEffects = 0;
}
// Represents a trn2 instruction. Produced post-legalization from
// G_SHUFFLE_VECTORs with appropriate masks.
def G_TRN2 : AArch64GenericInstruction {
  let OutOperandList = (outs type0:$dst);
  let InOperandList = (ins type0:$v1, type0:$v2);
  // Pure lane shuffle of the two inputs; no side effects.
  let hasSideEffects = 0;
}
// Tell the GlobalISel pattern importer that the AArch64rev16 SelectionDAG
// node corresponds to the G_REV16 generic opcode, so existing TableGen
// selection patterns written against AArch64rev16 also match G_REV16.
def : GINodeEquiv<G_REV16, AArch64rev16>;
; CHECK: liveins: $d0
; CHECK: %copy:fpr64 = COPY $d0
; CHECK: %rev:fpr64 = REV64v2i32 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $d0
%copy:fpr(<2 x s32>) = COPY $d0
%rev:fpr(<2 x s32>) = G_REV64 %copy
- RET_ReallyLR
+ $d0 = COPY %rev(<2 x s32>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: liveins: $d0
; CHECK: %copy:fpr64 = COPY $d0
; CHECK: %rev:fpr64 = REV64v4i16 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $d0
%copy:fpr(<4 x s16>) = COPY $d0
%rev:fpr(<4 x s16>) = G_REV64 %copy
- RET_ReallyLR
+ $d0 = COPY %rev(<4 x s16>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr128 = COPY $q0
; CHECK: %rev:fpr128 = REV64v4i32 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $q0
%copy:fpr(<4 x s32>) = COPY $q0
%rev:fpr(<4 x s32>) = G_REV64 %copy
- RET_ReallyLR
+ $q0 = COPY %rev(<4 x s32>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr64 = COPY $d0
; CHECK: %rev:fpr64 = REV64v8i8 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $d0
%copy:fpr(<8 x s8>) = COPY $d0
%rev:fpr(<8 x s8>) = G_REV64 %copy
- RET_ReallyLR
+ $d0 = COPY %rev(<8 x s8>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr128 = COPY $q0
; CHECK: %rev:fpr128 = REV64v8i16 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $q0
%copy:fpr(<8 x s16>) = COPY $q0
%rev:fpr(<8 x s16>) = G_REV64 %copy
- RET_ReallyLR
+ $q0 = COPY %rev(<8 x s16>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr128 = COPY $q0
; CHECK: %rev:fpr128 = REV64v16i8 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $q0
%copy:fpr(<16 x s8>) = COPY $q0
%rev:fpr(<16 x s8>) = G_REV64 %copy
- RET_ReallyLR
+ $q0 = COPY %rev(<16 x s8>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: liveins: $d0
; CHECK: %copy:fpr64 = COPY $d0
; CHECK: %rev:fpr64 = REV32v4i16 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $d0
%copy:fpr(<4 x s16>) = COPY $d0
%rev:fpr(<4 x s16>) = G_REV32 %copy
- RET_ReallyLR
+ $d0 = COPY %rev(<4 x s16>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: liveins: $d0
; CHECK: %copy:fpr64 = COPY $d0
; CHECK: %rev:fpr64 = REV32v8i8 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $d0
%copy:fpr(<8 x s8>) = COPY $d0
%rev:fpr(<8 x s8>) = G_REV32 %copy
- RET_ReallyLR
+ $d0 = COPY %rev(<8 x s8>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr128 = COPY $q0
; CHECK: %rev:fpr128 = REV32v8i16 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $q0
%copy:fpr(<8 x s16>) = COPY $q0
%rev:fpr(<8 x s16>) = G_REV32 %copy
- RET_ReallyLR
+ $q0 = COPY %rev(<8 x s16>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr128 = COPY $q0
; CHECK: %rev:fpr128 = REV32v16i8 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $q0
%copy:fpr(<16 x s8>) = COPY $q0
%rev:fpr(<16 x s8>) = G_REV32 %copy
- RET_ReallyLR
+ $q0 = COPY %rev(<16 x s8>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr64 = COPY $d0
; CHECK: %rev:fpr64 = REV16v8i8 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $d0
%copy:fpr(<8 x s8>) = COPY $d0
%rev:fpr(<8 x s8>) = G_REV16 %copy
- RET_ReallyLR
+ $d0 = COPY %rev(<8 x s8>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: liveins: $q0
; CHECK: %copy:fpr128 = COPY $q0
; CHECK: %rev:fpr128 = REV16v16i8 %copy
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY %rev
+ ; CHECK: RET_ReallyLR implicit $q0
%copy:fpr(<16 x s8>) = COPY $q0
%rev:fpr(<16 x s8>) = G_REV16 %copy
- RET_ReallyLR
+ $q0 = COPY %rev(<16 x s8>)
+ RET_ReallyLR implicit $q0
; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: [[TRN1v2i32_:%[0-9]+]]:fpr64 = TRN1v2i32 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY [[TRN1v2i32_]]
+ ; CHECK: RET_ReallyLR implicit $d0
%0:fpr(<2 x s32>) = COPY $d0
%1:fpr(<2 x s32>) = COPY $d1
%2:fpr(<2 x s32>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $d0 = COPY %2(<2 x s32>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN1v2i64_:%[0-9]+]]:fpr128 = TRN1v2i64 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN1v2i64_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<2 x s64>) = COPY $q0
%1:fpr(<2 x s64>) = COPY $q1
%2:fpr(<2 x s64>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<2 x s64>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: [[TRN1v4i16_:%[0-9]+]]:fpr64 = TRN1v4i16 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY [[TRN1v4i16_]]
+ ; CHECK: RET_ReallyLR implicit $d0
%0:fpr(<4 x s16>) = COPY $d0
%1:fpr(<4 x s16>) = COPY $d1
%2:fpr(<4 x s16>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $d0 = COPY %2(<4 x s16>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN1v4i32_:%[0-9]+]]:fpr128 = TRN1v4i32 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN1v4i32_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<4 x s32>) = COPY $q0
%1:fpr(<4 x s32>) = COPY $q1
%2:fpr(<4 x s32>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<4 x s32>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: [[TRN1v8i8_:%[0-9]+]]:fpr64 = TRN1v8i8 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY [[TRN1v8i8_]]
+ ; CHECK: RET_ReallyLR implicit $d0
%0:fpr(<8 x s8>) = COPY $d0
%1:fpr(<8 x s8>) = COPY $d1
%2:fpr(<8 x s8>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $d0 = COPY %2(<8 x s8>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN1v8i16_:%[0-9]+]]:fpr128 = TRN1v8i16 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN1v8i16_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<8 x s16>) = COPY $q0
%1:fpr(<8 x s16>) = COPY $q1
%2:fpr(<8 x s16>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<8 x s16>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN1v16i8_:%[0-9]+]]:fpr128 = TRN1v16i8 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN1v16i8_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<16 x s8>) = COPY $q0
%1:fpr(<16 x s8>) = COPY $q1
%2:fpr(<16 x s8>) = G_TRN1 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<16 x s8>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: [[TRN2v2i32_:%[0-9]+]]:fpr64 = TRN2v2i32 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY [[TRN2v2i32_]]
+ ; CHECK: RET_ReallyLR implicit $d0
%0:fpr(<2 x s32>) = COPY $d0
%1:fpr(<2 x s32>) = COPY $d1
%2:fpr(<2 x s32>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $d0 = COPY %2(<2 x s32>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN2v2i64_:%[0-9]+]]:fpr128 = TRN2v2i64 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN2v2i64_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<2 x s64>) = COPY $q0
%1:fpr(<2 x s64>) = COPY $q1
%2:fpr(<2 x s64>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<2 x s64>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: [[TRN2v4i16_:%[0-9]+]]:fpr64 = TRN2v4i16 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY [[TRN2v4i16_]]
+ ; CHECK: RET_ReallyLR implicit $d0
%0:fpr(<4 x s16>) = COPY $d0
%1:fpr(<4 x s16>) = COPY $d1
%2:fpr(<4 x s16>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $d0 = COPY %2(<4 x s16>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN2v4i32_:%[0-9]+]]:fpr128 = TRN2v4i32 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN2v4i32_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<4 x s32>) = COPY $q0
%1:fpr(<4 x s32>) = COPY $q1
%2:fpr(<4 x s32>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<4 x s32>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr64 = COPY $d0
; CHECK: [[COPY1:%[0-9]+]]:fpr64 = COPY $d1
; CHECK: [[TRN2v8i8_:%[0-9]+]]:fpr64 = TRN2v8i8 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $d0 = COPY [[TRN2v8i8_]]
+ ; CHECK: RET_ReallyLR implicit $d0
%0:fpr(<8 x s8>) = COPY $d0
%1:fpr(<8 x s8>) = COPY $d1
%2:fpr(<8 x s8>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $d0 = COPY %2(<8 x s8>)
+ RET_ReallyLR implicit $d0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN2v8i16_:%[0-9]+]]:fpr128 = TRN2v8i16 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN2v8i16_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<8 x s16>) = COPY $q0
%1:fpr(<8 x s16>) = COPY $q1
%2:fpr(<8 x s16>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<8 x s16>)
+ RET_ReallyLR implicit $q0
...
---
; CHECK: [[COPY:%[0-9]+]]:fpr128 = COPY $q0
; CHECK: [[COPY1:%[0-9]+]]:fpr128 = COPY $q1
; CHECK: [[TRN2v16i8_:%[0-9]+]]:fpr128 = TRN2v16i8 [[COPY]], [[COPY1]]
- ; CHECK: RET_ReallyLR
+ ; CHECK: $q0 = COPY [[TRN2v16i8_]]
+ ; CHECK: RET_ReallyLR implicit $q0
%0:fpr(<16 x s8>) = COPY $q0
%1:fpr(<16 x s8>) = COPY $q1
%2:fpr(<16 x s8>) = G_TRN2 %0, %1
- RET_ReallyLR
+ $q0 = COPY %2(<16 x s8>)
+ RET_ReallyLR implicit $q0