Only print and parse the braces around a generic machine instruction's types when the instruction has more than one type. This tidies up the representation a bit in the common case of a single type.
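For example (taken from the updated tests below), a single-type instruction is now written as

  %0(32) = G_ADD s32 %w0, %w0

rather than

  %0(32) = G_ADD { s32 } %w0, %w0

while instructions with more than one type keep the braced list.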
llvm-svn: 276772
SmallVector<LLT, 1> Tys;
if (isPreISelGenericOpcode(OpCode)) {
// For a generic opcode, at least one type is mandatory.
- expectAndConsume(MIToken::lbrace);
+ auto Loc = Token.location();
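+ // Braces around the type list are only used when the
+ // instruction has more than one type.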
+ bool ManyTypes = Token.is(MIToken::lbrace);
+ if (ManyTypes)
+ lex();
+
+ // Now actually parse the type(s).
do {
- auto Loc = Token.location();
Tys.resize(Tys.size() + 1);
if (parseLowLevelType(Loc, Tys[Tys.size() - 1]))
return true;
- } while (consumeIfPresent(MIToken::comma));
- expectAndConsume(MIToken::rbrace);
+ } while (ManyTypes && consumeIfPresent(MIToken::comma));
+
+ if (ManyTypes)
+ expectAndConsume(MIToken::rbrace);
}
// Parse the remaining machine operands.
OS << TII->getName(MI.getOpcode());
if (isPreISelGenericOpcode(MI.getOpcode())) {
assert(MI.getType().isValid() && "Generic instructions must have a type");
- OS << " { ";
- for (unsigned i = 0; i < MI.getNumTypes(); ++i) {
+ unsigned NumTypes = MI.getNumTypes();
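+ // Only wrap the type list in braces when there is more than one type.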
+ OS << (NumTypes > 1 ? " {" : "") << ' ';
+ for (unsigned i = 0; i < NumTypes; ++i) {
MI.getType(i).print(OS);
- if (i + 1 != MI.getNumTypes())
+ if (i + 1 != NumTypes)
OS << ", ";
}
- OS << " } ";
+ OS << (NumTypes > 1 ? " }" : "") << ' ';
}
if (I < E)
OS << ' ';
; CHECK-LABEL: name: addi64
; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD { s64 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @addi64(i64 %arg1, i64 %arg2) {
; CHECK-NEXT: - { id: 0, name: ptr1, offset: 0, size: 8, alignment: 8 }
; CHECK-NEXT: - { id: 1, name: ptr2, offset: 0, size: 8, alignment: 1 }
; CHECK-NEXT: - { id: 2, name: ptr3, offset: 0, size: 128, alignment: 8 }
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX { p0 } 0
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX { p0 } 1
-; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX { p0 } 2
+; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 0
+; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 1
+; CHECK: %{{[0-9]+}}(64) = G_FRAME_INDEX p0 2
define void @allocai64() {
%ptr1 = alloca i64
%ptr2 = alloca i64, align 1
; CHECK-NEXT: successors: %[[END:[0-9a-zA-Z._-]+]]({{0x[a-f0-9]+ / 0x[a-f0-9]+}} = 100.00%)
;
; Check that we emit the correct branch.
-; CHECK: G_BR { unsized } %[[END]]
+; CHECK: G_BR unsized %[[END]]
;
; Check that end contains the return instruction.
; CHECK: [[END]]:
; CHECK-LABEL: name: ori64
; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR { s64 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @ori64(i64 %arg1, i64 %arg2) {
; CHECK-LABEL: name: ori32
; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR { s32 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @ori32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: andi64
; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_AND { s64 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_AND s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @andi64(i64 %arg1, i64 %arg2) {
; CHECK-LABEL: name: andi32
; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_AND { s32 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_AND s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @andi32(i32 %arg1, i32 %arg2) {
; CHECK-LABEL: name: subi64
; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
-; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_SUB { s64 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_SUB s64 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %x0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %x0
define i64 @subi64(i64 %arg1, i64 %arg2) {
; CHECK-LABEL: name: subi32
; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
-; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SUB { s32 } [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_SUB s32 [[ARG1]], [[ARG2]]
; CHECK-NEXT: %w0 = COPY [[RES]]
; CHECK-NEXT: RET_ReallyLR implicit %w0
define i32 @subi32(i32 %arg1, i32 %arg2) {
body: |
bb.0.entry:
liveins: %x0
- ; CHECK: %0(32) = G_ADD { s32 } %w0
- %0(32) = G_ADD { s32 } %w0, %w0
+ ; CHECK: %0(32) = G_ADD s32 %w0
+ %0(32) = G_ADD s32 %w0, %w0
...
---
body: |
bb.0.entry:
liveins: %d0
- ; CHECK: %0(64) = G_ADD { <2 x s32> } %d0
- %0(64) = G_ADD { <2 x s32> } %d0, %d0
+ ; CHECK: %0(64) = G_ADD <2 x s32> %d0
+ %0(64) = G_ADD <2 x s32> %d0, %d0
...
---
liveins: %s0, %x0
; CHECK: %0(32) = COPY %s0
; CHECK-NEXT: %2(32) = COPY %0
- ; CHECK-NEXT: %1(32) = G_ADD { s32 } %2, %w0
+ ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %w0
%0(32) = COPY %s0
- %1(32) = G_ADD { s32 } %0, %w0
+ %1(32) = G_ADD s32 %0, %w0
...
# Check that we repair the assignment for %0 differently for both uses.
; CHECK: %0(32) = COPY %s0
; CHECK-NEXT: %2(32) = COPY %0
; CHECK-NEXT: %3(32) = COPY %0
- ; CHECK-NEXT: %1(32) = G_ADD { s32 } %2, %3
+ ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %3
%0(32) = COPY %s0
- %1(32) = G_ADD { s32 } %0, %0
+ %1(32) = G_ADD s32 %0, %0
...
---
bb.0.entry:
liveins: %w0
; CHECK: %0(32) = COPY %w0
- ; CHECK-NEXT: %2(32) = G_ADD { s32 } %0, %w0
+ ; CHECK-NEXT: %2(32) = G_ADD s32 %0, %w0
; CHECK-NEXT: %1(32) = COPY %2
%0(32) = COPY %w0
- %1(32) = G_ADD { s32 } %0, %w0
+ %1(32) = G_ADD s32 %0, %w0
...
---
bb.1.then:
successors: %bb.2.end
- %3(32) = G_ADD { s32 } %0, %0
+ %3(32) = G_ADD s32 %0, %0
bb.2.end:
%4(32) = PHI %0, %bb.0.entry, %3, %bb.1.then
liveins: %w0, %s0
; CHECK: %0(32) = COPY %w0
; CHECK-NEXT: %2(32) = COPY %s0
- ; CHECK-NEXT: %1(32) = G_ADD { s32 } %0, %2
+ ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %2
%0(32) = COPY %w0
- %1(32) = G_ADD { s32 } %0, %s0
+ %1(32) = G_ADD s32 %0, %s0
...
---
bb.0.entry:
liveins: %w0
; CHECK: %0(32) = COPY %w0
- ; CHECK-NEXT: %1(32) = G_ADD { s32 } %0, %0
+ ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %0
; CHECK-NEXT: %s0 = COPY %1
%0(32) = COPY %w0
- %s0 = G_ADD { s32 } %0, %0
+ %s0 = G_ADD s32 %0, %0
...
---
; FAST-NEXT: %3(64) = COPY %0
; FAST-NEXT: %4(64) = COPY %1
; The mapping of G_OR is on FPR.
- ; FAST-NEXT: %2(64) = G_OR { <2 x s32> } %3, %4
+ ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
; Greedy mode remapped the instruction on the GPR bank.
- ; GREEDY-NEXT: %2(64) = G_OR { <2 x s32> } %0, %1
+ ; GREEDY-NEXT: %2(64) = G_OR <2 x s32> %0, %1
%0(64) = COPY %x0
%1(64) = COPY %x1
- %2(64) = G_OR { <2 x s32> } %0, %1
+ %2(64) = G_OR <2 x s32> %0, %1
...
---
; FAST-NEXT: %3(64) = COPY %0
; FAST-NEXT: %4(64) = COPY %1
; The mapping of G_OR is on FPR.
- ; FAST-NEXT: %2(64) = G_OR { <2 x s32> } %3, %4
+ ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
; Greedy mode remapped the instruction on the GPR bank.
- ; GREEDY-NEXT: %3(64) = G_OR { <2 x s32> } %0, %1
+ ; GREEDY-NEXT: %3(64) = G_OR <2 x s32> %0, %1
; We need to keep %2 in FPR because we do not know anything about it.
; GREEDY-NEXT: %2(64) = COPY %3
%0(64) = COPY %x0
%1(64) = COPY %x1
- %2(64) = G_OR { <2 x s32> } %0, %1
+ %2(64) = G_OR <2 x s32> %0, %1
...
bb.0.entry:
liveins: %q0, %q1, %q2, %q3
; CHECK-LABEL: name: test_vector_add
- ; CHECK-DAG: [[LHS_LO:%.*]](128), [[LHS_HI:%.*]](128) = G_EXTRACT { <2 x s64> } %0, 0, 128
- ; CHECK-DAG: [[RHS_LO:%.*]](128), [[RHS_HI:%.*]](128) = G_EXTRACT { <2 x s64> } %1, 0, 128
- ; CHECK: [[RES_LO:%.*]](128) = G_ADD { <2 x s64> } [[LHS_LO]], [[RHS_LO]]
- ; CHECK: [[RES_HI:%.*]](128) = G_ADD { <2 x s64> } [[LHS_HI]], [[RHS_HI]]
- ; CHECK: %2(256) = G_SEQUENCE { <4 x s64> } [[RES_LO]], [[RES_HI]]
+ ; CHECK-DAG: [[LHS_LO:%.*]](128), [[LHS_HI:%.*]](128) = G_EXTRACT <2 x s64> %0, 0, 128
+ ; CHECK-DAG: [[RHS_LO:%.*]](128), [[RHS_HI:%.*]](128) = G_EXTRACT <2 x s64> %1, 0, 128
+ ; CHECK: [[RES_LO:%.*]](128) = G_ADD <2 x s64> [[LHS_LO]], [[RHS_LO]]
+ ; CHECK: [[RES_HI:%.*]](128) = G_ADD <2 x s64> [[LHS_HI]], [[RHS_HI]]
+ ; CHECK: %2(256) = G_SEQUENCE <4 x s64> [[RES_LO]], [[RES_HI]]
- %0(256) = G_SEQUENCE { <4 x s64> } %q0, %q1
- %1(256) = G_SEQUENCE { <4 x s64> } %q2, %q3
- %2(256) = G_ADD { <4 x s64> } %0, %1
- %q0, %q1 = G_EXTRACT { <2 x s64> } %2, 0, 128
+ %0(256) = G_SEQUENCE <4 x s64> %q0, %q1
+ %1(256) = G_SEQUENCE <4 x s64> %q2, %q3
+ %2(256) = G_ADD <4 x s64> %0, %1
+ %q0, %q1 = G_EXTRACT <2 x s64> %2, 0, 128
...
; Tests for add.
; CHECK: name: addi32
-; CHECK: G_ADD { s32 }
+; CHECK: G_ADD s32
define i32 @addi32(i32 %arg1, i32 %arg2) {
%res = add i32 %arg1, %arg2
ret i32 %res
body: |
bb.0.entry:
liveins: %edi
- ; CHECK: %0(32) = G_ADD { s32 } %edi
- %0(32) = G_ADD { s32 } %edi, %edi
- ; CHECK: %1(64) = G_ADD { <2 x s32> } %edi
- %1(64) = G_ADD { <2 x s32> } %edi, %edi
- ; CHECK: %2(64) = G_ADD { s64 } %edi
- %2(64) = G_ADD { s64 } %edi, %edi
+ ; CHECK: %0(32) = G_ADD s32 %edi
+ %0(32) = G_ADD s32 %edi, %edi
+ ; CHECK: %1(64) = G_ADD <2 x s32> %edi
+ %1(64) = G_ADD <2 x s32> %edi, %edi
+ ; CHECK: %2(64) = G_ADD s64 %edi
+ %2(64) = G_ADD s64 %edi, %edi
; G_ADD is not actually valid for a structure type,
; but it is the only opcode we have for now for testing.
- ; CHECK: %3(64) = G_ADD { s64 } %edi
- %3(64) = G_ADD { s64 } %edi, %edi
- ; CHECK: %4(48) = G_ADD { s48 } %edi
- %4(48) = G_ADD { s48 } %edi, %edi
+ ; CHECK: %3(64) = G_ADD s64 %edi
+ %3(64) = G_ADD s64 %edi, %edi
+ ; CHECK: %4(48) = G_ADD s48 %edi
+ %4(48) = G_ADD s48 %edi, %edi
...