From 7f09fd6b045da9fd62529fede180ac3e48a88305 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault@amd.com>
Date: Tue, 5 Feb 2019 00:26:12 +0000
Subject: [PATCH] GlobalISel: Consolidate load/store legalization

The fewerElementsVector implementation for loads and stores handles the
scalar reduction case just as well, so drop the redundant code in
narrowScalar. This also adds support for narrowing scalars with
irregular size breakdowns (e.g. splitting an s96 access into s64 + s32
pieces).

llvm-svn: 353125
---
 .../llvm/CodeGen/GlobalISel/LegalizerHelper.h      |   2 +-
 llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp    | 117 +++------------------
 .../CodeGen/AArch64/GlobalISel/arm64-fallback.ll   |   4 +-
 .../CodeGen/AMDGPU/GlobalISel/legalize-load.mir    |  53 +++++-----
 .../CodeGen/AMDGPU/GlobalISel/legalize-store.mir   |  86 ++++++++++-----
 .../test/CodeGen/X86/GlobalISel/x86_64-fallback.ll |   2 +-
 6 files changed, 106 insertions(+), 158 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index fc383c7..729249c 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -167,7 +167,7 @@ private:
   fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
 
   LegalizeResult
-  fewerElementsVectorLoadStore(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+  reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
 
   LegalizeResult narrowScalarMul(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
 
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index a3bc693..cd2d8c1 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -633,11 +633,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     const auto &MMO = **MI.memoperands_begin();
     unsigned DstReg = MI.getOperand(0).getReg();
     LLT DstTy = MRI.getType(DstReg);
-    int NumParts = SizeOp0 / NarrowSize;
-    unsigned HandledSize = NumParts * NarrowTy.getSizeInBits();
-    unsigned LeftoverBits = DstTy.getSizeInBits() - HandledSize;
-
-    if (DstTy.isVector() && LeftoverBits != 0)
+    if (DstTy.isVector())
       return UnableToLegalize;
 
     if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
@@ -649,68 +645,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
       return Legalized;
     }
 
-    // This implementation doesn't work for atomics. Give up instead of doing
-    // something invalid.
-    if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
-        MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
-      return UnableToLegalize;
-
-    LLT OffsetTy = LLT::scalar(
-        MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
-
-    MachineFunction &MF = MIRBuilder.getMF();
-    SmallVector<unsigned, 2> DstRegs;
-    for (int i = 0; i < NumParts; ++i) {
-      unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
-      unsigned SrcReg = 0;
-      unsigned Offset = i * NarrowSize / 8;
-
-      MachineMemOperand *SplitMMO =
-          MF.getMachineMemOperand(&MMO, Offset, NarrowSize / 8);
-
-      MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy,
-                                Offset);
-
-      MIRBuilder.buildLoad(PartDstReg, SrcReg, *SplitMMO);
-
-      DstRegs.push_back(PartDstReg);
-    }
-
-    unsigned MergeResultReg = LeftoverBits == 0 ? DstReg :
-      MRI.createGenericVirtualRegister(LLT::scalar(HandledSize));
-
-    // For the leftover piece, still create the merge and insert it.
-    // TODO: Would it be better to directly insert the intermediate pieces?
-    if (DstTy.isVector())
-      MIRBuilder.buildBuildVector(MergeResultReg, DstRegs);
-    else
-      MIRBuilder.buildMerge(MergeResultReg, DstRegs);
-
-    if (LeftoverBits == 0) {
-      MI.eraseFromParent();
-      return Legalized;
-    }
-
-    unsigned ImpDefReg = MRI.createGenericVirtualRegister(DstTy);
-    unsigned Insert0Reg = MRI.createGenericVirtualRegister(DstTy);
-    MIRBuilder.buildUndef(ImpDefReg);
-    MIRBuilder.buildInsert(Insert0Reg, ImpDefReg, MergeResultReg, 0);
-
-    unsigned PartDstReg
-      = MRI.createGenericVirtualRegister(LLT::scalar(LeftoverBits));
-    unsigned Offset = HandledSize / 8;
-
-    MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand(
-        &MMO, Offset, LeftoverBits / 8);
-
-    unsigned SrcReg = 0;
-    MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy,
-                              Offset);
-    MIRBuilder.buildLoad(PartDstReg, SrcReg, *SplitMMO);
-    MIRBuilder.buildInsert(DstReg, Insert0Reg, PartDstReg, HandledSize);
-
-    MI.eraseFromParent();
-    return Legalized;
+    return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
   }
   case TargetOpcode::G_ZEXTLOAD:
   case TargetOpcode::G_SEXTLOAD: {
@@ -740,15 +675,18 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     return Legalized;
   }
   case TargetOpcode::G_STORE: {
-    // FIXME: add support for when SizeOp0 isn't an exact multiple of
-    // NarrowSize.
-    if (SizeOp0 % NarrowSize != 0)
-      return UnableToLegalize;
-
     const auto &MMO = **MI.memoperands_begin();
 
     unsigned SrcReg = MI.getOperand(0).getReg();
     LLT SrcTy = MRI.getType(SrcReg);
+    if (SrcTy.isVector())
+      return UnableToLegalize;
+
+    int NumParts = SizeOp0 / NarrowSize;
+    unsigned HandledSize = NumParts * NarrowTy.getSizeInBits();
+    unsigned LeftoverBits = SrcTy.getSizeInBits() - HandledSize;
+    if (SrcTy.isVector() && LeftoverBits != 0)
+      return UnableToLegalize;
 
     if (8 * MMO.getSize() != SrcTy.getSizeInBits()) {
       unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
@@ -759,34 +697,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
       return Legalized;
     }
 
-    // This implementation doesn't work for atomics. Give up instead of doing
-    // something invalid.
-    if (MMO.getOrdering() != AtomicOrdering::NotAtomic ||
-        MMO.getFailureOrdering() != AtomicOrdering::NotAtomic)
-      return UnableToLegalize;
-
-    int NumParts = SizeOp0 / NarrowSize;
-    LLT OffsetTy = LLT::scalar(
-        MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
-
-    SmallVector<unsigned, 2> SrcRegs;
-    extractParts(MI.getOperand(0).getReg(), NarrowTy, NumParts, SrcRegs);
-
-    MachineFunction &MF = MIRBuilder.getMF();
-    for (int i = 0; i < NumParts; ++i) {
-      unsigned DstReg = 0;
-      unsigned Offset = i * NarrowSize / 8;
-
-      MachineMemOperand *SplitMMO =
-          MF.getMachineMemOperand(&MMO, Offset, NarrowSize / 8);
-
-      MIRBuilder.materializeGEP(DstReg, MI.getOperand(1).getReg(), OffsetTy,
-                                Offset);
-
-      MIRBuilder.buildStore(SrcRegs[i], DstReg, *SplitMMO);
-    }
-    MI.eraseFromParent();
-    return Legalized;
+    return reduceLoadStoreWidth(MI, 0, NarrowTy);
   }
   case TargetOpcode::G_CONSTANT: {
     // FIXME: add support for when SizeOp0 isn't an exact multiple of
@@ -2036,8 +1947,8 @@ static int getNarrowTypeBreakDown(LLT OrigTy, LLT NarrowTy, LLT &LeftoverTy) {
 }
 
 LegalizerHelper::LegalizeResult
-LegalizerHelper::fewerElementsVectorLoadStore(MachineInstr &MI, unsigned TypeIdx,
-                                              LLT NarrowTy) {
+LegalizerHelper::reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx,
+                                      LLT NarrowTy) {
   // FIXME: Don't know how to handle secondary types yet.
   if (TypeIdx != 0)
     return UnableToLegalize;
 
@@ -2177,7 +2088,7 @@ LegalizerHelper::fewerElementsVector(MachineInstr &MI, unsigned TypeIdx,
     return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy);
   case G_LOAD:
   case G_STORE:
-    return fewerElementsVectorLoadStore(MI, TypeIdx, NarrowTy);
+    return reduceLoadStoreWidth(MI, TypeIdx, NarrowTy);
   default:
     return UnableToLegalize;
   }
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index da3aa3c..c75771a 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -205,7 +205,7 @@ define void @nonpow2_load_narrowing() {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %3:_(s96), %0:_(p0) :: (store 12 into %ir.c, align 16) (in function: nonpow2_store_narrowing
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %4:_(s64) = G_EXTRACT %3:_(s96), 0 (in function: nonpow2_store_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
 define void @nonpow2_store_narrowing(i96* %c) {
@@ -215,7 +215,7 @@ define void @nonpow2_store_narrowing(i96* %c) {
   ret void
 }
 
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %0:_(s96), %1:_(p0) :: (store 12 into `i96* undef`, align 16) (in function: nonpow2_constant_narrowing)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %0:_(s96) = G_CONSTANT i96 0 (in function: nonpow2_constant_narrowing)
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
 ; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
 define void @nonpow2_constant_narrowing() {
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir
index 2115e82..bfccdb1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir
@@ -243,7 +243,7 @@ body: |
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s128) = G_LOAD %0 :: (load 4, addrspace 1, align 4)
     $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
-    $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1
+
 ...
 
 ---
@@ -255,12 +255,11 @@ body: |
     ; SI-LABEL: name: test_load_global_s96_align4
     ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1)
-    ; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[LOAD]](s64)
-    ; SI: [[DEF:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
-    ; SI: [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF]], [[COPY1]](s64), 0
     ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
     ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4, addrspace 1)
+    ; SI: [[DEF:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF]], [[LOAD]](s64), 0
     ; SI: [[INSERT1:%[0-9]+]]:_(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64
     ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](s96)
     ; VI-LABEL: name: test_load_global_s96_align4
@@ -285,28 +284,28 @@ body: |
     ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
     ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1)
-    ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64)
-    ; SI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
-    ; SI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[MV]](s128), 0
     ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p1) :: (load 4, addrspace 1)
-    ; SI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD2]](s32), 128
-    ; SI: S_NOP 0, implicit [[INSERT1]](s160)
+    ; SI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; SI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; SI: [[INSERT2:%[0-9]+]]:_(s160) = G_INSERT [[INSERT1]], [[LOAD2]](s32), 128
+    ; SI: S_NOP 0, implicit [[INSERT2]](s160)
     ; VI-LABEL: name: test_load_global_s160_align4
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1)
     ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
     ; VI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64)
     ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1)
-    ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[MV]](s128), 0
     ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p1) :: (load 4, addrspace 1)
-    ; VI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD2]](s32), 128
-    ; VI: S_NOP 0, implicit [[INSERT1]](s160)
+    ; VI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; VI: [[INSERT2:%[0-9]+]]:_(s160) = G_INSERT [[INSERT1]], [[LOAD2]](s32), 128
+    ; VI: S_NOP 0, implicit [[INSERT2]](s160)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s160) = G_LOAD %0 :: (load 20, addrspace 1, align 4)
     S_NOP 0, implicit %1
@@ -327,16 +326,17 @@ body: |
     ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p1) :: (load 8, align 4, addrspace 1)
-    ; SI: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; SI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
-    ; SI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[MV]](s192), 0
     ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
     ; SI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64)
     ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p1) :: (load 4, addrspace 1)
-    ; SI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD3]](s32), 192
+    ; SI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
+    ; SI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; SI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; SI: [[INSERT2:%[0-9]+]]:_(s224) = G_INSERT [[INSERT1]], [[LOAD2]](s64), 128
+    ; SI: [[INSERT3:%[0-9]+]]:_(s224) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 192
     ; SI: [[DEF1:%[0-9]+]]:_(s256) = G_IMPLICIT_DEF
-    ; SI: [[INSERT2:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT1]](s224), 0
-    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](s256)
+    ; SI: [[INSERT4:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT3]](s224), 0
+    ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT4]](s256)
     ; VI-LABEL: name: test_load_global_s224_align4
     ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
     ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1)
@@ -346,16 +346,17 @@ body: |
     ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
     ; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64)
     ; VI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p1) :: (load 8, align 4, addrspace 1)
-    ; VI: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64)
-    ; VI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
-    ; VI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[MV]](s192), 0
     ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
     ; VI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64)
    ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p1) :: (load 4, addrspace 1)
-    ; VI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD3]](s32), 192
+    ; VI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF
+    ; VI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[LOAD]](s64), 0
+    ; VI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD1]](s64), 64
+    ; VI: [[INSERT2:%[0-9]+]]:_(s224) = G_INSERT [[INSERT1]], [[LOAD2]](s64), 128
+    ; VI: [[INSERT3:%[0-9]+]]:_(s224) = G_INSERT [[INSERT2]], [[LOAD3]](s32), 192
     ; VI: [[DEF1:%[0-9]+]]:_(s256) = G_IMPLICIT_DEF
-    ; VI: [[INSERT2:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT1]](s224), 0
-    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](s256)
+    ; VI: [[INSERT4:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT3]](s224), 0
+    ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT4]](s256)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s224) = G_LOAD %0 :: (load 28, addrspace 1, align 4)
 
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
index 90cdbab..d5fabac 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir
@@ -258,13 +258,6 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2
 
-    ; CHECK-LABEL: name: test_store_global_i1
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-    ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
-    ; CHECK: G_STORE [[AND]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
     %2:_(s1) = G_TRUNC %1
@@ -277,11 +270,16 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2
 
-    ; CHECK-LABEL: name: test_store_global_i8
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-    ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; SI-LABEL: name: test_store_global_i8
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
+    ; VI-LABEL: name: test_store_global_i8
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
     %2:_(s8) = G_TRUNC %1
@@ -294,11 +292,16 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2
 
-    ; CHECK-LABEL: name: test_store_global_i16
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
-    ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
+    ; SI-LABEL: name: test_store_global_i16
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; SI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
+    ; VI-LABEL: name: test_store_global_i16
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
+    ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+    ; VI: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
     %2:_(s16) = G_TRUNC %1
@@ -306,15 +309,44 @@ body: |
 ...
 
 ---
+name: test_store_global_96
+body: |
+  bb.0:
+    liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4
+
+    ; SI-LABEL: name: test_store_global_96
+    ; SI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+    ; SI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
+    ; SI: [[EXTRACT:%[0-9]+]]:_(s64) = G_EXTRACT [[COPY]](s96), 0
+    ; SI: [[EXTRACT1:%[0-9]+]]:_(s32) = G_EXTRACT [[COPY]](s96), 64
+    ; SI: G_STORE [[EXTRACT]](s64), [[COPY1]](p1) :: (store 8, align 16, addrspace 1)
+    ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+    ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY1]], [[C]](s64)
+    ; SI: G_STORE [[EXTRACT1]](s32), [[GEP]](p1) :: (store 4, align 8, addrspace 1)
+    ; VI-LABEL: name: test_store_global_96
+    ; VI: [[COPY:%[0-9]+]]:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+    ; VI: [[COPY1:%[0-9]+]]:_(p1) = COPY $vgpr3_vgpr4
+    ; VI: G_STORE [[COPY]](s96), [[COPY1]](p1) :: (store 12, align 16, addrspace 1)
+    %0:_(s96) = COPY $vgpr0_vgpr1_vgpr2
+    %1:_(p1) = COPY $vgpr3_vgpr4
+
+    G_STORE %0, %1 :: (store 12, addrspace 1, align 16)
+...
+
+---
 name: test_store_global_i128
 body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
 
-    ; CHECK-LABEL: name: test_store_global_i128
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
-    ; CHECK: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI-LABEL: name: test_store_global_i128
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; SI: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_i128
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; VI: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
     G_STORE %1, %0 :: (store 16, addrspace 1)
@@ -326,10 +358,14 @@ body: |
   bb.0:
     liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5
 
-    ; CHECK-LABEL: name: test_store_global_v2s64
-    ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
-    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
-    ; CHECK: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; SI-LABEL: name: test_store_global_v2s64
+    ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; SI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; SI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
+    ; VI-LABEL: name: test_store_global_v2s64
+    ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1
+    ; VI: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
+    ; VI: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1)
     %0:_(p1) = COPY $vgpr0_vgpr1
     %1:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5
     G_STORE %1, %0 :: (store 16, addrspace 1)
diff --git a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
index 92bd661..8083af3 100644
--- a/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
+++ b/llvm/test/CodeGen/X86/GlobalISel/x86_64-fallback.ll
@@ -8,7 +8,7 @@
 ; the fallback path.
 
 ; Check that we fallback on invoke translation failures.
-; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %1:_(s80), %0:_(p0) :: (store 10 into %ir.ptr, align 16) (in function: test_x86_fp80_dump)
+; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %1:_(s80) = G_FCONSTANT x86_fp80 0xK4002A000000000000000
 ; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for test_x86_fp80_dump
 ; FALLBACK-WITH-REPORT-OUT-LABEL: test_x86_fp80_dump:
 define void @test_x86_fp80_dump(x86_fp80* %ptr){
-- 
2.7.4
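
A note on the consolidated algorithm (supplementary commentary, not part
of the applied patch): reduceLoadStoreWidth splits a wide scalar access
into as many NarrowTy-sized accesses as fit and then, for an irregular
breakdown, one smaller access covering the remainder, as the
test_store_global_96 checks above show (s96 becomes an s64 store at
offset 0 plus an s32 store at byte offset 8). The standalone C++ sketch
below mirrors that offset arithmetic; the Part struct and breakDown
helper are illustrative names rather than LLVM API, and the byte offsets
assume the narrow width is a multiple of 8 bits, as in the cases this
patch handles.

// Standalone sketch of the width reduction; illustrative, not LLVM code.
#include <cstdio>
#include <vector>

struct Part {
  unsigned SizeBits;    // width of one narrowed load/store
  unsigned OffsetBytes; // byte offset that would be applied via G_GEP
};

static std::vector<Part> breakDown(unsigned TotalBits, unsigned NarrowBits) {
  std::vector<Part> Parts;
  unsigned NumParts = TotalBits / NarrowBits;
  unsigned HandledBits = NumParts * NarrowBits;
  // Full-width parts first, each at the next narrow-sized byte offset.
  for (unsigned I = 0; I != NumParts; ++I)
    Parts.push_back({NarrowBits, I * NarrowBits / 8});
  // Irregular breakdown: one extra, smaller access covers the tail.
  if (unsigned LeftoverBits = TotalBits - HandledBits)
    Parts.push_back({LeftoverBits, HandledBits / 8});
  return Parts;
}

int main() {
  // s96 narrowed to s64 prints "s64 at byte 0" then "s32 at byte 8",
  // matching the split memory operands the legalizer creates.
  for (const Part &P : breakDown(96, 64))
    std::printf("s%u at byte %u\n", P.SizeBits, P.OffsetBytes);
}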