From 60442f0d442723a487528bdd8b48b24657a025e8 Mon Sep 17 00:00:00 2001
From: Nikita Popov
Date: Thu, 5 Jan 2023 13:13:06 +0100
Subject: [PATCH] [CodeGen] Convert some tests to opaque pointers (NFC)

These are mostly MIR tests, which I did not handle during previous
conversions.
---
(See the sketch after the final hunk for an illustrative before/after
example of the conversion.)

 .../AMDGPU/GlobalISel/inst-select-load-smrd.mir | 2 +- .../AMDGPU/GlobalISel/legalize-block-addr.mir | 6 +- .../GlobalISel/legalize-load-memory-metadata.mir | 16 +- .../no-cse-nonlocal-convergent-instrs.mir | 2 +- .../AMDGPU/GlobalISel/regbankselect-block-addr.mir | 6 +- .../AMDGPU/GlobalISel/regbankselect-load.mir | 60 ++-- .../regbankselect-split-scalar-load-metadata.mir | 8 +- .../CodeGen/AMDGPU/GlobalISel/regbankselect.mir | 32 +-- .../CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll | 8 +- .../CodeGen/AMDGPU/clamp-omod-special-case.mir | 24 +- ...coalescer-subranges-another-copymi-not-live.mir | 2 +- .../CodeGen/AMDGPU/constant-fold-imm-immreg.mir | 2 +- llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir | 22 +- llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir | 170 +++++------ .../CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir | 64 ++--- llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir | 16 +- llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir | 14 +- llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir | 34 +-- llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir | 40 +-- llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir | 40 +-- .../AMDGPU/memory-legalizer-atomic-insert-end.mir | 18 +- .../AMDGPU/memory-legalizer-invalid-addrspace.mir | 10 +- .../test/CodeGen/AMDGPU/memory-legalizer-local.mir | 316 ++++++++++----------- ...ory-legalizer-multiple-mem-operands-atomics.mir | 16 +- .../CodeGen/AMDGPU/memory-legalizer-region.mir | 316 ++++++++++----------- llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir | 32 +-- .../CodeGen/AMDGPU/merge-out-of-order-ldst.mir | 14 +- llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir | 6 +- .../CodeGen/AMDGPU/optimize-if-exec-masking.mir | 2 +- .../test/CodeGen/AMDGPU/phi-elimination-end-cf.mir | 2 +- llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir | 6 +- .../AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll | 2 +- llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir | 6 +- llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir | 30 +- .../AMDGPU/sched-group-barrier-pipeline-solver.mir | 8 +- .../CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir | 8 +- llvm/test/CodeGen/AMDGPU/schedule-ilp.mir | 2 +- llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir | 2 +- .../CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir | 4 +- .../CodeGen/AMDGPU/smem-no-clause-coalesced.mir | 2 +- .../AMDGPU/undefined-physreg-sgpr-spill.mir | 4 +- llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir | 8 +- .../test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir | 8 +- llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir | 2 +- llvm/test/CodeGen/AMDGPU/waitcnt.mir | 12 +- llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir | 10 +- .../test/CodeGen/ARM/GlobalISel/select-pr35926.mir | 2 +- llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir | 10 +- llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir | 8 +- llvm/test/CodeGen/ARM/codesize-ifcvt.mir | 26 +- llvm/test/CodeGen/ARM/const-load-align-thumb.mir | 4 +- llvm/test/CodeGen/ARM/dbg-range-extension.mir | 2 +- llvm/test/CodeGen/ARM/fp16-litpool-arm.mir | 10 +- llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir | 10 +- llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir | 6 +- llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir | 6 +- .../ARM/ifcvt-diamond-unanalyzable-common.mir | 4 +- llvm/test/CodeGen/ARM/machine-sink-multidef.mir | 10 +-
llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir | 6 +- llvm/test/CodeGen/ARM/noreturn-csr-skip.mir | 2 +- llvm/test/CodeGen/ARM/pei-swiftself.mir | 2 +- llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir | 14 +- llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir | 4 +- llvm/test/CodeGen/ARM/single-issue-r52.mir | 6 +- llvm/test/CodeGen/ARM/stack_frame_offset.mir | 14 +- llvm/test/CodeGen/ARM/store-prepostinc.mir | 42 +-- llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir | 10 +- llvm/test/CodeGen/ARM/vldm-liveness.mir | 2 +- llvm/test/CodeGen/ARM/vldmia-sched.mir | 4 +- llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir | 2 +- llvm/test/CodeGen/Hexagon/addrmode-immop.mir | 12 +- llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir | 9 +- llvm/test/CodeGen/Hexagon/bank-conflict-load.mir | 2 +- llvm/test/CodeGen/Hexagon/bank-conflict.mir | 27 +- llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir | 10 +- llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir | 2 +- llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir | 4 +- .../CodeGen/Hexagon/early-if-conv-lifetime.mir | 11 +- llvm/test/CodeGen/Hexagon/early-if-predicator.mir | 4 +- llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir | 8 +- llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir | 4 +- .../CodeGen/Hexagon/pipeliner/swp-phi-start.mir | 14 +- llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir | 2 +- llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir | 4 +- .../Mips/GlobalISel/instruction-select/add_vec.mir | 8 +- .../GlobalISel/instruction-select/brindirect.mir | 4 +- .../GlobalISel/instruction-select/fabs_vec.mir | 4 +- .../Mips/GlobalISel/instruction-select/fence.mir | 2 +- .../floating_point_vec_arithmetic_operations.mir | 16 +- .../GlobalISel/instruction-select/fsqrt_vec.mir | 4 +- .../instruction-select/gloal_address.mir | 2 +- .../Mips/GlobalISel/instruction-select/load.mir | 6 +- .../instruction-select/load_4_unaligned.mir | 6 +- .../instruction-select/load_4_unaligned_r6.mir | 6 +- .../instruction-select/load_store_vec.mir | 12 +- .../Mips/GlobalISel/instruction-select/mul.mir | 2 +- .../Mips/GlobalISel/instruction-select/mul_vec.mir | 8 +- .../GlobalISel/instruction-select/pointers.mir | 6 +- .../instruction-select/rem_and_div_vec.mir | 38 +-- .../Mips/GlobalISel/instruction-select/store.mir | 6 +- .../instruction-select/store_4_unaligned.mir | 6 +- .../instruction-select/store_4_unaligned_r6.mir | 6 +- .../Mips/GlobalISel/instruction-select/sub_vec.mir | 8 +- .../instruction-select/truncStore_and_aExtLoad.mir | 6 +- .../instruction-select/zextLoad_and_sextLoad.mir | 8 +- .../test/CodeGen/Mips/GlobalISel/legalizer/add.mir | 2 +- .../CodeGen/Mips/GlobalISel/legalizer/add_vec.mir | 8 +- .../Mips/GlobalISel/legalizer/add_vec_builtin.mir | 16 +- .../Mips/GlobalISel/legalizer/brindirect.mir | 4 +- .../Mips/GlobalISel/legalizer/dyn_stackalloc.mir | 12 +- .../CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir | 4 +- .../Mips/GlobalISel/legalizer/fabs_vec_builtin.mir | 4 +- .../CodeGen/Mips/GlobalISel/legalizer/fence.mir | 2 +- .../floating_point_vec_arithmetic_operations.mir | 16 +- ...ing_point_vec_arithmetic_operations_builtin.mir | 16 +- .../Mips/GlobalISel/legalizer/fsqrt_vec.mir | 4 +- .../GlobalISel/legalizer/fsqrt_vec_builtin.mir | 4 +- .../Mips/GlobalISel/legalizer/global_address.mir | 2 +- .../Mips/GlobalISel/legalizer/load_4_unaligned.mir | 16 +- .../test/CodeGen/Mips/GlobalISel/legalizer/mul.mir | 2 +- .../CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir | 8 +- .../Mips/GlobalISel/legalizer/mul_vec_builtin.mir | 8 +- 
.../test/CodeGen/Mips/GlobalISel/legalizer/phi.mir | 16 +- .../CodeGen/Mips/GlobalISel/legalizer/pointers.mir | 6 +- .../Mips/GlobalISel/legalizer/rem_and_div_vec.mir | 38 +-- .../legalizer/rem_and_div_vec_builtin.mir | 32 +-- .../GlobalISel/legalizer/store_4_unaligned.mir | 16 +- .../CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir | 8 +- .../Mips/GlobalISel/legalizer/sub_vec_builtin.mir | 16 +- .../GlobalISel/legalizer/zextLoad_and_sextLoad.mir | 20 +- .../truncStore_and_aExtLoad.mir | 4 +- .../zextLoad_and_sextLoad.mir | 20 +- .../regbankselect/TypeInfoforMF_skipCopies.mir | 4 +- .../Mips/GlobalISel/regbankselect/add_vec.mir | 8 +- .../Mips/GlobalISel/regbankselect/brindirect.mir | 4 +- .../Mips/GlobalISel/regbankselect/fabs_vec.mir | 4 +- .../Mips/GlobalISel/regbankselect/fence.mir | 2 +- .../floating_point_vec_arithmetic_operations.mir | 16 +- .../Mips/GlobalISel/regbankselect/fsqrt_vec.mir | 4 +- .../GlobalISel/regbankselect/global_address.mir | 2 +- .../CodeGen/Mips/GlobalISel/regbankselect/load.mir | 12 +- .../GlobalISel/regbankselect/load_4_unaligned.mir | 6 +- .../GlobalISel/regbankselect/load_store_vec.mir | 12 +- .../regbankselect/long_ambiguous_chain_s32.mir | 80 +++--- .../regbankselect/long_ambiguous_chain_s64.mir | 80 +++--- .../CodeGen/Mips/GlobalISel/regbankselect/mul.mir | 2 +- .../Mips/GlobalISel/regbankselect/mul_vec.mir | 8 +- .../CodeGen/Mips/GlobalISel/regbankselect/phi.mir | 16 +- .../Mips/GlobalISel/regbankselect/pointers.mir | 6 +- .../GlobalISel/regbankselect/rem_and_div_vec.mir | 38 +-- .../Mips/GlobalISel/regbankselect/select.mir | 4 +- .../Mips/GlobalISel/regbankselect/store.mir | 8 +- .../GlobalISel/regbankselect/store_4_unaligned.mir | 6 +- .../Mips/GlobalISel/regbankselect/sub_vec.mir | 8 +- .../regbankselect/test_TypeInfoforMF.mir | 16 +- .../regbankselect/truncStore_and_aExtLoad.mir | 6 +- .../regbankselect/zextLoad_and_sextLoad.mir | 12 +- .../compact-branch-implicit-def.mir | 18 +- .../Mips/delay-slot-filler-bundled-insts.mir | 16 +- .../indirect-jump-hazard/guards-verify-call.mir | 2 +- .../guards-verify-tailcall.mir | 2 +- llvm/test/CodeGen/Mips/micromips-eva.mir | 50 ++-- .../micromips-sizereduction/micromips-lwp-swp.mir | 12 +- .../micromips-no-lwp-swp.mir | 12 +- .../Mips/mirparser/target-flags-pic-mxgot-tls.mir | 14 +- .../Mips/mirparser/target-flags-pic-o32.mir | 4 +- .../CodeGen/Mips/mirparser/target-flags-pic.mir | 4 +- .../Mips/mirparser/target-flags-static-tls.mir | 14 +- .../test/CodeGen/Mips/unaligned-memops-mapping.mir | 12 +- ...sableHoistingDueToBlockHotnessNoProfileData.mir | 8 +- ...DisableHoistingDueToBlockHotnessProfileData.mir | 8 +- .../PowerPC/NoCRFieldRedefWhenSpillingCRBIT.mir | 2 +- llvm/test/CodeGen/PowerPC/aantidep-def-ec.mir | 13 +- llvm/test/CodeGen/PowerPC/addisdtprelha-nonr3.mir | 4 +- llvm/test/CodeGen/PowerPC/block-placement-1.mir | 52 ++-- .../PowerPC/ctrloop-do-not-duplicate-mi.mir | 12 +- llvm/test/CodeGen/PowerPC/livevars-crash1.mir | 18 +- llvm/test/CodeGen/PowerPC/livevars-crash2.mir | 18 +- .../CodeGen/PowerPC/no-rlwimi-trivial-commute.mir | 14 +- .../test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir | 2 +- llvm/test/CodeGen/PowerPC/peephole-phi-acc.mir | 20 +- llvm/test/CodeGen/PowerPC/phi-eliminate.mir | 12 +- .../CodeGen/PowerPC/remove-copy-crunsetcrbit.mir | 16 +- .../PowerPC/remove-redundant-li-skip-imp-kill.mir | 6 +- llvm/test/CodeGen/PowerPC/schedule-addi-load.mir | 10 +- llvm/test/CodeGen/PowerPC/sext_elimination.mir | 4 +- llvm/test/CodeGen/PowerPC/shrink-wrap.mir | 2 +- 
llvm/test/CodeGen/PowerPC/tls_get_addr_fence1.mir | 2 +- llvm/test/CodeGen/PowerPC/tls_get_addr_fence2.mir | 2 +- llvm/test/CodeGen/PowerPC/two-address-crash.mir | 4 +- .../CodeGen/Thumb2/LowOverheadLoops/cmplx_cong.mir | 2 +- .../CodeGen/Thumb2/LowOverheadLoops/cond-mov.mir | 22 +- .../Thumb2/LowOverheadLoops/ctlz-non-zeros.mir | 68 ++--- .../Thumb2/LowOverheadLoops/dont-ignore-vctp.mir | 20 +- .../LowOverheadLoops/end-positive-offset.mir | 32 +-- .../Thumb2/LowOverheadLoops/it-block-chain.mir | 20 +- .../Thumb2/LowOverheadLoops/it-block-itercount.mir | 20 +- .../Thumb2/LowOverheadLoops/it-block-mov.mir | 2 +- .../Thumb2/LowOverheadLoops/it-block-random.mir | 20 +- .../CodeGen/Thumb2/LowOverheadLoops/massive.mir | 34 +-- .../Thumb2/LowOverheadLoops/mov-after-dls.mir | 22 +- .../Thumb2/LowOverheadLoops/multiblock-massive.mir | 24 +- .../Thumb2/LowOverheadLoops/out-of-range-cbz.mir | 38 +-- .../Thumb2/LowOverheadLoops/remove-elem-moves.mir | 60 ++-- .../Thumb2/LowOverheadLoops/revert-after-call.mir | 2 +- .../Thumb2/LowOverheadLoops/revert-after-write.mir | 2 +- .../Thumb2/LowOverheadLoops/revert-non-loop.mir | 24 +- .../Thumb2/LowOverheadLoops/revert-while.mir | 22 +- .../Thumb2/LowOverheadLoops/revertcallearly.mir | 8 +- .../Thumb2/LowOverheadLoops/safe-def-no-mov.mir | 22 +- .../Thumb2/LowOverheadLoops/safe-retaining.mir | 48 ++-- .../CodeGen/Thumb2/LowOverheadLoops/size-limit.mir | 34 +-- .../Thumb2/LowOverheadLoops/spillingmove.mir | 6 +- .../CodeGen/Thumb2/LowOverheadLoops/switch.mir | 10 +- .../LowOverheadLoops/unsafe-cpsr-loop-def.mir | 24 +- .../LowOverheadLoops/unsafe-cpsr-loop-use.mir | 24 +- .../Thumb2/LowOverheadLoops/unsafe-retaining.mir | 44 +-- .../Thumb2/LowOverheadLoops/unsafe-use-after.mir | 22 +- .../CodeGen/Thumb2/LowOverheadLoops/vpt-blocks.mir | 36 ++- .../LowOverheadLoops/while-negative-offset.mir | 32 +-- .../test/CodeGen/Thumb2/LowOverheadLoops/while.mir | 22 +- .../Thumb2/LowOverheadLoops/wls-search-killed.mir | 10 +- .../Thumb2/LowOverheadLoops/wls-search-pred.mir | 18 +- llvm/test/CodeGen/Thumb2/bti-const-island.mir | 10 +- llvm/test/CodeGen/Thumb2/constant-islands-cbz.mir | 16 +- .../CodeGen/Thumb2/frame-index-addrmode-t2i8s4.mir | 8 +- llvm/test/CodeGen/Thumb2/high-reg-spill.mir | 2 +- llvm/test/CodeGen/Thumb2/ifcvt-dead-predicate.mir | 8 +- llvm/test/CodeGen/Thumb2/m4-sched-ldr.mir | 6 +- .../test/CodeGen/Thumb2/mve-postinc-distribute.mir | 136 ++++----- llvm/test/CodeGen/Thumb2/mve-tp-loop.mir | 25 +- .../CodeGen/Thumb2/mve-vpt-block-fold-vcmp.mir | 18 +- .../CodeGen/Thumb2/mve-wls-block-placement.mir | 12 +- llvm/test/CodeGen/Thumb2/phi_prevent_copy.mir | 6 +- llvm/test/CodeGen/Thumb2/pipeliner-inlineasm.mir | 22 +- llvm/test/CodeGen/Thumb2/postinc-distribute.mir | 50 ++-- llvm/test/CodeGen/Thumb2/store-prepostinc.mir | 70 ++--- llvm/test/CodeGen/Thumb2/swp-exitbranchdir.mir | 22 +- llvm/test/CodeGen/Thumb2/swp-fixedii-le.mir | 22 +- llvm/test/CodeGen/Thumb2/swp-fixedii.mir | 22 +- llvm/test/CodeGen/Thumb2/swp-regpressure.mir | 44 +-- llvm/test/CodeGen/Thumb2/tbb-removeadd.mir | 6 +- llvm/test/CodeGen/WebAssembly/cfg-stackify-eh.mir | 4 +- llvm/test/CodeGen/WebAssembly/exception.mir | 6 +- llvm/test/CodeGen/WebAssembly/function-info.mir | 2 +- llvm/test/CodeGen/X86/AMX/amx-fastconfig.mir | 18 +- llvm/test/CodeGen/X86/GlobalISel/legalize-cmp.mir | 4 +- .../CodeGen/X86/GlobalISel/legalize-ptr-add.mir | 16 +- .../CodeGen/X86/GlobalISel/regbankselect-AVX2.mir | 8 +- .../X86/GlobalISel/regbankselect-AVX512.mir | 8 +- .../X86/GlobalISel/regbankselect-X86_64.mir | 
60 ++-- llvm/test/CodeGen/X86/GlobalISel/select-GV-32.mir | 10 +- llvm/test/CodeGen/X86/GlobalISel/select-GV-64.mir | 10 +- .../CodeGen/X86/GlobalISel/select-constant.mir | 4 +- .../GlobalISel/select-memop-scalar-unordered.mir | 94 +++--- .../X86/GlobalISel/select-memop-scalar-x32.mir | 40 +-- .../CodeGen/X86/GlobalISel/select-memop-scalar.mir | 94 +++--- .../CodeGen/X86/GlobalISel/select-memop-v128.mir | 20 +- .../CodeGen/X86/GlobalISel/select-memop-v256.mir | 16 +- .../CodeGen/X86/GlobalISel/select-memop-v512.mir | 16 +- .../test/CodeGen/X86/GlobalISel/select-ptr-add.mir | 6 +- .../X86/GlobalISel/x32-select-frameIndex.mir | 4 +- .../CodeGen/X86/GlobalISel/x86-legalize-GV.mir | 4 +- .../X86/GlobalISel/x86-legalize-inttoptr.mir | 6 +- .../X86/GlobalISel/x86-legalize-ptrtoint.mir | 16 +- .../X86/GlobalISel/x86-select-frameIndex.mir | 4 +- .../CodeGen/X86/GlobalISel/x86-select-inttoptr.mir | 6 +- .../CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir | 16 +- .../CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir | 4 +- .../X86/GlobalISel/x86_64-legalize-inttoptr.mir | 6 +- .../X86/GlobalISel/x86_64-legalize-ptrtoint.mir | 20 +- .../X86/GlobalISel/x86_64-select-frameIndex.mir | 4 +- .../X86/GlobalISel/x86_64-select-inttoptr.mir | 6 +- .../X86/GlobalISel/x86_64-select-ptrtoint.mir | 20 +- .../CodeGen/X86/StackColoring-dbg-invariance.mir | 14 +- llvm/test/CodeGen/X86/adx-commute.mir | 18 +- .../CodeGen/X86/basic-block-sections-mir-parse.mir | 10 +- llvm/test/CodeGen/X86/block-placement.mir | 14 +- llvm/test/CodeGen/X86/callbr-asm-kill.mir | 12 +- llvm/test/CodeGen/X86/cf-opt-memops.mir | 6 +- .../CodeGen/X86/codegen-prepare-replacephi.mir | 22 +- .../CodeGen/X86/codegen-prepare-replacephi2.mir | 32 +-- llvm/test/CodeGen/X86/copy-eflags-liveinlists.mir | 2 +- .../X86/dbg-changes-codegen-branch-folding2.mir | 2 +- llvm/test/CodeGen/X86/domain-reassignment.mir | 4 +- llvm/test/CodeGen/X86/expand-call-rvmarker.mir | 6 +- .../X86/fast-regalloc-live-out-debug-values.mir | 54 ++-- llvm/test/CodeGen/X86/fixup-bw-inst.mir | 6 +- llvm/test/CodeGen/X86/heap-alloc-markers.mir | 4 +- llvm/test/CodeGen/X86/implicit-null-checks.mir | 118 ++++---- .../CodeGen/X86/implicit-null-chk-reg-rewrite.mir | 2 +- llvm/test/CodeGen/X86/late-remat-update.mir | 2 +- llvm/test/CodeGen/X86/lea-opt-with-debug.mir | 20 +- llvm/test/CodeGen/X86/limit-split-cost.mir | 12 +- llvm/test/CodeGen/X86/machine-cp-mask-reg.mir | 4 +- llvm/test/CodeGen/X86/movtopush.mir | 10 +- llvm/test/CodeGen/X86/peephole-fold-testrr.mir | 12 +- llvm/test/CodeGen/X86/peephole-recurrence.mir | 4 +- llvm/test/CodeGen/X86/postra-ignore-dbg-instrs.mir | 8 +- llvm/test/CodeGen/X86/pr38952.mir | 2 +- llvm/test/CodeGen/X86/pr51903.mir | 22 +- llvm/test/CodeGen/X86/pre-coalesce.mir | 12 +- llvm/test/CodeGen/X86/regalloc-copy-hints.mir | 2 +- llvm/test/CodeGen/X86/shrink_wrap_dbg_value.mir | 10 +- llvm/test/CodeGen/X86/stack-folding-bmi2.mir | 2 +- .../CodeGen/X86/stack-folding-fp-nofpexcept.mir | 2 +- llvm/test/CodeGen/X86/statepoint-fixup-call.mir | 14 +- .../CodeGen/X86/statepoint-fixup-copy-prop-neg.mir | 12 +- .../CodeGen/X86/statepoint-fixup-copy-prop.mir | 4 +- llvm/test/CodeGen/X86/statepoint-fixup-invoke.mir | 24 +- .../CodeGen/X86/statepoint-fixup-shared-ehpad.mir | 26 +- .../CodeGen/X86/statepoint-fixup-undef-def.mir | 28 +- llvm/test/CodeGen/X86/statepoint-fixup-undef.mir | 22 +- .../X86/statepoint-invoke-ra-enter-at-end.mir | 154 +++++----- .../statepoint-invoke-ra-remove-back-copies.mir | 72 +++-- llvm/test/CodeGen/X86/statepoint-vreg-folding.mir | 64 
++--- llvm/test/CodeGen/X86/statepoint-vreg-twoaddr.mir | 18 +- llvm/test/CodeGen/X86/statepoint-vreg.mir | 16 +- llvm/test/CodeGen/X86/tail-call-conditional.mir | 10 +- llvm/test/CodeGen/X86/taildup-callsiteinfo.mir | 10 +- llvm/test/CodeGen/X86/unfoldMemoryOperand.mir | 13 +- llvm/test/CodeGen/X86/win64-eh-empty-block-2.mir | 4 +- llvm/test/CodeGen/X86/x87-reg-usage.mir | 50 ++-- 323 files changed, 2882 insertions(+), 2914 deletions(-) diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir index 8ca6f24..fdb0954 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir @@ -4,7 +4,7 @@ # RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=GCN,GFX9 --- | - define amdgpu_kernel void @smrd_imm(i32 addrspace(4)* %const0) { ret void } + define amdgpu_kernel void @smrd_imm(ptr addrspace(4) %const0) { ret void } define amdgpu_kernel void @smrd_wide() { ret void } define amdgpu_kernel void @constant_address_positive() { ret void } define amdgpu_kernel void @smrd_sgpr() { ret void } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir index 2a75ad8..9b5a0b2 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-block-addr.mir @@ -2,11 +2,11 @@ # RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck %s --- | - @addr = global i8* null + @addr = global ptr null define void @test_blockaddress() { - store i8* blockaddress(@test_blockaddress, %block), i8** @addr - indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block] + store ptr blockaddress(@test_blockaddress, %block), ptr @addr + indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block] block: ret void diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir index 4864355..a862d4a 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-load-memory-metadata.mir @@ -3,26 +3,26 @@ --- | - define i32 @widen_load_range0_tbaa(i24 addrspace(1)* %ptr) { - %load = load i24, i24 addrspace(1)* %ptr, !range !0, !tbaa !1 + define i32 @widen_load_range0_tbaa(ptr addrspace(1) %ptr) { + %load = load i24, ptr addrspace(1) %ptr, !range !0, !tbaa !1 %zext = zext i24 %load to i32 ret i32 %zext } - define i32 @widen_load_range1_tbaa(i24 addrspace(1)* %ptr) { - %load = load i24, i24 addrspace(1)* %ptr, !range !0, !tbaa !1 + define i32 @widen_load_range1_tbaa(ptr addrspace(1) %ptr) { + %load = load i24, ptr addrspace(1) %ptr, !range !0, !tbaa !1 %zext = zext i24 %load to i32 ret i32 %zext } - define i32 @widen_load_tbaa0(i24 addrspace(1)* %ptr) { - %load = load i24, i24 addrspace(1)* %ptr, !tbaa !1 + define i32 @widen_load_tbaa0(ptr addrspace(1) %ptr) { + %load = load i24, ptr addrspace(1) %ptr, !tbaa !1 %zext = zext i24 %load to i32 ret i32 %zext } - define i32 @widen_load_tbaa1(i24 addrspace(1)* %ptr) { - %load = load i24, i24 addrspace(1)* %ptr, !tbaa !1 + define i32 @widen_load_tbaa1(ptr addrspace(1) %ptr) { + %load = load i24, ptr addrspace(1) %ptr, !tbaa !1 %zext = zext i24 %load to i32 ret i32 %zext } diff --git 
a/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir index 26d5e5f..684b5ec 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/no-cse-nonlocal-convergent-instrs.mir @@ -30,7 +30,7 @@ # CHECK-NEXT: V_ADD_CO_U32_e64 [[SWIZZLE2]], {{%[0-9]+}}, 0, implicit $exec --- | - define amdgpu_kernel void @no_cse(i32 addrspace(1)*, i32, i1) { + define amdgpu_kernel void @no_cse(ptr addrspace(1), i32, i1) { entry: unreachable if.then: diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir index 2e33474..a50c7fe 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-block-addr.mir @@ -3,11 +3,11 @@ --- | - @addr = global i8* null + @addr = global ptr null define void @test_blockaddress() { - store i8* blockaddress(@test_blockaddress, %block), i8** @addr - indirectbr i8* blockaddress(@test_blockaddress, %block), [label %block] + store ptr blockaddress(@test_blockaddress, %block), ptr @addr + indirectbr ptr blockaddress(@test_blockaddress, %block), [label %block] block: ; preds = %0 ret void diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir index 7058451..ad71b96 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-load.mir @@ -3,72 +3,72 @@ # RUN: llc -amdgpu-global-isel-new-legality -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -run-pass=regbankselect %s -verify-machineinstrs -o - -regbankselect-greedy | FileCheck %s --- | - define amdgpu_kernel void @load_global_v8i32_non_uniform(<8 x i32> addrspace(1)* %in) { + define amdgpu_kernel void @load_global_v8i32_non_uniform(ptr addrspace(1) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %global.not.uniform.v8i32 = getelementptr <8 x i32>, <8 x i32> addrspace(1)* %in, i32 %tmp0 - %tmp2 = load <8 x i32>, <8 x i32> addrspace(1)* %global.not.uniform.v8i32 + %global.not.uniform.v8i32 = getelementptr <8 x i32>, ptr addrspace(1) %in, i32 %tmp0 + %tmp2 = load <8 x i32>, ptr addrspace(1) %global.not.uniform.v8i32 ret void } - define amdgpu_kernel void @load_global_v4i64_non_uniform(<4 x i64> addrspace(1)* %in) { + define amdgpu_kernel void @load_global_v4i64_non_uniform(ptr addrspace(1) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %global.not.uniform.v4i64 = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tmp0 - %tmp2 = load <4 x i64>, <4 x i64> addrspace(1)* %global.not.uniform.v4i64 + %global.not.uniform.v4i64 = getelementptr <4 x i64>, ptr addrspace(1) %in, i32 %tmp0 + %tmp2 = load <4 x i64>, ptr addrspace(1) %global.not.uniform.v4i64 ret void } - define amdgpu_kernel void @load_global_v16i32_non_uniform(<16 x i32> addrspace(1)* %in) { + define amdgpu_kernel void @load_global_v16i32_non_uniform(ptr addrspace(1) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %global.not.uniform.v16i32 = getelementptr <16 x i32>, <16 x i32> addrspace(1)* %in, i32 %tmp0 - %tmp2 = load <16 x i32>, <16 x i32> addrspace(1)* %global.not.uniform.v16i32 + %global.not.uniform.v16i32 = getelementptr <16 x i32>, ptr addrspace(1) %in, i32 %tmp0 + %tmp2 = load <16 x i32>, ptr addrspace(1) %global.not.uniform.v16i32 ret void } - define amdgpu_kernel void 
@load_global_v8i64_non_uniform(<8 x i64> addrspace(1)* %in) { + define amdgpu_kernel void @load_global_v8i64_non_uniform(ptr addrspace(1) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %global.not.uniform.v8i64 = getelementptr <8 x i64>, <8 x i64> addrspace(1)* %in, i32 %tmp0 - %tmp2 = load <8 x i64>, <8 x i64> addrspace(1)* %global.not.uniform.v8i64 + %global.not.uniform.v8i64 = getelementptr <8 x i64>, ptr addrspace(1) %in, i32 %tmp0 + %tmp2 = load <8 x i64>, ptr addrspace(1) %global.not.uniform.v8i64 ret void } define amdgpu_kernel void @load_global_v8i32_uniform() {ret void} define amdgpu_kernel void @load_global_v4i64_uniform() {ret void} define amdgpu_kernel void @load_global_v16i32_uniform() {ret void} define amdgpu_kernel void @load_global_v8i64_uniform() {ret void} - define amdgpu_kernel void @load_constant_v8i32_non_uniform(<8 x i32> addrspace(4)* %in) { + define amdgpu_kernel void @load_constant_v8i32_non_uniform(ptr addrspace(4) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %constant.not.uniform.v8i32 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %in, i32 %tmp0 - %tmp2 = load <8 x i32>, <8 x i32> addrspace(4)* %constant.not.uniform.v8i32 + %constant.not.uniform.v8i32 = getelementptr <8 x i32>, ptr addrspace(4) %in, i32 %tmp0 + %tmp2 = load <8 x i32>, ptr addrspace(4) %constant.not.uniform.v8i32 ret void } - define amdgpu_kernel void @load_constant_i256_non_uniform(i256 addrspace(4)* %in) { + define amdgpu_kernel void @load_constant_i256_non_uniform(ptr addrspace(4) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %constant.not.uniform = getelementptr i256, i256 addrspace(4)* %in, i32 %tmp0 - %tmp2 = load i256, i256 addrspace(4)* %constant.not.uniform + %constant.not.uniform = getelementptr i256, ptr addrspace(4) %in, i32 %tmp0 + %tmp2 = load i256, ptr addrspace(4) %constant.not.uniform ret void } - define amdgpu_kernel void @load_constant_v16i16_non_uniform(<16 x i16> addrspace(4)* %in) { + define amdgpu_kernel void @load_constant_v16i16_non_uniform(ptr addrspace(4) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %constant.not.uniform = getelementptr <16 x i16>, <16 x i16> addrspace(4)* %in, i32 %tmp0 - %tmp2 = load <16 x i16>, <16 x i16> addrspace(4)* %constant.not.uniform + %constant.not.uniform = getelementptr <16 x i16>, ptr addrspace(4) %in, i32 %tmp0 + %tmp2 = load <16 x i16>, ptr addrspace(4) %constant.not.uniform ret void } - define amdgpu_kernel void @load_constant_v4i64_non_uniform(<4 x i64> addrspace(4)* %in) { + define amdgpu_kernel void @load_constant_v4i64_non_uniform(ptr addrspace(4) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %constant.not.uniform.v4i64 = getelementptr <4 x i64>, <4 x i64> addrspace(4)* %in, i32 %tmp0 - %tmp2 = load <4 x i64>, <4 x i64> addrspace(4)* %constant.not.uniform.v4i64 + %constant.not.uniform.v4i64 = getelementptr <4 x i64>, ptr addrspace(4) %in, i32 %tmp0 + %tmp2 = load <4 x i64>, ptr addrspace(4) %constant.not.uniform.v4i64 ret void } - define amdgpu_kernel void @load_constant_v16i32_non_uniform(<16 x i32> addrspace(4)* %in) { + define amdgpu_kernel void @load_constant_v16i32_non_uniform(ptr addrspace(4) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %constant.not.uniform.v16i32 = getelementptr <16 x i32>, <16 x i32> addrspace(4)* %in, i32 %tmp0 - %tmp2 = load <16 x i32>, <16 x i32> addrspace(4)* %constant.not.uniform.v16i32 + %constant.not.uniform.v16i32 = getelementptr <16 x i32>, ptr addrspace(4) %in, i32 %tmp0 + %tmp2 = load <16 x i32>, ptr addrspace(4) 
%constant.not.uniform.v16i32 ret void } - define amdgpu_kernel void @load_constant_v8i64_non_uniform(<8 x i64> addrspace(4)* %in) { + define amdgpu_kernel void @load_constant_v8i64_non_uniform(ptr addrspace(4) %in) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %constant.not.uniform.v8i64 = getelementptr <8 x i64>, <8 x i64> addrspace(4)* %in, i32 %tmp0 - %tmp2 = load <8 x i64>, <8 x i64> addrspace(4)* %constant.not.uniform.v8i64 + %constant.not.uniform.v8i64 = getelementptr <8 x i64>, ptr addrspace(4) %in, i32 %tmp0 + %tmp2 = load <8 x i64>, ptr addrspace(4) %constant.not.uniform.v8i64 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir index d111563..938ab16 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-split-scalar-load-metadata.mir @@ -3,13 +3,13 @@ --- | - define amdgpu_ps i96 @split_smrd_load_range(i96 addrspace(4)* %ptr) { - %load = load i96, i96 addrspace(4)* %ptr, !range !0 + define amdgpu_ps i96 @split_smrd_load_range(ptr addrspace(4) %ptr) { + %load = load i96, ptr addrspace(4) %ptr, !range !0 ret i96 %load } - define amdgpu_ps <3 x i32> @split_smrd_load_tbaa(<3 x i32> addrspace(4)* %ptr) { - %load = load <3 x i32>, <3 x i32> addrspace(4)* %ptr, !tbaa !1 + define amdgpu_ps <3 x i32> @split_smrd_load_tbaa(ptr addrspace(4) %ptr) { + %load = load <3 x i32>, ptr addrspace(4) %ptr, !tbaa !1 ret <3 x i32> %load } diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir index 3ef20a4..76ee2f9 100644 --- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir +++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir @@ -2,49 +2,49 @@ # RUN: llc -march=amdgcn -mcpu=hawaii -mattr=+flat-for-global -run-pass=regbankselect %s -verify-machineinstrs -o - | FileCheck %s --- | - define amdgpu_kernel void @load_constant(i32 addrspace(4)* %ptr0) { + define amdgpu_kernel void @load_constant(ptr addrspace(4) %ptr0) { ret void } - define amdgpu_kernel void @load_constant_volatile(i32 addrspace(4)* %ptr0) { + define amdgpu_kernel void @load_constant_volatile(ptr addrspace(4) %ptr0) { ret void } - define amdgpu_kernel void @load_global_uniform_invariant(i32 addrspace(1)* %ptr1) { - %tmp0 = load i32, i32 addrspace(1)* %ptr1 + define amdgpu_kernel void @load_global_uniform_invariant(ptr addrspace(1) %ptr1) { + %tmp0 = load i32, ptr addrspace(1) %ptr1 ret void } - define amdgpu_kernel void @load_global_uniform_noclobber(i32 addrspace(1)* %ptr1) { - %tmp0 = load i32, i32 addrspace(1)* %ptr1, !amdgpu.noclobber !0 + define amdgpu_kernel void @load_global_uniform_noclobber(ptr addrspace(1) %ptr1) { + %tmp0 = load i32, ptr addrspace(1) %ptr1, !amdgpu.noclobber !0 ret void } - define amdgpu_kernel void @load_global_uniform_variant(i32 addrspace(1)* %ptr1) { - %tmp0 = load i32, i32 addrspace(1)* %ptr1 + define amdgpu_kernel void @load_global_uniform_variant(ptr addrspace(1) %ptr1) { + %tmp0 = load i32, ptr addrspace(1) %ptr1 ret void } - define amdgpu_kernel void @load_global_uniform_volatile_invariant(i32 addrspace(1)* %ptr1) { - %tmp0 = load i32, i32 addrspace(1)* %ptr1 + define amdgpu_kernel void @load_global_uniform_volatile_invariant(ptr addrspace(1) %ptr1) { + %tmp0 = load i32, ptr addrspace(1) %ptr1 ret void } - define amdgpu_kernel void @load_global_uniform_atomic_invariant(i32 addrspace(1)* 
%ptr1) { - %tmp0 = load i32, i32 addrspace(1)* %ptr1 + define amdgpu_kernel void @load_global_uniform_atomic_invariant(ptr addrspace(1) %ptr1) { + %tmp0 = load i32, ptr addrspace(1) %ptr1 ret void } - define amdgpu_kernel void @load_global_non_uniform(i32 addrspace(1)* %ptr2) { + define amdgpu_kernel void @load_global_non_uniform(ptr addrspace(1) %ptr2) { %tmp0 = call i32 @llvm.amdgcn.workitem.id.x() #0 - %tmp1 = getelementptr i32, i32 addrspace(1)* %ptr2, i32 %tmp0 - %tmp2 = load i32, i32 addrspace(1)* %tmp1 + %tmp1 = getelementptr i32, ptr addrspace(1) %ptr2, i32 %tmp0 + %tmp2 = load i32, ptr addrspace(1) %tmp1 ret void } define void @non_power_of_2() { ret void } - define amdgpu_kernel void @load_constant_v4i16_from_8_align8(<3 x i16> addrspace(4)* %ptr0) { + define amdgpu_kernel void @load_constant_v4i16_from_8_align8(ptr addrspace(4) %ptr0) { ret void } diff --git a/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll b/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll index b145827..999a1b4 100644 --- a/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll +++ b/llvm/test/CodeGen/AMDGPU/bug-sdag-emitcopyfromreg.ll @@ -3,7 +3,7 @@ ; RUN: llc -march=amdgcn -mcpu=gfx1010 < %s | FileCheck %s -check-prefix=ISA ; RUN: llc -march=amdgcn -mcpu=gfx1010 -stop-before=si-fix-sgpr-copies < %s | FileCheck %s -check-prefix=MIR -define void @f(i32 %arg, float* %ptr) { +define void @f(i32 %arg, ptr %ptr) { ; ISA-LABEL: f: ; ISA: ; %bb.0: ; %bb ; ISA-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) @@ -59,7 +59,7 @@ define void @f(i32 %arg, float* %ptr) { ; MIR-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1 ; MIR-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0 ; MIR-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0 - ; MIR-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[S_MOV_B64_]], 0, 0 :: (invariant load (s64) from `<2 x i32> addrspace(4)* null`, align 4294967296, addrspace 4) + ; MIR-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM killed [[S_MOV_B64_]], 0, 0 :: (invariant load (s64) from `ptr addrspace(4) null`, align 4294967296, addrspace 4) ; MIR-NEXT: [[COPY3:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub1 ; MIR-NEXT: [[COPY4:%[0-9]+]]:sreg_32 = COPY [[S_LOAD_DWORDX2_IMM]].sub0 ; MIR-NEXT: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0 @@ -118,7 +118,7 @@ define void @f(i32 %arg, float* %ptr) { ; MIR-NEXT: FLAT_STORE_DWORD [[COPY8]], [[PHI2]], 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into %ir.ptr) ; MIR-NEXT: SI_RETURN bb: - %i = load <2 x i32>, <2 x i32> addrspace(4)* null, align 4294967296 + %i = load <2 x i32>, ptr addrspace(4) null, align 4294967296 %i1 = extractelement <2 x i32> %i, i64 1 %i2 = extractelement <2 x i32> %i, i64 0 %i3 = lshr i32 %i1, 1 @@ -144,7 +144,7 @@ bb14: br i1 %i20, label %bb14, label %bb21 bb21: - store float %i15, float* %ptr, align 4 + store float %i15, ptr %ptr, align 4 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir index 276c809..3bebc64 100644 --- a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir +++ b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir @@ -43,8 +43,8 @@ body: | %3 = COPY $vgpr0 %0 = COPY $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: 
(non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 @@ -105,8 +105,8 @@ body: | %3 = COPY $vgpr0 %0 = COPY $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 @@ -168,8 +168,8 @@ body: | %3 = COPY $vgpr0 %0 = COPY $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 @@ -233,8 +233,8 @@ body: | %3 = COPY $vgpr0 %0 = COPY $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 @@ -310,8 +310,8 @@ body: | %3 = COPY $vgpr0 %0 = COPY $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 %10 = S_MOV_B32 61440 @@ -375,8 +375,8 @@ body: | %3 = COPY $vgpr0 %0 = COPY $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec %25 = REG_SEQUENCE %3, 1, %24, 2 
%10 = S_MOV_B32 61440 diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir index 419eacb..a077e67 100644 --- a/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir +++ b/llvm/test/CodeGen/AMDGPU/coalescer-subranges-another-copymi-not-live.mir @@ -128,6 +128,6 @@ body: | %28:vgpr_32 = IMAGE_LOAD_V1_V4 killed %25, killed %27, 2, -1, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load (s128) from constant-pool, addrspace 4) %29:vgpr_32 = nofpexcept V_ADD_F32_e32 0, killed %28, implicit $mode, implicit $exec $m0 = S_MOV_B32 -1 - DS_WRITE_B32 undef %30:vgpr_32, killed %29, 0, 0, implicit $m0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`, addrspace 3) + DS_WRITE_B32 undef %30:vgpr_32, killed %29, 0, 0, implicit $m0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`, addrspace 3) S_ENDPGM 0 ... diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir index 99c2036..c347860 100644 --- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir +++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir @@ -723,7 +723,7 @@ body: | %2:vgpr_32 = COPY $vgpr0 %0:sgpr_64 = COPY $sgpr0_sgpr1 - %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4) + %3:sreg_64_xexec = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4) %15:vgpr_32 = V_ASHRREV_I32_e64 31, %2, implicit $exec %16:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %15, %subreg.sub1 %17:vreg_64 = V_LSHLREV_B64_e64 2, killed %16, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir index d5aa9b6..b2af7df 100644 --- a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir +++ b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir @@ -4,18 +4,18 @@ # GCN: FLAT_LOAD_DWORD # GCN-NEXT: FLAT_LOAD_DWORD --- | - define amdgpu_kernel void @flat_load_clustering(i32 addrspace(1)* nocapture %arg, i32 addrspace(4)* nocapture readonly %arg1) { + define amdgpu_kernel void @flat_load_clustering(ptr addrspace(1) nocapture %arg, ptr addrspace(4) nocapture readonly %arg1) { bb: %tid = tail call i32 @llvm.amdgcn.workitem.id.x() %idxprom = sext i32 %tid to i64 - %gep1 = getelementptr inbounds i32, i32 addrspace(4)* %arg1, i64 %idxprom - %load1 = load i32, i32 addrspace(4)* %gep1, align 4 - %gep2 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %idxprom - %gep34 = getelementptr inbounds i32, i32 addrspace(4)* %gep1, i64 4 - %load2 = load i32, i32 addrspace(4)* %gep34, align 4 - %gep4 = getelementptr inbounds i32, i32 addrspace(1)* %gep2, i64 4 - store i32 %load1, i32 addrspace(1)* %gep2, align 4 - store i32 %load2, i32 addrspace(1)* %gep4, align 4 + %gep1 = getelementptr inbounds i32, ptr addrspace(4) %arg1, i64 %idxprom + %load1 = load i32, ptr addrspace(4) %gep1, align 4 + %gep2 = getelementptr inbounds i32, ptr addrspace(1) %arg, i64 %idxprom + %gep34 = getelementptr inbounds i32, ptr addrspace(4) %gep1, i64 4 + %load2 = load i32, ptr addrspace(4) %gep34, align 4 + %gep4 = getelementptr inbounds i32, ptr addrspace(1) %gep2, i64 4 + store i32 %load1, ptr addrspace(1) %gep2, align 4 + store i32 %load2, ptr addrspace(1) %gep4, align 4 ret void } @@ -54,8 +54,8 @@ body: | %1 = COPY $sgpr4_sgpr5 %0 = COPY $vgpr0 - %3 
= S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) + %3 = S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) %7 = V_LSHLREV_B32_e32 2, %0, implicit $exec %2 = V_MOV_B32_e32 0, implicit $exec undef %12.sub0 = V_ADD_CO_U32_e32 %4.sub0, %7, implicit-def $vcc, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir index 2383e1a..dd61d9c 100644 --- a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir +++ b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir @@ -1,104 +1,104 @@ # RUN: llc --mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs -run-pass si-fold-operands,si-shrink-instructions %s -o - | FileCheck %s --- | define amdgpu_kernel void @add_f32_1.0_one_f16_use() #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile float, float addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile float, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH3C00 %f32.add = fadd float %f32.val, 1.000000e+00 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile float %f32.add, float addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile float %f32.add, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_f32_1.0_multi_f16_use() #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile float, float addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile float, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH3C00 %f32.add = fadd float %f32.val, 1.000000e+00 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile float %f32.add, float addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile float %f32.add, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_f32_1.0_one_f32_use_one_f16_use () #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile float, float addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile float, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH3C00 %f32.add = fadd float %f32.val, 1.000000e+00 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile float %f32.add, float addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile float %f32.add, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_f32_1.0_one_f32_use_multi_f16_use () #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile float, float addrspace(1)* undef + %f16.val0 = load volatile half, ptr 
addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile float, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH3C00 %f16.add1 = fadd half %f16.val1, 0xH3C00 %f32.add = fadd float %f32.val, 1.000000e+00 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile half %f16.add1, half addrspace(1)* undef - store volatile float %f32.add, float addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile half %f16.add1, ptr addrspace(1) undef + store volatile float %f32.add, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_i32_1_multi_f16_use() #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH0001 %f16.add1 = fadd half %f16.val1, 0xH0001 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile half %f16.add1,half addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile half %f16.add1,ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_i32_m2_one_f32_use_multi_f16_use () #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile float, float addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile float, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xHFFFE %f16.add1 = fadd half %f16.val1, 0xHFFFE %f32.add = fadd float %f32.val, 0xffffffffc0000000 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile half %f16.add1, half addrspace(1)* undef - store volatile float %f32.add, float addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile half %f16.add1, ptr addrspace(1) undef + store volatile float %f32.add, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_f16_1.0_multi_f32_use() #0 { - %f32.val0 = load volatile float, float addrspace(1)* undef - %f32.val1 = load volatile float, float addrspace(1)* undef - %f32.val = load volatile float, float addrspace(1)* undef + %f32.val0 = load volatile float, ptr addrspace(1) undef + %f32.val1 = load volatile float, ptr addrspace(1) undef + %f32.val = load volatile float, ptr addrspace(1) undef %f32.add0 = fadd float %f32.val0, 1.0 %f32.add1 = fadd float %f32.val1, 1.0 - store volatile float %f32.add0, float addrspace(1)* undef - store volatile float %f32.add1, float addrspace(1)* undef + store volatile float %f32.add0, ptr addrspace(1) undef + store volatile float %f32.add1, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_f16_1.0_other_high_bits_multi_f16_use() #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile half, half addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile half, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH3C00 %f32.add = fadd half %f32.val, 1.000000e+00 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile half %f32.add, half addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store 
volatile half %f32.add, ptr addrspace(1) undef ret void } define amdgpu_kernel void @add_f16_1.0_other_high_bits_use_f16_f32() #0 { - %f16.val0 = load volatile half, half addrspace(1)* undef - %f16.val1 = load volatile half, half addrspace(1)* undef - %f32.val = load volatile half, half addrspace(1)* undef + %f16.val0 = load volatile half, ptr addrspace(1) undef + %f16.val1 = load volatile half, ptr addrspace(1) undef + %f32.val = load volatile half, ptr addrspace(1) undef %f16.add0 = fadd half %f16.val0, 0xH3C00 %f32.add = fadd half %f32.val, 1.000000e+00 - store volatile half %f16.add0, half addrspace(1)* undef - store volatile half %f32.add, half addrspace(1)* undef + store volatile half %f16.add0, ptr addrspace(1) undef + store volatile half %f32.add, ptr addrspace(1) undef ret void } @@ -158,10 +158,10 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) %12 = V_MOV_B32_e32 1065353216, implicit $exec %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... @@ -221,13 +221,13 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) %13 = V_MOV_B32_e32 1065353216, implicit $exec %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $mode, implicit $exec %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... 
@@ -288,14 +288,14 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) %14 = V_MOV_B32_e32 1065353216, implicit $exec %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... @@ -358,16 +358,16 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) %14 = V_MOV_B32_e32 1065353216, implicit $exec %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $mode, implicit $exec %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`) S_ENDPGM 0 
... @@ -424,13 +424,13 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) %13 = V_MOV_B32_e32 1, implicit $exec %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $mode, implicit $exec %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... @@ -490,16 +490,16 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) %14 = V_MOV_B32_e32 -2, implicit $exec %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $mode, implicit $exec %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $mode, implicit $exec %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... 
@@ -559,13 +559,13 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) - %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) + %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) %13 = V_MOV_B32_e32 15360, implicit $exec %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`) - BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`) + BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`) + BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... @@ -624,13 +624,13 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) %13 = V_MOV_B32_e32 80886784, implicit $exec %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) + BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... 
@@ -686,13 +686,13 @@ body: | %8 = S_MOV_B32 61440 %9 = S_MOV_B32 -1 %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4 - %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`) - %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `half addrspace(1)* undef`) + %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`) + %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, implicit $exec :: (volatile load (s16) from `ptr addrspace(1) undef`) %13 = V_MOV_B32_e32 305413120, implicit $exec %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $mode, implicit $exec %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $mode, implicit $exec - BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `float addrspace(1)* undef`) - BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `half addrspace(1)* undef`) + BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`) + BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, implicit $exec :: (volatile store (s16) into `ptr addrspace(1) undef`) S_ENDPGM 0 ... diff --git a/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir b/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir index 1969f9b..5a33425 100644 --- a/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir +++ b/llvm/test/CodeGen/AMDGPU/fp-atomic-to-s_denormmode.mir @@ -9,7 +9,7 @@ name: flat_atomic_fcmpswap_to_s_denorm_mode body: | bb.0: - FLAT_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + FLAT_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -21,7 +21,7 @@ body: | name: flat_atomic_fcmpswap_x2_to_s_denorm_mode body: | bb.0: - FLAT_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + FLAT_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -33,7 +33,7 @@ body: | name: flat_atomic_fmax_to_s_denorm_mode body: | bb.0: - FLAT_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + FLAT_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... 
@@ -45,7 +45,7 @@ body: | name: flat_atomic_fmax_x2_to_s_denorm_mode body: | bb.0: - FLAT_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + FLAT_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -57,7 +57,7 @@ body: | name: flat_atomic_fmin_to_s_denorm_mode body: | bb.0: - FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -69,7 +69,7 @@ body: | name: flat_atomic_fmin_x2_to_s_denorm_mode body: | bb.0: - FLAT_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + FLAT_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -81,7 +81,7 @@ body: | name: flat_atomic_fcmpswap_x2_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = FLAT_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = FLAT_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -93,7 +93,7 @@ body: | name: flat_atomic_fmax_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = FLAT_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = FLAT_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -105,7 +105,7 @@ body: | name: flat_atomic_fmax_x2_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = FLAT_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = FLAT_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... 
@@ -117,7 +117,7 @@ body: | name: flat_atomic_fmin_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = FLAT_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = FLAT_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -129,7 +129,7 @@ body: | name: flat_atomic_fmin_x2_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = FLAT_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = FLAT_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -141,7 +141,7 @@ body: | name: flat_atomic_fcmpswap_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = FLAT_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = FLAT_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -153,7 +153,7 @@ body: | name: global_atomic_fcmpswap_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FCMPSWAP undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -165,7 +165,7 @@ body: | name: global_atomic_fcmpswap_x2_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FCMPSWAP_X2 undef %0:vreg_64, undef %1:vreg_128, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -177,7 +177,7 @@ body: | name: global_atomic_fmax_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FMAX undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -189,7 +189,7 @@ body: | name: global_atomic_fmax_x2_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FMAX_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... 
@@ -201,7 +201,7 @@ body: | name: global_atomic_fmin_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -213,7 +213,7 @@ body: | name: global_atomic_fmin_x2_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FMIN_X2 undef %0:vreg_64, undef %1:vreg_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -225,7 +225,7 @@ body: | name: global_atomic_fcmpswap_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = GLOBAL_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = GLOBAL_ATOMIC_FCMPSWAP_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -237,7 +237,7 @@ body: | name: global_atomic_fcmpswap_x2_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_RTN undef %0:vreg_64, undef %1:vreg_128, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -249,7 +249,7 @@ body: | name: global_atomic_fmax_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -261,7 +261,7 @@ body: | name: global_atomic_fmax_x2_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -273,7 +273,7 @@ body: | name: global_atomic_fmin_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_RTN undef %0:vreg_64, undef %1:vgpr_32, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... 
@@ -285,7 +285,7 @@ body: | name: global_atomic_fmin_x2_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_RTN undef %0:vreg_64, undef %1:vreg_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -297,7 +297,7 @@ body: | name: global_atomic_fcmpswap_saddr_to_s_denorm_mode body: | bb.0: - GLOBAL_ATOMIC_FCMPSWAP_SADDR undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + GLOBAL_ATOMIC_FCMPSWAP_SADDR undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 0, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -309,7 +309,7 @@ body: | name: global_atomic_fcmpswap_x2_saddr_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_128, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = GLOBAL_ATOMIC_FCMPSWAP_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_128, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -321,7 +321,7 @@ body: | name: global_atomic_fmax_saddr_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = GLOBAL_ATOMIC_FMAX_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -333,7 +333,7 @@ body: | name: global_atomic_fmax_x2_saddr_rtn_to_s_denorm_mode body: | bb.0: - %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vreg_64 = GLOBAL_ATOMIC_FMAX_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... @@ -345,7 +345,7 @@ body: | name: global_atomic_fmin_saddr_rtn_to_s_denorm_mode body: | bb.0: - %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`) + %2:vgpr_32 = GLOBAL_ATOMIC_FMIN_SADDR_RTN undef %0:vgpr_32, undef %1:vgpr_32, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`) S_DENORM_MODE 0, implicit-def $mode, implicit $mode ... 
@@ -357,7 +357,7 @@ body: |
 name: global_atomic_fmin_x2_saddr_rtn_to_s_denorm_mode
 body: |
   bb.0:
-    %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    %2:vreg_64 = GLOBAL_ATOMIC_FMIN_X2_SADDR_RTN undef %0:vgpr_32, undef %1:vreg_64, undef %3:sgpr_64, 0, 1, implicit $exec :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...

@@ -369,7 +369,7 @@ body: |
 name: flat_fp_atomic_to_s_denorm_mode_waitcnt
 body: |
   bb.0:
-    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     S_WAITCNT 0
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
@@ -382,7 +382,7 @@ body: |
 name: flat_fp_atomic_to_s_denorm_mode_valu
 body: |
   bb.0:
-    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `float addrspace(1)* undef`)
+    FLAT_ATOMIC_FMIN undef %0:vreg_64, undef %1:vgpr_32, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store seq_cst seq_cst (s32) on `ptr addrspace(1) undef`)
     %2:vgpr_32 = V_ADD_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, implicit $mode, implicit $exec
     S_DENORM_MODE 0, implicit-def $mode, implicit $mode
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
index 221e6a8..11ddb7a 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-waitcnts-exp.mir
@@ -2,10 +2,10 @@
 --- |
   define amdgpu_ps <4 x float> @exp_done_waitcnt(<4 x i32> inreg, <4 x i32> inreg, i32 inreg %w, float %v) #0 {
-    %a = load volatile float, float addrspace(1)* undef
-    %b = load volatile float, float addrspace(1)* undef
-    %c = load volatile float, float addrspace(1)* undef
-    %d = load volatile float, float addrspace(1)* undef
+    %a = load volatile float, ptr addrspace(1) undef
+    %b = load volatile float, ptr addrspace(1) undef
+    %c = load volatile float, ptr addrspace(1) undef
+    %d = load volatile float, ptr addrspace(1) undef
     call void @llvm.amdgcn.exp.f32(i32 15, i32 1, float %a, float %b, float %c, float %d, i1 true, i1 false)
     ret <4 x float>
   }
@@ -49,10 +49,10 @@ body: |
   bb.0 (%ir-block.2):
     $sgpr3 = S_MOV_B32 61440
     $sgpr2 = S_MOV_B32 -1
-    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
-    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `float addrspace(1)* undef`)
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (volatile load (s32) from `ptr addrspace(1) undef`)
     EXP_DONE 0, killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3, -1, -1, 15, implicit $exec
     $vgpr0 = V_MOV_B32_e32 1056964608, implicit $exec
     $vgpr1 = V_MOV_B32_e32 1065353216, implicit $exec
diff --git a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index f636cfd..1b669a6 100644
--- a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -1,21 +1,21 @@
 # RUN: llc -run-pass block-placement -march=amdgcn -verify-machineinstrs -o - %s | FileCheck %s
 --- |
-  define amdgpu_kernel void @invert_br_undef_vcc(float %cond, i32 addrspace(1)* %out) #0 {
+  define amdgpu_kernel void @invert_br_undef_vcc(float %cond, ptr addrspace(1) %out) #0 {
   entry:
     br i1 undef, label %if, label %else, !structurizecfg.uniform !0, !amdgpu.uniform !0

   else: ; preds = %entry
-    store volatile i32 100, i32 addrspace(1)* undef
+    store volatile i32 100, ptr addrspace(1) undef
     br label %done, !structurizecfg.uniform !0

   if: ; preds = %entry
-    store volatile i32 9, i32 addrspace(1)* undef
+    store volatile i32 9, ptr addrspace(1) undef
     br label %done, !structurizecfg.uniform !0

   done: ; preds = %if, %else
     %value = phi i32 [ 0, %if ], [ 1, %else ]
-    store i32 %value, i32 addrspace(1)* %out
+    store i32 %value, ptr addrspace(1) %out
     ret void
   }
@@ -55,7 +55,7 @@ body: |
   bb.0.entry:
     liveins: $sgpr0_sgpr1

-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $sgpr7 = S_MOV_B32 61440
     $sgpr6 = S_MOV_B32 -1
     S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
@@ -64,7 +64,7 @@ body: |
     liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003

    $vgpr0 = V_MOV_B32_e32 100, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `i32 addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     $vgpr0 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.3
@@ -72,7 +72,7 @@ body: |
     liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003

    $vgpr0 = V_MOV_B32_e32 9, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `i32 addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`)
     $vgpr0 = V_MOV_B32_e32 0, implicit $exec

   bb.3.done:
diff --git a/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir b/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
index 8c05791..1e4618b 100644
--- a/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/lds-dma-waitcnt.mir
@@ -10,8 +10,8 @@
 name: buffer_load_dword_lds_ds_read
 body: |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 ...
@@ -27,9 +27,9 @@
 name: buffer_load_dword_lds_vmcnt_1
 body: |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`)
-    $vgpr10 = BUFFER_LOAD_DWORD_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`)
+    $vgpr10 = BUFFER_LOAD_DWORD_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 ...
@@ -44,8 +44,8 @@
 name: buffer_load_dword_lds_flat_read
 body: |
   bb.0:
     $m0 = S_MOV_B32 0
-    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`)
-    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `i32* undef`)
+    BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`)
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (load (s32) from `ptr undef`)
     S_ENDPGM 0

@@ -61,8 +61,8 @@
 name: global_load_lds_dword_ds_read
 body: |
   bb.0:
     $m0 = S_MOV_B32 0
-    GLOBAL_LOAD_LDS_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4)
-    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`)
+    GLOBAL_LOAD_LDS_DWORD $vgpr0_vgpr1, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4)
+    $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`)
     S_ENDPGM 0
 ...
@@ -77,8 +77,8 @@ name: scratch_load_lds_dword_ds_read body: | bb.0: $m0 = S_MOV_B32 0 - SCRATCH_LOAD_LDS_DWORD $vgpr0, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(5)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4) - $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + SCRATCH_LOAD_LDS_DWORD $vgpr0, 4, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(5) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4) + $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -91,8 +91,8 @@ name: buffer_store_lds_dword_ds_read body: | bb.0: $m0 = S_MOV_B32 0 - BUFFER_STORE_LDS_DWORD $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(3)* undef` + 4), (store (s32) into `i32 addrspace(1)* undef` + 4) - $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + BUFFER_STORE_LDS_DWORD $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(3) undef` + 4), (store (s32) into `ptr addrspace(1) undef` + 4) + $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -110,10 +110,10 @@ name: series_of_buffer_load_dword_lds_ds_read body: | bb.0: $m0 = S_MOV_B32 0 - BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef`), (store (s32) into `i32 addrspace(3)* undef`) - BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 4), (store (s32) into `i32 addrspace(3)* undef` + 4) - BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `i32 addrspace(1)* undef` + 8), (store (s32) into `i32 addrspace(3)* undef` + 8) - $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef`), (store (s32) into `ptr addrspace(3) undef`) + BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 4), (store (s32) into `ptr addrspace(3) undef` + 4) + BUFFER_LOAD_DWORD_LDS_IDXEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, implicit $exec, implicit $m0 :: (load (s32) from `ptr addrspace(1) undef` + 8), (store (s32) into `ptr addrspace(3) undef` + 8) + $vgpr0 = DS_READ_B32_gfx9 $vgpr1, 0, 0, implicit $m0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) S_ENDPGM 0 ... diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir index 855acda..bac8a47 100644 --- a/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir +++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-dlc.mir @@ -4,30 +4,30 @@ # operations correctly with/without DLC bit. 
 --- |
-  define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test1(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }

-  define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test2(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }

-  define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test3(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }

-  define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test4(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 ...
@@ -48,7 +48,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -79,7 +79,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -110,7 +110,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -140,7 +140,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
diff --git a/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir b/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
index 8fbe281..19f44dc 100644
--- a/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
+++ b/llvm/test/CodeGen/AMDGPU/load-store-opt-scc.mir
@@ -4,30 +4,30 @@
 # operations correctly with/without SCC bit.
 --- |
-  define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test1(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }

-  define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test2(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }

-  define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test3(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }

-  define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
-    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
-    store i32 123, i32 addrspace(1)* %out.gep.1
-    store i32 456, i32 addrspace(1)* %out
+  define amdgpu_kernel void @test4(ptr addrspace(1) %out) {
+    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
+    store i32 123, ptr addrspace(1) %out.gep.1
+    store i32 456, ptr addrspace(1) %out
     ret void
   }
 ...
@@ -48,7 +48,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -79,7 +79,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -110,7 +110,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
@@ -140,7 +140,7 @@ body: |
     $sgpr3 = S_MOV_B32 61440

     %0:sgpr_64 = COPY $sgpr0_sgpr1
-    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
+    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
     %2:sgpr_32 = COPY $sgpr2
     %3:sgpr_32 = COPY $sgpr3
     %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
index 311c19b..1972d05 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
@@ -4,14 +4,14 @@
   declare i32 @llvm.amdgcn.workitem.id.x() #0

   define amdgpu_kernel void @atomic_max_i32_noret(
-      i32 addrspace(1)* %out,
-      i32 addrspace(1)* addrspace(1)* %in,
-      i32 addrspace(1)* %x,
+      ptr addrspace(1) %out,
+      ptr addrspace(1) %in,
+      ptr addrspace(1) %x,
       i32 %y) #1 {
     %tid = call i32 @llvm.amdgcn.workitem.id.x()
     %idxprom = sext i32 %tid to i64
-    %tid.gep = getelementptr i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %in, i64 %idxprom
-    %ptr = load volatile i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* %tid.gep
+    %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i64 %idxprom
+    %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep
     %xor = xor i32 %tid, 1
     %cmp = icmp ne i32 %xor, 0
     %1 = call { i1, i64 } @llvm.amdgcn.if(i1 %cmp)
@@ -20,8 +20,8 @@
     br i1 %2, label %atomic, label %exit

   atomic: ; preds = %0
-    %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 100
-    %ret = atomicrmw max i32 addrspace(1)* %gep, i32 %y seq_cst
+    %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100
+    %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst
     br label %exit

   exit: ; preds = %atomic, %0
@@ -75,7 +75,7 @@
     successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
     liveins: $vgpr0, $sgpr0_sgpr1

-    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec
     $vgpr1_vgpr2 = V_LSHL_B64_e64 $vgpr0_vgpr1, 3, implicit $exec
     $sgpr7 = S_MOV_B32 61440
@@ -92,7 +92,7 @@
     successors: %bb.2.exit(0x80000000)
     liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x00000003

-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
     dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
     $sgpr4_sgpr5 = S_MOV_B64 0
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
index 8ba4136..abf74d3 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-invalid-addrspace.mir
@@ -11,10 +11,10 @@ body: |

     $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr2_sgpr3
     $vgpr1 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit $sgpr2_sgpr3, implicit $exec
-    renamable $vgpr2 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load syncscope("one-as") seq_cst (s32) from `i32 addrspace(42)* undef`)
+    renamable $vgpr2 = FLAT_LOAD_DWORD killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load syncscope("one-as") seq_cst (s32) from `ptr addrspace(42) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0

 ...
@@ -30,7 +30,7 @@ body: |
     $vgpr2 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `i32 addrspace(42)* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `ptr addrspace(42) undef`)
     S_ENDPGM 0

 ...
@@ -47,7 +47,7 @@ body: | $vgpr0 = V_MOV_B32_e32 killed $sgpr4, implicit $exec, implicit $exec $vgpr1 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $exec $vgpr2 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_ATOMIC_CMPSWAP killed renamable $vgpr2_vgpr3, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("workgroup-one-as") seq_cst seq_cst (s32) on `i32 addrspace(42)* undef`) + FLAT_ATOMIC_CMPSWAP killed renamable $vgpr2_vgpr3, killed renamable $vgpr0_vgpr1, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("workgroup-one-as") seq_cst seq_cst (s32) on `ptr addrspace(42) undef`) S_ENDPGM 0 ... @@ -63,7 +63,7 @@ body: | $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr2_sgpr3 $vgpr1 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit $sgpr2_sgpr3, implicit $exec $vgpr2 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - FLAT_ATOMIC_SWAP killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("wavefront-one-as") seq_cst (s32) on `i32 addrspace(42)* undef`) + FLAT_ATOMIC_SWAP killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load store syncscope("wavefront-one-as") seq_cst (s32) on `ptr addrspace(42) undef`) S_ENDPGM 0 ... diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir index aed7814..af4feb9 100644 --- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir +++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-local.mir @@ -13,14 +13,14 @@ name: load_singlethread_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -37,14 +37,14 @@ body: | name: load_singlethread_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -61,14 +61,14 @@ body: | name: load_singlethread_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -85,14 +85,14 @@ body: | name: load_singlethread_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -109,14 +109,14 @@ body: | name: load_wavefront_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -133,14 +133,14 @@ body: | name: load_wavefront_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -157,14 +157,14 @@ body: | name: load_wavefront_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -181,14 +181,14 @@ body: | name: load_wavefront_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -205,14 +205,14 @@ body: | name: load_workgroup_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -229,14 +229,14 @@ body: | name: load_workgroup_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -253,14 +253,14 @@ body: | name: load_workgroup_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -277,14 +277,14 @@ body: | name: load_workgroup_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -301,14 +301,14 @@ body: | name: load_agent_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -325,14 +325,14 @@ body: | name: load_agent_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -349,14 +349,14 @@ body: | name: load_agent_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -373,14 +373,14 @@ body: | name: load_agent_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -397,14 +397,14 @@ body: | name: load_system_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -421,14 +421,14 @@ body: | name: load_system_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -445,14 +445,14 @@ body: | name: load_system_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -469,14 +469,14 @@ body: | name: load_system_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `i32 addrspace(3)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 0, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `ptr addrspace(3) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -493,12 +493,12 @@ body: | name: store_singlethread_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -515,12 +515,12 @@ body: | name: store_singlethread_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -537,12 +537,12 @@ body: | name: store_singlethread_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -559,12 +559,12 @@ body: | name: store_singlethread_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -581,12 +581,12 @@ body: | name: store_wavefront_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -603,12 +603,12 @@ body: | name: store_wavefront_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -625,12 +625,12 @@ body: | name: store_wavefront_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -647,12 +647,12 @@ body: | name: store_wavefront_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -669,12 +669,12 @@ body: | name: store_workgroup_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -691,12 +691,12 @@ body: | name: store_workgroup_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -713,12 +713,12 @@ body: | name: store_workgroup_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -735,12 +735,12 @@ body: | name: store_workgroup_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -757,12 +757,12 @@ body: | name: store_agent_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -779,12 +779,12 @@ body: | name: store_agent_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -801,12 +801,12 @@ body: | name: store_agent_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -823,12 +823,12 @@ body: | name: store_agent_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -845,12 +845,12 @@ body: | name: store_system_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") unordered (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") unordered (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -867,12 +867,12 @@ body: | name: store_system_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") monotonic (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") monotonic (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -889,12 +889,12 @@ body: | name: store_system_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -911,12 +911,12 @@ body: | name: store_system_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -933,12 +933,12 @@ body: | name: atomicrmw_singlethread_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(3)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -955,12 +955,12 @@ body: | name: atomicrmw_singlethread_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(3)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -977,12 +977,12 @@ body: | name: atomicrmw_singlethread_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `i32 addrspace(3)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -999,12 +999,12 @@ body: | name: atomicrmw_singlethread_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(3)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
@@ -1021,12 +1021,12 @@ body: | name: atomicrmw_singlethread_acq_rel body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `i32 addrspace(3)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... @@ -1043,12 +1043,12 @@ body: | name: atomicrmw_singlethread_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(3)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 0, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(3) undef`) S_ENDPGM 0 ... 
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
index 721382c..3223a4b 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
@@ -16,27 +16,27 @@ body: |
     successors: %bb.1(0x30000000), %bb.2(0x50000000)
     liveins: $sgpr0_sgpr1, $sgpr3
 
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
-    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`)
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load (s64) from `ptr addrspace(4) undef`)
     $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
     $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
     $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
     $vgpr0 = V_MOV_B32_e32 1, implicit $exec
-    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(5)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) undef`)
     S_WAITCNT 127
     S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
     $vgpr0 = V_MOV_B32_e32 2, implicit $exec
     $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
-    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(5)* undef`)
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) undef`)
     S_CBRANCH_SCC0 %bb.1, implicit killed $scc
 
   bb.2:
     successors: %bb.3(0x80000000)
     liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     S_WAITCNT 3855
     $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3
@@ -45,7 +45,7 @@ body: |
     successors: %bb.3(0x80000000)
     liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`)
     S_WAITCNT 3855
     $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
@@ -55,11 +55,11 @@ body: |
     S_WAITCNT 127
     $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
     $vgpr0 = V_ADD_CO_U32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
-    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (load syncscope("agent-one-as") unordered (s32) from `i32 addrspace(1)* undef`), (load syncscope("workgroup-one-as") seq_cst (s32) from `[8192 x i32] addrspace(5)* undef`)
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, implicit $exec :: (load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(1) undef`), (load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(5) undef`)
     $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
     $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32 addrspace(1)* undef`)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr addrspace(1) undef`)
     S_ENDPGM 0
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
index f836cc0..13133de 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-region.mir
@@ -13,14 +13,14 @@
 name: load_singlethread_unordered
 body: |
   bb.0:
-    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4)
-    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4)
     $m0 = S_MOV_B32 -1
     $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec
-    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 1, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `i32 addrspace(2)* undef`)
+    renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 1, 0, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") unordered (s32) from `ptr addrspace(2) undef`)
     $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1
     $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec
-    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`)
+    FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`)
     S_ENDPGM 0
 ...
@@ -37,14 +37,14 @@ body: | name: load_singlethread_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") monotonic (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -61,14 +61,14 @@ body: | name: load_singlethread_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") acquire (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -85,14 +85,14 @@ body: | name: load_singlethread_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("singlethread-one-as") seq_cst (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -109,14 +109,14 @@ body: | name: load_wavefront_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") unordered (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -133,14 +133,14 @@ body: | name: load_wavefront_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") monotonic (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -157,14 +157,14 @@ body: | name: load_wavefront_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") acquire (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -181,14 +181,14 @@ body: | name: load_wavefront_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("wavefront-one-as") seq_cst (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -205,14 +205,14 @@ body: | name: load_workgroup_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") unordered (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -229,14 +229,14 @@ body: | name: load_workgroup_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") monotonic (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -253,14 +253,14 @@ body: | name: load_workgroup_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") acquire (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -277,14 +277,14 @@ body: | name: load_workgroup_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("workgroup-one-as") seq_cst (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -301,14 +301,14 @@ body: | name: load_agent_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") unordered (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -325,14 +325,14 @@ body: | name: load_agent_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") monotonic (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -349,14 +349,14 @@ body: | name: load_agent_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") acquire (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -373,14 +373,14 @@ body: | name: load_agent_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("agent-one-as") seq_cst (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -397,14 +397,14 @@ body: | name: load_system_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") unordered (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -421,14 +421,14 @@ body: | name: load_system_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") monotonic (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -445,14 +445,14 @@ body: | name: load_system_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") acquire (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... 
@@ -469,14 +469,14 @@ body: | name: load_system_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, align 4, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec - renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `i32 addrspace(2)* undef`) + renamable $vgpr2 = DS_READ_B32 killed renamable $vgpr0, 0, 1, implicit $m0, implicit $exec :: (volatile load syncscope("one-as") seq_cst (s32) from `ptr addrspace(2) undef`) $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit-def $vgpr0_vgpr1, implicit $sgpr0_sgpr1 $vgpr1 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit $sgpr0_sgpr1, implicit $exec - FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `i32* undef`) + FLAT_STORE_DWORD killed renamable $vgpr0_vgpr1, killed renamable $vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (store (s32) into `ptr undef`) S_ENDPGM 0 ... @@ -493,12 +493,12 @@ body: | name: store_singlethread_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -515,12 +515,12 @@ body: | name: store_singlethread_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -537,12 +537,12 @@ body: | name: store_singlethread_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -559,12 +559,12 @@ body: | name: store_singlethread_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -581,12 +581,12 @@ body: | name: store_wavefront_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") unordered (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -603,12 +603,12 @@ body: | name: store_wavefront_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") monotonic (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -625,12 +625,12 @@ body: | name: store_wavefront_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") release (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -647,12 +647,12 @@ body: | name: store_wavefront_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("wavefront-one-as") seq_cst (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -669,12 +669,12 @@ body: | name: store_workgroup_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") unordered (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -691,12 +691,12 @@ body: | name: store_workgroup_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") monotonic (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -713,12 +713,12 @@ body: | name: store_workgroup_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") release (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -735,12 +735,12 @@ body: | name: store_workgroup_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("workgroup-one-as") seq_cst (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -757,12 +757,12 @@ body: | name: store_agent_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") unordered (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -779,12 +779,12 @@ body: | name: store_agent_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") monotonic (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -801,12 +801,12 @@ body: | name: store_agent_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") release (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -823,12 +823,12 @@ body: | name: store_agent_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("agent-one-as") seq_cst (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -845,12 +845,12 @@ body: | name: store_system_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store unordered (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store unordered (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -867,12 +867,12 @@ body: | name: store_system_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store monotonic (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store monotonic (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -889,12 +889,12 @@ body: | name: store_system_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") release (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -911,12 +911,12 @@ body: | name: store_system_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `i32 addrspace(2)* undef`) + DS_WRITE_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("one-as") seq_cst (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -933,12 +933,12 @@ body: | name: atomicrmw_singlethread_unordered body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `i32 addrspace(2)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") unordered (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -955,12 +955,12 @@ body: | name: atomicrmw_singlethread_monotonic body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `i32 addrspace(2)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") monotonic (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -977,12 +977,12 @@ body: | name: atomicrmw_singlethread_acquire body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `i32 addrspace(2)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acquire (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -999,12 +999,12 @@ body: | name: atomicrmw_singlethread_release body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `i32 addrspace(2)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") release (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... 
@@ -1021,12 +1021,12 @@ body: | name: atomicrmw_singlethread_acq_rel body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `i32 addrspace(2)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") acq_rel (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... @@ -1043,12 +1043,12 @@ body: | name: atomicrmw_singlethread_seq_cst body: | bb.0: - $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, addrspace 4) - $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`, align 8, addrspace 4) + $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, addrspace 4) + $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 40, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`, align 8, addrspace 4) $m0 = S_MOV_B32 -1 $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit $exec $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec - $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `i32 addrspace(2)* undef`) + $vgpr2 = DS_WRXCHG_RTN_B32 killed renamable $vgpr0, killed renamable $vgpr1, 0, 1, implicit $m0, implicit $exec :: (volatile store syncscope("singlethread-one-as") seq_cst (s32) into `ptr addrspace(2) undef`) S_ENDPGM 0 ... diff --git a/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir b/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir index 7d0a273..4feede7 100644 --- a/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir +++ b/llvm/test/CodeGen/AMDGPU/merge-load-store-agpr.mir @@ -7,8 +7,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) - %2:vgpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) + %2:vgpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) ... 
# GCN-LABEL: name: ds_read_b32_a_a @@ -18,8 +18,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - %1:agpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) - %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + %1:agpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) + %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) ... # GCN-LABEL: name: ds_read_b32_v_a @@ -30,8 +30,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) - %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + %1:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) + %2:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) ... # GCN-LABEL: name: ds_read_b32_a_v @@ -42,8 +42,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - %1:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) - %2:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `i32 addrspace(3)* undef`) + %1:agpr_32 = DS_READ_B32_gfx9 %0, 8, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) + %2:vgpr_32 = DS_READ_B32_gfx9 %0, 0, 0, implicit $exec :: (load (s32) from `ptr addrspace(3) undef`) ... # GCN-LABEL: name: ds_write_b32_v_v @@ -53,8 +53,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) - DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) + DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) ... # GCN-LABEL: name: ds_write_b32_a_a @@ -65,8 +65,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) - DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) + DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) ... # GCN-LABEL: name: ds_write_b32_v_a @@ -77,8 +77,8 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) - DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32_gfx9 %0, undef %1:vgpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) + DS_WRITE_B32_gfx9 %0, undef %2:agpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) ... 
# GCN-LABEL: name: ds_write_b32_a_v @@ -89,6 +89,6 @@ body: | bb.0: %0:vgpr_32 = IMPLICIT_DEF - DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) - DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `i32 addrspace(3)* undef`) + DS_WRITE_B32_gfx9 %0, undef %1:agpr_32, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) + DS_WRITE_B32_gfx9 %0, undef %2:vgpr_32, 8, 0, implicit $exec :: (store (s32) into `ptr addrspace(3) undef`) ... diff --git a/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir b/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir index 21149d3..83e841e 100644 --- a/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir +++ b/llvm/test/CodeGen/AMDGPU/merge-out-of-order-ldst.mir @@ -10,13 +10,13 @@ name: out_of_order_merge body: | bb.0: %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec - %5:vreg_64 = DS_READ_B64_gfx9 %4, 776, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef`, addrspace 3) - %6:vreg_64 = DS_READ_B64_gfx9 %4, 784, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef` + 8, addrspace 3) - %17:vreg_64 = DS_READ_B64_gfx9 %4, 840, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef`, addrspace 3) - DS_WRITE_B64_gfx9 %4, %17, 8, 0, implicit $exec :: (store (s64) into `double addrspace(3)* undef` + 8, addrspace 3) - DS_WRITE_B64_gfx9 %4, %6, 0, 0, implicit $exec :: (store (s64) into `double addrspace(3)* undef`, align 16, addrspace 3) - %24:vreg_64 = DS_READ_B64_gfx9 %4, 928, 0, implicit $exec :: (load (s64) from `double addrspace(3)* undef` + 8, addrspace 3) - DS_WRITE_B64_gfx9 undef %29:vgpr_32, %5, 0, 0, implicit $exec :: (store (s64) into `double addrspace(3)* undef`, addrspace 3) + %5:vreg_64 = DS_READ_B64_gfx9 %4, 776, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef`, addrspace 3) + %6:vreg_64 = DS_READ_B64_gfx9 %4, 784, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef` + 8, addrspace 3) + %17:vreg_64 = DS_READ_B64_gfx9 %4, 840, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef`, addrspace 3) + DS_WRITE_B64_gfx9 %4, %17, 8, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) undef` + 8, addrspace 3) + DS_WRITE_B64_gfx9 %4, %6, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) undef`, align 16, addrspace 3) + %24:vreg_64 = DS_READ_B64_gfx9 %4, 928, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) undef` + 8, addrspace 3) + DS_WRITE_B64_gfx9 undef %29:vgpr_32, %5, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) undef`, addrspace 3) S_ENDPGM 0 ... 
diff --git a/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir index 8fa1ab9..c845453 100644 --- a/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir +++ b/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir @@ -4,7 +4,7 @@ # Check that constant is in SGPR registers --- | - define amdgpu_kernel void @const_to_sgpr(i32 addrspace(1)* nocapture %arg, i64 %id) { + define amdgpu_kernel void @const_to_sgpr(ptr addrspace(1) nocapture %arg, i64 %id) { bb: br i1 undef, label %bb1, label %bb2 @@ -15,7 +15,7 @@ ret void } - define amdgpu_kernel void @const_to_sgpr_multiple_use(i32 addrspace(1)* nocapture %arg, i64 %id1, i64 %id2) { + define amdgpu_kernel void @const_to_sgpr_multiple_use(ptr addrspace(1) nocapture %arg, i64 %id1, i64 %id2) { bb: br i1 undef, label %bb1, label %bb2 @@ -26,7 +26,7 @@ ret void } - define amdgpu_kernel void @const_to_sgpr_subreg(i32 addrspace(1)* nocapture %arg, i64 %id) { + define amdgpu_kernel void @const_to_sgpr_subreg(ptr addrspace(1) nocapture %arg, i64 %id) { bb: br i1 undef, label %bb1, label %bb2 diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir index 5c708bd..039580f 100644 --- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir +++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir @@ -6,7 +6,7 @@ br i1 undef, label %if, label %end if: ; preds = %main_body - %v.if = load volatile i32, i32 addrspace(1)* undef + %v.if = load volatile i32, ptr addrspace(1) undef br label %end end: ; preds = %if, %main_body diff --git a/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir b/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir index c76effa..83c3050 100644 --- a/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir +++ b/llvm/test/CodeGen/AMDGPU/phi-elimination-end-cf.mir @@ -46,7 +46,7 @@ body: | %15:sreg_32_xm0 = S_MOV_B32 61440 %16:sreg_32_xm0 = S_MOV_B32 -1 %17:sgpr_128 = REG_SEQUENCE undef %14:sreg_32_xm0, %subreg.sub0, undef %12:sreg_32_xm0, %subreg.sub1, %16, %subreg.sub2, %15, %subreg.sub3 - BUFFER_STORE_DWORD_OFFSET %4, %17, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1) + BUFFER_STORE_DWORD_OFFSET %4, %17, 0, 0, 0, 0, implicit $exec :: (volatile store (s32) into `ptr addrspace(1) undef`, addrspace 1) %19:vgpr_32 = COPY %4 %20:sreg_64 = SI_IF %0, %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec S_BRANCH %bb.3 diff --git a/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir b/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir index 4f54131..885907e 100644 --- a/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir +++ b/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir @@ -8,7 +8,7 @@ # CHECK: DBG_VALUE{{.*}} %13.sub2 --- | - define amdgpu_kernel void @test(i32 addrspace(1)* %out) { ret void } + define amdgpu_kernel void @test(ptr addrspace(1) %out) { ret void } !0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !4, producer: "llvm", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4, retainedTypes: !4) !1 = !DILocalVariable(name: "a", scope: !2, file: !4, line: 126, type: !6) @@ -56,8 +56,8 @@ body: | %3 = COPY killed $vgpr0 %0 = COPY killed $sgpr0_sgpr1 - %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`) - %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`) + %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal 
dereferenceable invariant load (s64) from `ptr addrspace(4) undef`) + %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`) %18 = V_ASHRREV_I32_e32 31, %3, implicit $exec undef %19.sub0 = COPY killed %3 %19.sub1 = COPY killed %18 diff --git a/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll b/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll index 959bc7f..9d77979 100644 --- a/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll +++ b/llvm/test/CodeGen/AMDGPU/scc-clobbered-sgpr-to-vmem-spill.ll @@ -16,7 +16,7 @@ ; CHECK-NEXT: - basic block: %bb.0 ; CHECK-NEXT: - instruction: S_CBRANCH_SCC1 %bb.2, implicit killed $scc ; CHECK-NEXT: - operand 1: implicit killed $scc -define amdgpu_kernel void @kernel0(i32 addrspace(1)* %out, i32 %in) #1 { +define amdgpu_kernel void @kernel0(ptr addrspace(1) %out, i32 %in) #1 { call void asm sideeffect "", "~{v[0:7]}" () #0 call void asm sideeffect "", "~{v[8:15]}" () #0 call void asm sideeffect "", "~{v[16:19]}"() #0 diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir index 56ab64e..4f293d9 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-post-RA.mir @@ -2,9 +2,9 @@ # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=postmisched -verify-misched -o - %s | FileCheck %s --- | - define amdgpu_kernel void @no_sched_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @no_sched_barrier(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_0(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_1(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } !0 = distinct !{!0} !1 = !{!1, !0} diff --git a/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir index fea30a3..d846516 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-barrier-pre-RA.mir @@ -2,21 +2,21 @@ # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s --- | - define amdgpu_kernel void @no_sched_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_0(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_2(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_4(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_8(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_16(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define 
amdgpu_kernel void @sched_barrier_mask_32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_64(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_128(i32 addrspace(3)* noalias %out, i32 addrspace(3)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_256(i32 addrspace(3)* noalias %out, i32 addrspace(3)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_512(i32 addrspace(3)* noalias %out, i32 addrspace(3)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_masks_8_12(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_4_bundle(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_barrier_mask_0_bundle(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @no_sched_barrier(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_0(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_1(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_2(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_4(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_8(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_16(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_32(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_64(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_128(ptr addrspace(3) noalias %out, ptr addrspace(3) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_256(ptr addrspace(3) noalias %out, ptr addrspace(3) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_512(ptr addrspace(3) noalias %out, ptr addrspace(3) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_masks_8_12(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_4_bundle(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_barrier_mask_0_bundle(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } !0 = distinct !{!0} !1 = !{!1, !0} diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir index bf52d6f..67cc6ed 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pipeline-solver.mir @@ -3,10 +3,10 @@ # RUN: llc -march=amdgcn -mcpu=gfx908 -amdgpu-igrouplp-exact-solver -run-pass=machine-scheduler -o - %s | FileCheck -check-prefix=EXACT %s --- | - define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 
addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_2_VMEM_10_ALU_5_MFMA_2_VMEM_WRITE(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_2_separate_pipes(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_3_separate_pipes(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } !0 = distinct !{!0} !1 = !{!1, !0} diff --git a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir index 6c10c8f..dc3aae8 100644 --- a/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir +++ b/llvm/test/CodeGen/AMDGPU/sched-group-barrier-pre-RA.mir @@ -3,10 +3,10 @@ # RUN: llc -march=amdgcn -mcpu=gfx908 -misched-cluster=false -run-pass=machine-scheduler -amdgpu-igrouplp-exact-solver -verify-misched -o - %s | FileCheck -check-prefix=EXACT %s --- | - define amdgpu_kernel void @no_sched_group_barrier(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } - define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @no_sched_group_barrier(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_1_VMEM_READ_1_VALU_5_MFMA_1_VMEM_READ_3_VALU_2_VMEM_WRITE(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_2_VMEM_1000_ALU_5_MFMA_2_VMEM_WRITE(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } + define amdgpu_kernel void @sched_group_barrier_MFMA_VALU_and_SALU_alternating(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } !0 = distinct !{!0} !1 = !{!1, !0} diff --git a/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir b/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir index c2e6aae..f1a8af4 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir +++ b/llvm/test/CodeGen/AMDGPU/schedule-ilp.mir @@ -2,7 +2,7 @@ # RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs -run-pass=machine-scheduler -verify-misched -o - %s | FileCheck %s --- | - define amdgpu_kernel void @schedule_ilp(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in) { ret void } + define amdgpu_kernel void @schedule_ilp(ptr addrspace(1) noalias %out, ptr addrspace(1) noalias %in) { ret void } !0 = distinct !{!0} !1 = !{!1, !0} diff --git 
a/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir b/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir index 469a9ee..b4755c3 100644 --- a/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir +++ b/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir @@ -62,7 +62,7 @@ body: | liveins: $sgpr4_sgpr5 %1 = COPY $sgpr4_sgpr5 - %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s32) from `i32 addrspace(4)* undef`) + %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load (s32) from `ptr addrspace(4) undef`) $m0 = S_MOV_B32 -1 %7 = COPY %5 %6 = DS_READ_B32 %7, 0, 0, implicit $m0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir index 84e890d3..efbdbca 100644 --- a/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir +++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill-wrong-stack-id.mir @@ -11,9 +11,9 @@ --- | - define void @sgpr_spill_wrong_stack_id(float addrspace(1)* nocapture readnone %arg, float addrspace(1)* noalias %arg1) { + define void @sgpr_spill_wrong_stack_id(ptr addrspace(1) nocapture readnone %arg, ptr addrspace(1) noalias %arg1) { bb: - %tmp = load i32, i32 addrspace(1)* null, align 4 + %tmp = load i32, ptr addrspace(1) null, align 4 call void @func(i32 undef) call void @func(i32 %tmp) unreachable diff --git a/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir b/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir index 3c3ce44..a0e20da 100644 --- a/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir +++ b/llvm/test/CodeGen/AMDGPU/smem-no-clause-coalesced.mir @@ -36,7 +36,7 @@ body: | %3.sub1:sgpr_128 = S_AND_B32 %2, 65535, implicit-def dead $scc %3.sub3:sgpr_128 = S_MOV_B32 151468 %3.sub2:sgpr_128 = S_MOV_B32 -1 - %7.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %7, 48, 0 :: (load (s32) from `i8 addrspace(4)* undef`, addrspace 4) + %7.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %7, 48, 0 :: (load (s32) from `ptr addrspace(4) undef`, addrspace 4) %8:sreg_64_xexec = S_BUFFER_LOAD_DWORDX2_IMM %3, 640, 0 :: (dereferenceable invariant load (s64)) undef %9.sub0:vreg_128 = V_LSHL_ADD_U32_e64 %6, 4, %4, implicit $exec %9.sub1:vreg_128 = V_LSHL_ADD_U32_e64 %5, 4, %0, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir b/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir index 103a8c0..f985301 100644 --- a/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir +++ b/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir @@ -44,7 +44,7 @@ body: | liveins: $vgpr0, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13 $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec - $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `i1 addrspace(4)* undef`) + $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `ptr addrspace(4) undef`) $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec $vgpr1 = V_CNDMASK_B32_e64 0, 0, 0, -1, killed $sgpr0_sgpr1, implicit $exec @@ -109,7 +109,7 @@ body: | liveins: $vgpr0, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr13 $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec - $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load (s8) from `i1 addrspace(4)* undef`) + $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, implicit 
$exec :: (non-temporal dereferenceable invariant load (s8) from `ptr addrspace(4) undef`) $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec $vgpr1 = V_CNDMASK_B32_e64 0, 0, 0, -1, killed $sgpr0_sgpr1, implicit $exec diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir index 9841a8c..fe0afd2 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir +++ b/llvm/test/CodeGen/AMDGPU/waitcnt-agpr.mir @@ -2,10 +2,10 @@ # RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass si-insert-waitcnts -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s --- | - define amdgpu_kernel void @flat_zero_waitcnt(i32 addrspace(1)* %global4, - <4 x i32> addrspace(1)* %global16, - i32* %flat4, - <4 x i32>* %flat16) { + define amdgpu_kernel void @flat_zero_waitcnt(ptr addrspace(1) %global4, + ptr addrspace(1) %global16, + ptr %flat4, + ptr %flat16) { ret void } diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir index 9619808..cfad6f4 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir +++ b/llvm/test/CodeGen/AMDGPU/waitcnt-back-edge-loop.mir @@ -13,8 +13,8 @@ body: | $vgpr1 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr1_vgpr2 $vgpr2 = V_MOV_B32_e32 0, implicit $exec, implicit-def $vgpr1_vgpr2 - $vgpr4 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1) - $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1) + $vgpr4 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1) + $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1) $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 3, killed $sgpr4, implicit $exec $vgpr3 = V_CNDMASK_B32_e64 0, -1082130432, 0, 1065353216, killed $sgpr0_sgpr1, implicit $exec $vgpr5 = V_MOV_B32_e32 $vgpr0, implicit $exec, implicit $exec @@ -23,7 +23,7 @@ body: | bb.3: successors: %bb.1 - $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1) + $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1) bb.1: successors: %bb.5, %bb.2 @@ -43,7 +43,7 @@ body: | bb.4: successors: %bb.3, %bb.1 - $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `float addrspace(1)* null`, addrspace 1) + $vgpr5 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load (s32) from `ptr addrspace(1) null`, addrspace 1) $vgpr4 = V_CVT_I32_F32_e32 $vgpr5, implicit $mode, implicit $exec V_CMP_EQ_U32_e32 2, killed $vgpr4, implicit-def $vcc, implicit $exec $vcc = S_AND_B64 $exec, killed $vcc, implicit-def dead $scc diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir index d20d730..01ebe44 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir +++ b/llvm/test/CodeGen/AMDGPU/waitcnt-vscnt.mir @@ -12,7 +12,7 @@ machineFunctionInfo: body: | bb.0: liveins: $sgpr0_sgpr1 - $sgpr4 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 :: (dereferenceable invariant load (s32) from `i32 
addrspace(4)* undef`) + $sgpr4 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 4, 0 :: (dereferenceable invariant load (s32) from `ptr addrspace(4) undef`) S_WAITCNT_VSCNT undef $sgpr_null, 0 $vgpr0 = GLOBAL_ATOMIC_ADD_RTN $vgpr0_vgpr1, $vgpr2, 0, 1, implicit $exec :: (load store syncscope("agent") seq_cst (s32), addrspace 1) S_CMP_LG_U32 killed $sgpr4, 0, implicit-def $scc diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt.mir b/llvm/test/CodeGen/AMDGPU/waitcnt.mir index b7da711..5cc43fd 100644 --- a/llvm/test/CodeGen/AMDGPU/waitcnt.mir +++ b/llvm/test/CodeGen/AMDGPU/waitcnt.mir @@ -2,10 +2,10 @@ # RUN: llc -march=amdgcn -mcpu=gfx900 -run-pass si-insert-waitcnts %s -o - | FileCheck -check-prefixes=CHECK,GFX89 %s --- | - define amdgpu_kernel void @flat_zero_waitcnt(i32 addrspace(1)* %global4, - <4 x i32> addrspace(1)* %global16, - i32* %flat4, - <4 x i32>* %flat16) { + define amdgpu_kernel void @flat_zero_waitcnt(ptr addrspace(1) %global4, + ptr addrspace(1) %global16, + ptr %flat4, + ptr %flat16) { ret void } @@ -350,12 +350,12 @@ body: | name: waitcnt_backedge body: | bb.0: - renamable $sgpr4_sgpr5_sgpr6_sgpr7 = S_LOAD_DWORDX4_IMM renamable $sgpr2_sgpr3, 32, 0 :: (load (s128) from `i32 addrspace(4)* undef`, addrspace 4) + renamable $sgpr4_sgpr5_sgpr6_sgpr7 = S_LOAD_DWORDX4_IMM renamable $sgpr2_sgpr3, 32, 0 :: (load (s128) from `ptr addrspace(4) undef`, addrspace 4) bb.4: renamable $sgpr10_sgpr11 = S_CSELECT_B64 -1, 0, implicit killed $scc renamable $vgpr1 = BUFFER_LOAD_DWORD_OFFEN killed renamable $vgpr5, renamable $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 1, 0, implicit $exec :: (dereferenceable load (s32), align 1, addrspace 4) - renamable $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM killed renamable $sgpr0_sgpr1, 0, 0 :: (load (s64) from `i32 addrspace(4)* undef`, align 4, addrspace 4) + renamable $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM killed renamable $sgpr0_sgpr1, 0, 0 :: (load (s64) from `ptr addrspace(4) undef`, align 4, addrspace 4) S_CBRANCH_SCC0 %bb.9, implicit killed $scc bb.9: diff --git a/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir b/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir index d3c58ad..812ac23 100644 --- a/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir +++ b/llvm/test/CodeGen/ARM/ARMLoadStoreDBG.mir @@ -8,17 +8,17 @@ %struct.s = type opaque ; Function Attrs: nounwind - define arm_aapcscc i32 @f(%struct.s* %s, i32 %u, i8* %b, i32 %n) #0 !dbg !4 { + define arm_aapcscc i32 @f(ptr %s, i32 %u, ptr %b, i32 %n) #0 !dbg !4 { entry: - tail call void @llvm.dbg.value(metadata %struct.s* %s, i64 0, metadata !18, metadata !27), !dbg !28 + tail call void @llvm.dbg.value(metadata ptr %s, i64 0, metadata !18, metadata !27), !dbg !28 tail call void @llvm.dbg.value(metadata i32 %u, i64 0, metadata !19, metadata !27), !dbg !28 - tail call void @llvm.dbg.value(metadata i8* %b, i64 0, metadata !20, metadata !27), !dbg !28 + tail call void @llvm.dbg.value(metadata ptr %b, i64 0, metadata !20, metadata !27), !dbg !28 tail call void @llvm.dbg.value(metadata i32 %n, i64 0, metadata !21, metadata !27), !dbg !28 %cmp = icmp ult i32 %n, 4, !dbg !29 br i1 %cmp, label %return, label %if.end, !dbg !31 if.end: ; preds = %entry - tail call arm_aapcscc void @g(%struct.s* %s, i8* %b, i32 %n) #3, !dbg !32 + tail call arm_aapcscc void @g(ptr %s, ptr %b, i32 %n) #3, !dbg !32 br label %return, !dbg !33 return: ; preds = %if.end, %entry @@ -26,7 +26,7 @@ ret i32 %retval.0, !dbg !34 } - declare arm_aapcscc void @g(%struct.s*, i8*, i32) #1 + declare arm_aapcscc void @g(ptr, ptr, i32) #1 ; Function Attrs: nounwind readnone declare void @llvm.dbg.value(metadata, 
i64, metadata, metadata) #2 diff --git a/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir b/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir index 6bd35b05..a6fc4da 100644 --- a/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir +++ b/llvm/test/CodeGen/ARM/GlobalISel/select-pr35926.mir @@ -11,7 +11,7 @@ } ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #2 + declare void @llvm.stackprotector(ptr, ptr) #2 attributes #0 = { nounwind readnone speculatable "target-features"="+vfp4" } attributes #1 = { "target-features"="+vfp4" } diff --git a/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir b/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir index 96a0234..0b44526 100644 --- a/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir +++ b/llvm/test/CodeGen/ARM/cmp2-peephole-thumb.mir @@ -15,21 +15,21 @@ %retval = alloca i32, align 4 %mul = alloca i32, align 4 %mul1 = mul nsw i32 %a, %b - store i32 %mul1, i32* %mul, align 4 - %0 = load i32, i32* %mul, align 4 + store i32 %mul1, ptr %mul, align 4 + %0 = load i32, ptr %mul, align 4 %cmp = icmp sle i32 %0, 0 br i1 %cmp, label %if.then, label %if.end if.then: ; preds = %entry - store i32 42, i32* %retval, align 4 + store i32 42, ptr %retval, align 4 br label %return if.end: ; preds = %entry - store i32 1, i32* %retval, align 4 + store i32 1, ptr %retval, align 4 br label %return return: ; preds = %if.end, %if.then - %1 = load i32, i32* %retval, align 4 + %1 = load i32, ptr %retval, align 4 ret i32 %1 } diff --git a/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir b/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir index a5774e4..47f4e1a 100644 --- a/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir +++ b/llvm/test/CodeGen/ARM/cmse-clear-float-bigend.mir @@ -4,17 +4,17 @@ target triple = "thumbebv8m.main-arm-none-eabi" ; Function Attrs: cmse_nonsecure_entry nounwind - define hidden arm_aapcs_vfpcc void @secure_foo(void (double, double, double, double, double, double, double, double)* %fptr) local_unnamed_addr #0 { + define hidden arm_aapcs_vfpcc void @secure_foo(ptr %fptr) local_unnamed_addr #0 { entry: - %0 = ptrtoint void (double, double, double, double, double, double, double, double)* %fptr to i32 + %0 = ptrtoint ptr %fptr to i32 %and = and i32 %0, -2 - %1 = inttoptr i32 %and to void (double, double, double, double, double, double, double, double)* + %1 = inttoptr i32 %and to ptr call arm_aapcs_vfpcc void %1(double 0.000000e+00, double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00, double 5.000000e+00, double 6.000000e+00, double 7.000000e+00) #2 ret void } ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #1 + declare void @llvm.stackprotector(ptr, ptr) #1 attributes #0 = { "cmse_nonsecure_entry" nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="generic" "target-features"="+8msecext,+armv8-m.main,-d32,-fp64,+fp-armv8,+hwdiv,+thumb-mode,-crypto,-fullfp16,-neon" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind } diff --git a/llvm/test/CodeGen/ARM/codesize-ifcvt.mir b/llvm/test/CodeGen/ARM/codesize-ifcvt.mir index 32c4254..3acbcf1 100644 --- a/llvm/test/CodeGen/ARM/codesize-ifcvt.mir +++ b/llvm/test/CodeGen/ARM/codesize-ifcvt.mir @@ -14,7 
+14,7 @@ br label %b5 b3: ; preds = %b1 - %v1 = load i32, i32* undef, align 4 + %v1 = load i32, ptr undef, align 4 %v2 = and i32 %v1, 256 br label %b5 @@ -48,7 +48,7 @@ br label %b5 b3: ; preds = %b1 - %v1 = load i32, i32* undef, align 4 + %v1 = load i32, ptr undef, align 4 %v2 = and i32 %v1, 256 br label %b5 @@ -82,7 +82,7 @@ br label %b5 b3: ; preds = %b1 - %v1 = load i32, i32* undef, align 4 + %v1 = load i32, ptr undef, align 4 %v2 = and i32 %v1, 256 br label %b5 @@ -106,7 +106,7 @@ declare i32 @extfunc() ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #2 + declare void @llvm.stackprotector(ptr, ptr) #2 attributes #0 = { optsize } attributes #1 = { minsize } @@ -179,7 +179,7 @@ body: | ; CHECK-V7: t2B %bb.4, 14 /* CC::al */, $noreg ; CHECK-V7: bb.3.b3: ; CHECK-V7: successors: %bb.4(0x80000000) - ; CHECK-V7: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`) + ; CHECK-V7: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`) ; CHECK-V7: renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg ; CHECK-V7: bb.4.b5: ; CHECK-V7: successors: %bb.5(0x50000000) @@ -213,7 +213,7 @@ body: | ; CHECK-V8: t2B %bb.4, 14 /* CC::al */, $noreg ; CHECK-V8: bb.3.b3: ; CHECK-V8: successors: %bb.4(0x80000000) - ; CHECK-V8: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`) + ; CHECK-V8: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`) ; CHECK-V8: renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg ; CHECK-V8: bb.4.b5: ; CHECK-V8: successors: %bb.5(0x50000000) @@ -253,7 +253,7 @@ body: | bb.3.b3: successors: %bb.4(0x80000000) - renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `i32* undef`) + renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `ptr undef`) renamable $r0 = t2ANDri killed renamable $r0, 256, 14, $noreg, $noreg bb.4.b5: @@ -341,7 +341,7 @@ body: | ; CHECK-V7: t2B %bb.4, 14 /* CC::al */, $noreg ; CHECK-V7: bb.3.b3: ; CHECK-V7: successors: %bb.4(0x80000000) - ; CHECK-V7: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`) + ; CHECK-V7: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`) ; CHECK-V7: renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg ; CHECK-V7: bb.4.b5: ; CHECK-V7: successors: %bb.5(0x30000000), %bb.6(0x50000000) @@ -378,7 +378,7 @@ body: | ; CHECK-V8: t2B %bb.4, 14 /* CC::al */, $noreg ; CHECK-V8: bb.3.b3: ; CHECK-V8: successors: %bb.4(0x80000000) - ; CHECK-V8: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`) + ; CHECK-V8: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`) ; CHECK-V8: renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg ; CHECK-V8: bb.4.b5: ; CHECK-V8: successors: %bb.5(0x30000000), %bb.6(0x50000000) @@ -421,7 +421,7 @@ body: | bb.3.b3: successors: %bb.4(0x80000000) - renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `i32* undef`) + renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `ptr undef`) renamable $r0 = t2ANDri killed renamable $r0, 256, 14, $noreg, $noreg 
bb.4.b5: @@ -509,7 +509,7 @@ body: | ; CHECK-V7: t2B %bb.4, 14 /* CC::al */, $noreg ; CHECK-V7: bb.3.b3: ; CHECK-V7: successors: %bb.4(0x80000000) - ; CHECK-V7: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`) + ; CHECK-V7: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`) ; CHECK-V7: renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg ; CHECK-V7: bb.4.b5: ; CHECK-V7: successors: %bb.5(0x30000000), %bb.6(0x50000000) @@ -546,7 +546,7 @@ body: | ; CHECK-V8: t2B %bb.4, 14 /* CC::al */, $noreg ; CHECK-V8: bb.3.b3: ; CHECK-V8: successors: %bb.4(0x80000000) - ; CHECK-V8: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `i32* undef`) + ; CHECK-V8: renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14 /* CC::al */, $noreg :: (load (s32) from `ptr undef`) ; CHECK-V8: renamable $r0 = t2ANDri killed renamable $r0, 256, 14 /* CC::al */, $noreg, $noreg ; CHECK-V8: bb.4.b5: ; CHECK-V8: successors: %bb.5(0x30000000), %bb.6(0x50000000) @@ -589,7 +589,7 @@ body: | bb.3.b3: successors: %bb.4(0x80000000) - renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `i32* undef`) + renamable $r0 = t2LDRi12 undef renamable $r0, 0, 14, $noreg :: (load (s32) from `ptr undef`) renamable $r0 = t2ANDri killed renamable $r0, 256, 14, $noreg, $noreg bb.4.b5: diff --git a/llvm/test/CodeGen/ARM/const-load-align-thumb.mir b/llvm/test/CodeGen/ARM/const-load-align-thumb.mir index 7b2697d..3bab489 100644 --- a/llvm/test/CodeGen/ARM/const-load-align-thumb.mir +++ b/llvm/test/CodeGen/ARM/const-load-align-thumb.mir @@ -6,8 +6,8 @@ define hidden i32 @main() { entry: %P5 = alloca half, align 2 - store half 0xH3FE0, half* %P5, align 2 - %0 = load half, half* %P5, align 2 + store half 0xH3FE0, ptr %P5, align 2 + %0 = load half, ptr %P5, align 2 call void @z_bar(half %0) ret i32 0 } diff --git a/llvm/test/CodeGen/ARM/dbg-range-extension.mir b/llvm/test/CodeGen/ARM/dbg-range-extension.mir index 75eb466..04bfc1d 100644 --- a/llvm/test/CodeGen/ARM/dbg-range-extension.mir +++ b/llvm/test/CodeGen/ARM/dbg-range-extension.mir @@ -104,7 +104,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #2 ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #3 + declare void @llvm.stackprotector(ptr, ptr) #3 attributes #0 = { minsize nounwind optsize } attributes #1 = { minsize optsize } diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir index d170300..8e671c9 100644 --- a/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool-arm.mir @@ -10,17 +10,17 @@ declare i32 @llvm.arm.space(i32, i32) #0 - define dso_local i32 @ARM(i64* %LL, i32 %A.coerce) local_unnamed_addr #1 { + define dso_local i32 @ARM(ptr %LL, i32 %A.coerce) local_unnamed_addr #1 { entry: %S = alloca half, align 2 %tmp.0.extract.trunc = trunc i32 %A.coerce to i16 %0 = bitcast i16 %tmp.0.extract.trunc to half - store volatile half 0xH3C00, half* %S, align 2 - store volatile i64 4242424242424242, i64* %LL, align 8 + store volatile half 0xH3C00, ptr %S, align 2 + store volatile i64 4242424242424242, ptr %LL, align 8 %1 = call i32 @llvm.arm.space(i32 8920, i32 undef) - %S.0.S.0.570 = load volatile half, half* %S, align 2 + %S.0.S.0.570 = load volatile half, ptr %S, align 2 %add298 = fadd half %S.0.S.0.570, 0xH2E66 - store volatile half %add298, half* %S, align 2 + store 
volatile half %add298, ptr %S, align 2 %2 = call i32 @llvm.arm.space(i32 1350, i32 undef) %3 = bitcast half %add298 to i16 %tmp343.0.insert.ext = zext i16 %3 to i32 diff --git a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir index ca89912..03ddd80 100644 --- a/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool-thumb.mir @@ -16,14 +16,14 @@ %S = alloca half, align 2 %tmp.0.extract.trunc = trunc i32 %A.coerce to i16 %0 = bitcast i16 %tmp.0.extract.trunc to half - store volatile float 4.200000e+01, float* %F, align 4 - store volatile half 0xH3C00, half* %S, align 2 - %S.0.S.0.142 = load volatile half, half* %S, align 2 + store volatile float 4.200000e+01, ptr %F, align 4 + store volatile half 0xH3C00, ptr %S, align 2 + %S.0.S.0.142 = load volatile half, ptr %S, align 2 %1 = call i32 @llvm.arm.space(i32 1230, i32 undef) %add42 = fadd half %S.0.S.0.142, 0xH2E66 - store volatile half %add42, half* %S, align 2 + store volatile half %add42, ptr %S, align 2 %2 = call i32 @llvm.arm.space(i32 1330, i32 undef) - %S.0.S.0.119 = load volatile half, half* %S, align 2 + %S.0.S.0.119 = load volatile half, ptr %S, align 2 %3 = bitcast half %add42 to i16 %tmp87.0.insert.ext = zext i16 %3 to i32 ret i32 %tmp87.0.insert.ext diff --git a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir index 065cc3b..bd343eb 100644 --- a/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool2-arm.mir @@ -14,8 +14,8 @@ define dso_local i32 @CP() #1 { entry: %res = alloca half, align 2 - store half 0xH706B, half* %res, align 2 - %0 = load half, half* %res, align 2 + store half 0xH706B, ptr %res, align 2 + %0 = load half, ptr %res, align 2 %tobool = fcmp une half %0, 0xH0000 br i1 %tobool, label %LA, label %END @@ -29,7 +29,7 @@ } ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #2 + declare void @llvm.stackprotector(ptr, ptr) #2 attributes #0 = { nounwind "target-features"="+v8.2a,+fullfp16" } attributes #1 = { "target-features"="+v8.2a,+fullfp16" } diff --git a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir index 38348e5..1f8e6b0 100644 --- a/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir +++ b/llvm/test/CodeGen/ARM/fp16-litpool3-arm.mir @@ -15,8 +15,8 @@ define dso_local i32 @CP() #1 { entry: %res = alloca half, align 2 - store half 0xH706B, half* %res, align 2 - %0 = load half, half* %res, align 2 + store half 0xH706B, ptr %res, align 2 + %0 = load half, ptr %res, align 2 %tobool = fcmp une half %0, 0xH0000 br i1 %tobool, label %LA, label %END @@ -30,7 +30,7 @@ } ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #2 + declare void @llvm.stackprotector(ptr, ptr) #2 attributes #0 = { nounwind "target-features"="+v8.2a,+fullfp16" } attributes #1 = { "target-features"="+v8.2a,+fullfp16" } diff --git a/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir b/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir index 98a41ce..ab788f7 100644 --- a/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir +++ b/llvm/test/CodeGen/ARM/ifcvt-diamond-unanalyzable-common.mir @@ -6,11 +6,11 @@ --- | target triple = "thumbv7-unknown-linux-gnueabi" - define dso_local i8* @fn1() { + define dso_local ptr @fn1() { entry: br label %l_yes l_yes: - ret i8* blockaddress(@fn1, %l_yes) + ret ptr blockaddress(@fn1, %l_yes) } declare dso_local i32 @fn2(...) 
diff --git a/llvm/test/CodeGen/ARM/machine-sink-multidef.mir b/llvm/test/CodeGen/ARM/machine-sink-multidef.mir index 5952538..a91cede 100644 --- a/llvm/test/CodeGen/ARM/machine-sink-multidef.mir +++ b/llvm/test/CodeGen/ARM/machine-sink-multidef.mir @@ -9,12 +9,12 @@ @e = external constant [2 x %struct.anon], align 4 - define arm_aapcscc void @g(i32 * noalias %a, i32 *%b, i32 %x) { + define arm_aapcscc void @g(ptr noalias %a, ptr %b, i32 %x) { entry: - %c = getelementptr inbounds [2 x %struct.anon], [2 x %struct.anon]* @e, i32 0, i32 %x, i32 0 - %l1 = load i32, i32* %c, align 4 - %d = getelementptr inbounds [2 x %struct.anon], [2 x %struct.anon]* @e, i32 0, i32 %x, i32 1 - %l2 = load i32, i32* %d, align 4 + %c = getelementptr inbounds [2 x %struct.anon], ptr @e, i32 0, i32 %x, i32 0 + %l1 = load i32, ptr %c, align 4 + %d = getelementptr inbounds [2 x %struct.anon], ptr @e, i32 0, i32 %x, i32 1 + %l2 = load i32, ptr %d, align 4 br i1 undef, label %land.lhs.true, label %if.end land.lhs.true: ; preds = %entry diff --git a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir index 06b34ac..be5340b 100644 --- a/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir +++ b/llvm/test/CodeGen/ARM/misched-int-basic-thumb2.mir @@ -17,11 +17,11 @@ define i64 @foo(i16 signext %a, i16 signext %b) { entry: - %0 = load i32, i32* @g1, align 4 - %1 = load i32, i32* @g2, align 4 + %0 = load i32, ptr @g1, align 4 + %1 = load i32, ptr @g2, align 4 %2 = add nuw nsw i32 %0, %0 %3 = sdiv i32 %2, %1 - store i32 %3, i32* @g1, align 4 + store i32 %3, ptr @g1, align 4 %d = mul nsw i16 %a, %a %e = mul nsw i16 %b, %b %f = add nuw nsw i16 %e, %d diff --git a/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir b/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir index 2cf0bc8..07f12c0 100644 --- a/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir +++ b/llvm/test/CodeGen/ARM/noreturn-csr-skip.mir @@ -10,7 +10,7 @@ define void @noret() noreturn nounwind { start: %p = alloca i32 - store i32 42, i32* %p + store i32 42, ptr %p unreachable } ... diff --git a/llvm/test/CodeGen/ARM/pei-swiftself.mir b/llvm/test/CodeGen/ARM/pei-swiftself.mir index cd75589..f7b702f 100644 --- a/llvm/test/CodeGen/ARM/pei-swiftself.mir +++ b/llvm/test/CodeGen/ARM/pei-swiftself.mir @@ -1,6 +1,6 @@ # RUN: llc -o - %s -mtriple=arm-- -run-pass prologepilog | FileCheck %s --- | - define swiftcc i8* @need_emergency_slot(i8 *swiftself %v) { + define swiftcc ptr @need_emergency_slot(ptr swiftself %v) { ; Just a dummy to add a swiftself bit. The real code is in the MI below. 
unreachable } diff --git a/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir b/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir index 0d1ea48..689aa3d 100644 --- a/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir +++ b/llvm/test/CodeGen/ARM/prera-ldst-aliasing.mir @@ -2,14 +2,14 @@ --- | target triple = "thumbv7---eabi" - define void @ldrd_strd_aa(i32* noalias nocapture %x, i32* noalias nocapture readonly %y) { + define void @ldrd_strd_aa(ptr noalias nocapture %x, ptr noalias nocapture readonly %y) { entry: - %0 = load i32, i32* %y, align 4 - store i32 %0, i32* %x, align 4 - %arrayidx2 = getelementptr inbounds i32, i32* %y, i32 1 - %1 = load i32, i32* %arrayidx2, align 4 - %arrayidx3 = getelementptr inbounds i32, i32* %x, i32 1 - store i32 %1, i32* %arrayidx3, align 4 + %0 = load i32, ptr %y, align 4 + store i32 %0, ptr %x, align 4 + %arrayidx2 = getelementptr inbounds i32, ptr %y, i32 1 + %1 = load i32, ptr %arrayidx2, align 4 + %arrayidx3 = getelementptr inbounds i32, ptr %x, i32 1 + store i32 %1, ptr %arrayidx3, align 4 ret void } ... diff --git a/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir b/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir index 42a6fda..7d93945 100644 --- a/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir +++ b/llvm/test/CodeGen/ARM/prera-ldst-insertpt.mir @@ -4,12 +4,12 @@ --- | target triple = "thumbv7---eabi" - define void @a(i32* nocapture %x, i32 %y, i32 %z) { + define void @a(ptr nocapture %x, i32 %y, i32 %z) { entry: ret void } - define void @b(i32* nocapture %x, i32 %y, i32 %z) { + define void @b(ptr nocapture %x, i32 %y, i32 %z) { entry: ret void } diff --git a/llvm/test/CodeGen/ARM/single-issue-r52.mir b/llvm/test/CodeGen/ARM/single-issue-r52.mir index af393be..05b1673 100644 --- a/llvm/test/CodeGen/ARM/single-issue-r52.mir +++ b/llvm/test/CodeGen/ARM/single-issue-r52.mir @@ -9,14 +9,14 @@ %struct.__neon_int8x8x4_t = type { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } ; Function Attrs: nounwind - define <8 x i8> @foo(i8* %A) { - %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8* %A, i32 8) + define <8 x i8> @foo(ptr %A) { + %tmp1 = call %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0(ptr %A, i32 8) %tmp2 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 0 %tmp3 = extractvalue %struct.__neon_int8x8x4_t %tmp1, 1 %tmp4 = add <8 x i8> %tmp2, %tmp3 ret <8 x i8> %tmp4 } - declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0i8(i8*, i32) + declare %struct.__neon_int8x8x4_t @llvm.arm.neon.vld4.v8i8.p0(ptr, i32) # CHECK: ********** MI Scheduling ********** # CHECK: ScheduleDAGMILive::schedule starting diff --git a/llvm/test/CodeGen/ARM/stack_frame_offset.mir b/llvm/test/CodeGen/ARM/stack_frame_offset.mir index f2cdb80..e387e079 100644 --- a/llvm/test/CodeGen/ARM/stack_frame_offset.mir +++ b/llvm/test/CodeGen/ARM/stack_frame_offset.mir @@ -8,26 +8,26 @@ define i32 @testpos() { entry: %a = alloca i32, align 4 - call void @other(i32* %a) - %b = load i32, i32* %a, align 4 + call void @other(ptr %a) + %b = load i32, ptr %a, align 4 ret i32 %b } define i32 @testneg4() { entry: %a = alloca i32, align 4 - call void @other(i32* %a) - %b = load i32, i32* %a, align 4 + call void @other(ptr %a) + %b = load i32, ptr %a, align 4 ret i32 %b } define i32 @testneg8() { entry: %a = alloca i32, align 4 - call void @other(i32* %a) - %b = load i32, i32* %a, align 4 + call void @other(ptr %a) + %b = load i32, ptr %a, align 4 ret i32 %b } - declare void @other(i32*) + declare void @other(ptr) ... 
--- diff --git a/llvm/test/CodeGen/ARM/store-prepostinc.mir b/llvm/test/CodeGen/ARM/store-prepostinc.mir index b974bc2..5d76f9b 100644 --- a/llvm/test/CodeGen/ARM/store-prepostinc.mir +++ b/llvm/test/CodeGen/ARM/store-prepostinc.mir @@ -5,27 +5,27 @@ target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "thumbv7a-none-unknown-eabi" - define i8* @STR_pre4(i8* %p, i32 %v) { unreachable } - define i8* @STR_pre8(i8* %p, i32 %v) { unreachable } - define i8* @STR_pre255(i8* %p, i32 %v) { unreachable } - define i8* @STR_pre256(i8* %p, i32 %v) { unreachable } - define i8* @STR_pre1024(i8* %p, i32 %v) { unreachable } - define i8* @STR_pre4095(i8* %p, i32 %v) { unreachable } - define i8* @STR_pre4096(i8* %p, i32 %v) { unreachable } - define i8* @STR_prem1024(i8* %p, i32 %v) { unreachable } - define i8* @STR_prem4095(i8* %p, i32 %v) { unreachable } - define i8* @STR_prem4096(i8* %p, i32 %v) { unreachable } - - define i8* @STR_post4(i8* %p, i32 %v) { unreachable } - define i8* @STR_post8(i8* %p, i32 %v) { unreachable } - define i8* @STR_post255(i8* %p, i32 %v) { unreachable } - define i8* @STR_post256(i8* %p, i32 %v) { unreachable } - define i8* @STR_post1024(i8* %p, i32 %v) { unreachable } - define i8* @STR_post4095(i8* %p, i32 %v) { unreachable } - define i8* @STR_post4096(i8* %p, i32 %v) { unreachable } - define i8* @STR_postm1024(i8* %p, i32 %v) { unreachable } - define i8* @STR_postm4095(i8* %p, i32 %v) { unreachable } - define i8* @STR_postm4096(i8* %p, i32 %v) { unreachable } + define ptr @STR_pre4(ptr %p, i32 %v) { unreachable } + define ptr @STR_pre8(ptr %p, i32 %v) { unreachable } + define ptr @STR_pre255(ptr %p, i32 %v) { unreachable } + define ptr @STR_pre256(ptr %p, i32 %v) { unreachable } + define ptr @STR_pre1024(ptr %p, i32 %v) { unreachable } + define ptr @STR_pre4095(ptr %p, i32 %v) { unreachable } + define ptr @STR_pre4096(ptr %p, i32 %v) { unreachable } + define ptr @STR_prem1024(ptr %p, i32 %v) { unreachable } + define ptr @STR_prem4095(ptr %p, i32 %v) { unreachable } + define ptr @STR_prem4096(ptr %p, i32 %v) { unreachable } + + define ptr @STR_post4(ptr %p, i32 %v) { unreachable } + define ptr @STR_post8(ptr %p, i32 %v) { unreachable } + define ptr @STR_post255(ptr %p, i32 %v) { unreachable } + define ptr @STR_post256(ptr %p, i32 %v) { unreachable } + define ptr @STR_post1024(ptr %p, i32 %v) { unreachable } + define ptr @STR_post4095(ptr %p, i32 %v) { unreachable } + define ptr @STR_post4096(ptr %p, i32 %v) { unreachable } + define ptr @STR_postm1024(ptr %p, i32 %v) { unreachable } + define ptr @STR_postm4095(ptr %p, i32 %v) { unreachable } + define ptr @STR_postm4096(ptr %p, i32 %v) { unreachable } ... 
 ---
diff --git a/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir b/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
index ec6f9ef..023f7a9 100644
--- a/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
+++ b/llvm/test/CodeGen/ARM/v6-jumptable-clobber.mir
@@ -20,8 +20,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv6m-none--eabi"
- define void @foo(i8 %in, i32* %addr) {
- store i32 12345678, i32* %addr
+ define void @foo(i8 %in, ptr %addr) {
+ store i32 12345678, ptr %addr
 %1 = call i32 @llvm.arm.space(i32 980, i32 undef)
 %2 = zext i8 %in to i32
 switch i32 %2, label %default [
@@ -99,8 +99,8 @@
 unreachable
 }
- define void @bar(i8 %in, i32* %addr) {
- store i32 12345678, i32* %addr
+ define void @bar(i8 %in, ptr %addr) {
+ store i32 12345678, ptr %addr
 %1 = zext i8 %in to i32
 switch i32 %1, label %default [
 i32 0, label %d1
@@ -181,7 +181,7 @@
 declare i32 @llvm.arm.space(i32, i32) #0
 ; Function Attrs: nounwind
- declare void @llvm.stackprotector(i8*, i8**) #0
+ declare void @llvm.stackprotector(ptr, ptr) #0
 attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/ARM/vldm-liveness.mir b/llvm/test/CodeGen/ARM/vldm-liveness.mir
index 14123ac..db88735 100644
--- a/llvm/test/CodeGen/ARM/vldm-liveness.mir
+++ b/llvm/test/CodeGen/ARM/vldm-liveness.mir
@@ -14,7 +14,7 @@
 # liveness flags are added.
 --- |
 target triple = "thumbv7-apple-ios"
- define arm_aapcs_vfpcc <4 x float> @foo(float* %ptr) {
+ define arm_aapcs_vfpcc <4 x float> @foo(ptr %ptr) {
 ret <4 x float> undef
 }
 ...
diff --git a/llvm/test/CodeGen/ARM/vldmia-sched.mir b/llvm/test/CodeGen/ARM/vldmia-sched.mir
index 1b2d9dd..9a38d28 100644
--- a/llvm/test/CodeGen/ARM/vldmia-sched.mir
+++ b/llvm/test/CodeGen/ARM/vldmia-sched.mir
@@ -24,8 +24,8 @@ body: |
 $r0 = t2MOVTi16 internal $r0, target-flags(arm-hi16) @a, 14, $noreg
 }
 $r1 = t2ADDri $r0, 8, 14, $noreg, $noreg
- VLDMDIA killed $r1, 14, $noreg, def $d23, def $d24, def $d25, def $d26, def $d27, def $d28, def $d29, def $d30, def $d31 :: (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 2, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 4, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 6, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 8, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 10, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 12, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 14, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 16, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 18, i32 0) to <2 x float>*)`, align 4)
+ VLDMDIA killed $r1, 14, $noreg, def $d23, def $d24, def $d25, def $d26, def $d27, def $d28, def $d29, def $d30, def $d31 :: (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 2, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 4, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 6, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 8, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 10, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 12, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 14, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 16, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 18, i32 0)`, align 4)
 $r0, dead $cpsr = tADDi8 killed $r0, 80, 14, $noreg
- VLDMDIA killed $r0, 14, $noreg, def $d0, def $d1, def $d2, def $d3, def $d4, def $d5, def $d6 :: (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 20, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 22, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 24, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 26, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 28, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 30, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 32, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 33, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 34, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 35, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 36, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 37, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 38, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 39, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 40, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 41, i32 0) to <2 x float>*)`, align 4), (load (s64) from `<2 x float>* bitcast (float* getelementptr ([1 x float], [1 x float]* @a, i32 42, i32 0) to <2 x float>*)`, align 4)
+ VLDMDIA killed $r0, 14, $noreg, def $d0, def $d1, def $d2, def $d3, def $d4, def $d5, def $d6 :: (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 20, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 22, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 24, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 26, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 28, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 30, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 32, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 33, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 34, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 35, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 36, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 37, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 38, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 39, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 40, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 41, i32 0)`, align 4), (load (s64) from `ptr getelementptr ([1 x float], ptr @a, i32 42, i32 0)`, align 4)
 ...
diff --git a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
index 63cd2a6..8c49a53 100644
--- a/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
+++ b/llvm/test/CodeGen/ARM/vlldm-vlstm-uops.mir
@@ -2,7 +2,7 @@
 --- |
 target triple = "thumbv8m.main-arm-none-eabi"
- define hidden void @foo(void ()* nocapture %baz) local_unnamed_addr #0 {
+ define hidden void @foo(ptr nocapture %baz) local_unnamed_addr #0 {
 entry:
 %call = call i32 @bar() #0
 %tobool = icmp eq i32 %call, 0
diff --git a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
index f1928f8..3069cbe 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
+++ b/llvm/test/CodeGen/Hexagon/addrmode-immop.mir
@@ -7,21 +7,21 @@
 --- |
 target triple = "hexagon-unknown-unknown-elf"
- %s.0 = type { i32 (...)**, i32, i32, %s.1 }
+ %s.0 = type { ptr, i32, i32, %s.1 }
 %s.1 = type { i32, i32 }
- @g0 = external dso_local unnamed_addr constant { [3 x i8*], [3 x i8*] }, align 4
+ @g0 = external dso_local unnamed_addr constant { [3 x ptr], [3 x ptr] }, align 4
 ; Function Attrs: norecurse
 define void @f0() #0 {
 b0:
- %v0 = load i32 (%s.0*)*, i32 (%s.0*)** bitcast (i8* getelementptr (i8, i8* bitcast (i8** getelementptr inbounds ({ [3 x i8*], [3 x i8*] }, { [3 x i8*], [3 x i8*] }* @g0, i32 0, inrange i32 0, i32 3) to i8*), i32 sub (i32 ptrtoint (i32 (%s.0*)* @f1 to i32), i32 1)) to i32 (%s.0*)**), align 4
- %v1 = call i32 %v0(%s.0* nonnull undef)
+ %v0 = load ptr, ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1)), align 4
+ %v1 = call i32 %v0(ptr nonnull undef)
 unreachable
 }
 ; Function Attrs: norecurse nounwind
- declare dso_local i32 @f1(%s.0*) #1 align 2
+ declare dso_local i32 @f1(ptr) #1 align 2
 attributes #0 = { norecurse "target-cpu"="hexagonv60" }
 attributes #1 = { norecurse nounwind "target-cpu"="hexagonv60" }
@@ -33,7 +33,7 @@ tracksRegLiveness: true
 body: |
 bb.0.b0:
 $r2 = A2_tfrsi @g0 + 12
- $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `i32 (%s.0*)** bitcast (i8* getelementptr (i8, i8* bitcast (i8** getelementptr inbounds ({ [3 x i8*], [3 x i8*] }, { [3 x i8*], [3 x i8*] }* @g0, i32 0, inrange i32 0, i32 3) to i8*), i32 sub (i32 ptrtoint (i32 (%s.0*)* @f1 to i32), i32 1)) to i32 (%s.0*)**)`)
+ $r2 = L2_loadri_io killed $r2, @f1 - 1 :: (load (s32) from `ptr getelementptr (i8, ptr getelementptr inbounds ({ [3 x ptr], [3 x ptr] }, ptr @g0, i32 0, inrange i32 0, i32 3), i32 sub (i32 ptrtoint (ptr @f1 to i32), i32 1))`)
 ADJCALLSTACKDOWN 0, 0, implicit-def $r29, implicit-def dead $r30, implicit $r31, implicit $r30, implicit $r29
 PS_callr_nr killed $r2, hexagoncsr, implicit undef $r0, implicit-def $r29, implicit-def dead $r0
 ADJCALLSTACKUP 0, 0, implicit-def dead $r29, implicit-def dead $r30, implicit-def dead $r31, implicit $r29
diff --git a/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir b/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
index 8e43189..ab035d9 100644
--- a/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
+++ b/llvm/test/CodeGen/Hexagon/addrmode-no-rdef.mir
@@ -14,14 +14,13 @@
 define dso_local i32 @f0(i1 zeroext %a0) local_unnamed_addr #0 {
 b0:
 %v0 = tail call i32 @llvm.read_register.i32(metadata !0)
 %v1 = add nsw i32 %v0, 4096
- %v2 = inttoptr i32 %v1 to %s.0*
- %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 -1
- %v4 = tail call i32 bitcast (i32 (...)* @f1 to i32 (%s.0*)*)(%s.0* noundef nonnull %v3) #2
+ %v2 = inttoptr i32 %v1 to ptr
+ %v3 = getelementptr inbounds %s.0, ptr %v2, i32 -1
+ %v4 = tail call i32 @f1(ptr noundef nonnull %v3) #2
 br i1 %a0, label %b2, label %b1
 b1: ; preds = %b0
- %v5 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 0, i32 0, i32 0
- %v6 = load i32, i32* %v5, align 4
+ %v6 = load i32, ptr %v3, align 4
 br label %b2
 b2: ; preds = %b1, %b0
diff --git a/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir b/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
index 8a924ff..5d84e66 100644
--- a/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
+++ b/llvm/test/CodeGen/Hexagon/bank-conflict-load.mir
@@ -8,7 +8,7 @@
 # CHECK: L2_loadri_io killed $r0, 12
 --- |
- define void @foo(i32* %a, i32* %b) {
+ define void @foo(ptr %a, ptr %b) {
 ret void
 }
 ...
diff --git a/llvm/test/CodeGen/Hexagon/bank-conflict.mir b/llvm/test/CodeGen/Hexagon/bank-conflict.mir
index 07c0edd..f59fc5c 100644
--- a/llvm/test/CodeGen/Hexagon/bank-conflict.mir
+++ b/llvm/test/CodeGen/Hexagon/bank-conflict.mir
@@ -31,19 +31,18 @@
 define void @f0(i32 %a0) {
 b0:
- %v0 = bitcast [10 x %s.0]* inttoptr (i32 -121502345 to [10 x %s.0]*) to [10 x %s.0]*
+ %v0 = bitcast ptr inttoptr (i32 -121502345 to ptr) to ptr
 br label %b1
 b1: ; preds = %b5, %b0
 %v1 = phi i32 [ 0, %b0 ], [ %v28, %b5 ]
 %v2 = phi i32 [ 0, %b0 ], [ %v27, %b5 ]
- %v3 = load i32, i32* @g2, align 4
- %v4 = load i32, i32* @g3, align 8
+ %v3 = load i32, ptr @g2, align 4
+ %v4 = load i32, ptr @g3, align 8
 %v5 = and i32 %v4, %v3
- %v6 = getelementptr [10 x %s.0], [10 x %s.0]* %v0, i32 0, i32 %v2
- %v7 = bitcast %s.0* %v6 to %s.0*
- %v8 = getelementptr %s.0, %s.0* %v7, i32 0, i32 12
- %v9 = getelementptr %s.0, %s.0* %v7, i32 0, i32 13
+ %v6 = getelementptr [10 x %s.0], ptr %v0, i32 0, i32 %v2
+ %v8 = getelementptr %s.0, ptr %v6, i32 0, i32 12
+ %v9 = getelementptr %s.0, ptr %v6, i32 0, i32 13
 br label %b2
 b2: ; preds = %b4, %b1
@@ -51,20 +50,20 @@
 %v11 = phi i32 [ %v13, %b4 ], [ %v5, %b1 ]
 %v12 = tail call i32 @llvm.hexagon.S2.cl0(i32 %v11)
 %v13 = tail call i32 @llvm.hexagon.S2.setbit.r(i32 %v11, i32 %v12)
- %v14 = getelementptr [24 x i32], [24 x i32]* %v8, i32 0, i32 %v12
- %v15 = load i32, i32* %v14, align 4
+ %v14 = getelementptr [24 x i32], ptr %v8, i32 0, i32 %v12
+ %v15 = load i32, ptr %v14, align 4
 %v16 = tail call i64 @llvm.hexagon.M2.vmpy2s.s0(i32 %v15, i32 %v15)
- %v17 = getelementptr [24 x i32], [24 x i32]* %v9, i32 0, i32 %v12
- %v18 = load i32, i32* %v17, align 4
+ %v17 = getelementptr [24 x i32], ptr %v9, i32 0, i32 %v12
+ %v18 = load i32, ptr %v17, align 4
 %v19 = tail call i64 @llvm.hexagon.M2.vmac2s.s0(i64 %v16, i32 %v18, i32 %v18)
- %v20 = load i8, i8* @g4, align 1
+ %v20 = load i8, ptr @g4, align 1
 %v21 = and i8 %v20, 1
 %v22 = icmp eq i8 %v21, 0
 br i1 %v22, label %b3, label %b4
 b3: ; preds = %b2
 %v23 = tail call i64 @llvm.hexagon.A2.vaddws(i64 %v10, i64 %v19)
- store i64 %v23, i64* @g0, align 8
+ store i64 %v23, ptr @g0, align 8
 br label %b4
 b4: ; preds = %b3, %b2
@@ -80,7 +79,7 @@
 br i1 %v29, label %b6, label %b1
 b6: ; preds = %b5
- store i64 %v19, i64* @g1, align 8
+ store i64 %v19, ptr @g1, align 8
 ret void
 }
diff --git a/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir b/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
index 63277e0..5586748 100644
--- a/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
+++ b/llvm/test/CodeGen/Hexagon/cext-opt-negative-fi.mir
@@ -22,16 +22,16 @@
 %s.9 = type { i8, i8 }
 ; Function Attrs: nounwind optsize
- define dso_local void @f0(%s.0* byval(%s.0) nocapture readonly align 8 %a0) local_unnamed_addr #0 {
+ define dso_local void @f0(ptr byval(%s.0) nocapture readonly align 8 %a0) local_unnamed_addr #0 {
 b0:
- %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 10
- %v1 = load i8, i8* %v0, align 8
- %v2 = tail call i8* @f1(i8 signext %v1) #0
+ %v0 = getelementptr inbounds %s.0, ptr %a0, i32 0, i32 10
+ %v1 = load i8, ptr %v0, align 8
+ %v2 = tail call ptr @f1(i8 signext %v1) #0
 unreachable
 }
 ; Function Attrs: nounwind optsize
- declare dso_local i8* @f1(i8 signext) local_unnamed_addr #0
+ declare dso_local ptr @f1(i8 signext) local_unnamed_addr #0
 attributes #0 = { nounwind optsize "target-cpu"="hexagonv65" }
diff --git a/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir b/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
index 74346d0..150cc07 100644
--- a/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
+++ b/llvm/test/CodeGen/Hexagon/cext-opt-stack-no-rr.mir
@@ -14,7 +14,7 @@ body: |
 successors: %bb.1, %bb.2
 %0:intregs = IMPLICIT_DEF
- %1:intregs = L2_loadrub_io killed %0:intregs, 0 :: (load (s8) from `i8* undef`, align 2)
+ %1:intregs = L2_loadrub_io killed %0:intregs, 0 :: (load (s8) from `ptr undef`, align 2)
 %2:predregs = C2_cmpeqi %1:intregs, 5
 %3:intregs = A2_tfrsi 0
 S2_pstorerbt_io %2:predregs, %stack.0, 267, killed %3:intregs :: (store (s8) into %stack.0)
diff --git a/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir b/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
index 22cb2e4..0673ba7 100644
--- a/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
+++ b/llvm/test/CodeGen/Hexagon/cext-unnamed-global.mir
@@ -11,11 +11,11 @@
 define void @f0() #0 {
 b0:
- tail call fastcc void @f1(float* inttoptr (i64 add (i64 ptrtoint ([0 x i8]* @0 to i64), i64 128) to float*), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @1, i32 0, i32 0))
+ tail call fastcc void @f1(ptr inttoptr (i64 add (i64 ptrtoint (ptr @0 to i64), i64 128) to ptr), ptr @1)
 ret void
 }
- declare fastcc void @f1(float* nocapture readonly, i64* nocapture readonly) #1
+ declare fastcc void @f1(ptr nocapture readonly, ptr nocapture readonly) #1
 attributes #0 = { alwaysinline nounwind "target-cpu"="hexagonv60" }
 attributes #1 = { noinline norecurse nounwind "target-cpu"="hexagonv60" }
diff --git a/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir b/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
index 0dedca4..8fe30e3 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
+++ b/llvm/test/CodeGen/Hexagon/early-if-conv-lifetime.mir
@@ -14,21 +14,20 @@
 %s.2 = type { %s.3 }
 %s.3 = type { %s.4 }
 %s.4 = type { %s.5 }
- %s.5 = type { i32, i32, i8* }
+ %s.5 = type { i32, i32, ptr }
- declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
+ declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #0
 define hidden fastcc void @f0() {
 b0:
 %v0 = alloca %s.0, align 4
- %v1 = load i8, i8* undef, align 1
+ %v1 = load i8, ptr undef, align 1
 %v2 = add i8 %v1, -102
 %v3 = icmp ult i8 %v2, 1
 br i1 %v3, label %b1, label %b2
 b1: ; preds = %b0
- %v4 = bitcast %s.0* %v0 to i8*
- call void @llvm.lifetime.end.p0i8(i64 12, i8* nonnull %v4)
+ call void @llvm.lifetime.end.p0(i64 12, ptr nonnull %v0)
 br label %b2
 b2: ; preds = %b1, %b0
@@ -58,7 +57,7 @@ body: |
 successors: %bb.1.b1(0x40000000), %bb.2.b2(0x40000000)
 %1 = IMPLICIT_DEF
- %0 = L2_loadrb_io killed %1, 0 :: (load (s8) from `i8* undef`)
+ %0 = L2_loadrb_io killed %1, 0 :: (load (s8) from `ptr undef`)
 %2 = C2_cmpeqi killed %0, 102
 %3 = COPY killed %2
 J2_jumpf killed %3, %bb.2.b2, implicit-def dead $pc
diff --git a/llvm/test/CodeGen/Hexagon/early-if-predicator.mir b/llvm/test/CodeGen/Hexagon/early-if-predicator.mir
index 51fb2ab..1cb2f1e 100644
--- a/llvm/test/CodeGen/Hexagon/early-if-predicator.mir
+++ b/llvm/test/CodeGen/Hexagon/early-if-predicator.mir
@@ -6,12 +6,12 @@
 --- |
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
- define void @if-cvt(i32* %p, i1 %c) {
+ define void @if-cvt(ptr %p, i1 %c) {
 entry:
 br i1 %c, label %if, label %endif
 if: ; preds = %entry
- store i32 1, i32* %p, align 4
+ store i32 1, ptr %p, align 4
 br label %endif
 endif: ; preds = %if, %entry
diff --git a/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir b/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
index 2f26458..db3c7bf 100644
--- a/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
+++ b/llvm/test/CodeGen/Hexagon/hwloop-dbg-register.mir
@@ -9,7 +9,7 @@
 %s.0 = type { i32 }
- @g0 = external dso_local local_unnamed_addr global %s.0*, align 4, !dbg !0
+ @g0 = external dso_local local_unnamed_addr global ptr, align 4, !dbg !0
 define dso_local void @f0() local_unnamed_addr #0 !dbg !13 {
 b0:
@@ -21,9 +21,9 @@
 %v2 = add nsw i32 %v1, -8
 %v3 = add nsw i32 %v2, %v0
 call void @llvm.dbg.value(metadata i32 %v3, metadata !19, metadata !DIExpression()), !dbg !24
- %v4 = load %s.0*, %s.0** @g0, align 4
- %v5 = getelementptr inbounds %s.0, %s.0* %v4, i32 %v3, i32 0
- store i32 undef, i32* %v5, align 4
+ %v4 = load ptr, ptr @g0, align 4
+ %v5 = getelementptr inbounds %s.0, ptr %v4, i32 %v3, i32 0
+ store i32 undef, ptr %v5, align 4
 %v6 = icmp eq i32 %v2, 0
 br i1 %v6, label %b2, label %b1
diff --git a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
index 797efc3..630b286 100644
--- a/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
+++ b/llvm/test/CodeGen/Hexagon/ifcvt-diamond-ret.mir
@@ -16,10 +16,10 @@ body: |
 J2_jumpf killed renamable $p0, %bb.2, implicit-def dead $pc
 bb.1:
- S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `i32* undef`)
+ S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `ptr undef`)
 PS_jmpret $r31, implicit-def dead $pc
 bb.2:
- S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `i32* undef`)
+ S4_storeiri_io undef renamable $r0, 0, 32768 :: (store (s32) into `ptr undef`)
 PS_jmpret $r31, implicit-def dead $pc
 ...
diff --git a/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir b/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
index 9743709..413d13d 100644
--- a/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
+++ b/llvm/test/CodeGen/Hexagon/pipeliner/swp-phi-start.mir
@@ -18,7 +18,7 @@
 target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
 ; Function Attrs: nounwind
- define void @f0(i32 %a0, i16* nocapture %a1) #0 {
+ define void @f0(i32 %a0, ptr nocapture %a1) #0 {
 b0:
 br i1 undef, label %b1, label %b2.preheader
@@ -26,20 +26,20 @@
 br i1 undef, label %b3, label %b2.preheader
 b2.preheader: ; preds = %b0, %b1
- %cgep = getelementptr i16, i16* %a1, i32 undef
+ %cgep = getelementptr i16, ptr %a1, i32 undef
 br label %b2
 b2: ; preds = %b2.preheader, %b2
- %lsr.iv = phi i16* [ %cgep, %b2.preheader ], [ %cgep3, %b2 ]
+ %lsr.iv = phi ptr [ %cgep, %b2.preheader ], [ %cgep3, %b2 ]
 %v1 = phi i32 [ %v7, %b2 ], [ undef, %b2.preheader ]
 %v2 = phi i32 [ %v1, %b2 ], [ %a0, %b2.preheader ]
 %v3 = add nsw i32 %v2, -2
- %cgep2 = getelementptr inbounds i16, i16* %a1, i32 %v3
- %v5 = load i16, i16* %cgep2, align 2, !tbaa !0
- store i16 %v5, i16* %lsr.iv, align 2, !tbaa !0
+ %cgep2 = getelementptr inbounds i16, ptr %a1, i32 %v3
+ %v5 = load i16, ptr %cgep2, align 2, !tbaa !0
+ store i16 %v5, ptr %lsr.iv, align 2, !tbaa !0
 %v7 = add nsw i32 %v1, -1
 %v8 = icmp sgt i32 %v7, 0
- %cgep3 = getelementptr i16, i16* %lsr.iv, i32 -1
+ %cgep3 = getelementptr i16, ptr %lsr.iv, i32 -1
 br i1 %v8, label %b2, label %b3
 b3: ; preds = %b2, %b1
diff --git a/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir b/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
index 962df0c..3bf9b3e 100644
--- a/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
+++ b/llvm/test/CodeGen/Hexagon/postinc-baseoffset.mir
@@ -9,7 +9,7 @@
 # CHECK: r1 = memw(r0++#8)
 --- |
- define void @fred(i32* %a) { ret void }
+ define void @fred(ptr %a) { ret void }
 ...
 ---
 name: fred
diff --git a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
index 093d3ad..67f4dd7 100644
--- a/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
+++ b/llvm/test/CodeGen/Hexagon/regalloc-bad-undef.mir
@@ -42,7 +42,7 @@
 %shl50 = shl i64 %add45, %4
 %and52 = and i64 %shl37, %or12
 %and54 = and i64 %shl50, %or26
- store i64 %and54, i64* undef, align 8
+ store i64 %and54, ptr undef, align 8
 %cmp56 = icmp eq i64 %and52, 0
 br i1 %cmp56, label %for.end, label %if.end82
@@ -186,7 +186,7 @@ body: |
 %21 = COPY %13
 %21 = S2_lsr_i_p_and %21, %29, 9
 %22 = S2_asl_i_p_and %22, %7, 42
- S2_storerd_io undef %23, 0, %22 :: (store (s64) into `i64* undef`)
+ S2_storerd_io undef %23, 0, %22 :: (store (s64) into `ptr undef`)
 %25 = C2_cmpeqp %21, %51
 J2_jumpt %25, %bb.3.for.end, implicit-def dead $pc
 J2_jump %bb.2.if.end82, implicit-def dead $pc
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
index e578cf9..72ddaa4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/add_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+ define void @add_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @add_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @add_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @add_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
index 5990dc0..bca4283 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/brindirect.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define i32 @indirectbr(i8* %addr) {
+ define i32 @indirectbr(ptr %addr) {
 entry:
- indirectbr i8* %addr, [label %L1, label %L2]
+ indirectbr ptr %addr, [label %L1, label %L2]
 L1:
 ret i32 0
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
index 308c18f..2e7dd7a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fabs_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
- define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+ define void @fabs_v4f32(ptr %a, ptr %c) { entry: ret void }
+ define void @fabs_v2f64(ptr %a, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
index f569ec8..c3903fa 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fence.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define void @atomic_load_i32(i32* %ptr) { ret void }
+ define void @atomic_load_i32(ptr %ptr) { ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
index 1fcba13..c19b123 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/floating_point_vec_arithmetic_operations.mir
@@ -2,17 +2,17 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
- define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
- define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
- define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
index 2fdae65..4458b39 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/fsqrt_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
- define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+ define void @sqrt_v4f32(ptr %a, ptr %c) { entry: ret void }
+ define void @sqrt_v2f64(ptr %a, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
index f4d065a2..94eb203 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/gloal_address.mir
@@ -5,7 +5,7 @@
 @.str = private unnamed_addr constant [11 x i8] c"hello %d \0A\00"
 define void @main() {entry: ret void}
- declare i32 @printf(i8*, ...)
+ declare i32 @printf(ptr, ...)
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
index 0540d41..e5af2cc 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load.mir
@@ -3,9 +3,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mattr=+fp64,+mips32r2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32FP64
 --- |
- define void @load_i32(i32* %ptr) {entry: ret void}
- define void @load_float(float* %ptr) {entry: ret void}
- define void @load_double(double* %ptr) {entry: ret void}
+ define void @load_i32(ptr %ptr) {entry: ret void}
+ define void @load_float(ptr %ptr) {entry: ret void}
+ define void @load_double(ptr %ptr) {entry: ret void}
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
index 78808f6..e053a09 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned.mir
@@ -8,19 +8,19 @@
 define float @load_float_align1() {
 entry:
- %0 = load float, float* @float_align1, align 1
+ %0 = load float, ptr @float_align1, align 1
 ret float %0
 }
 define float @load_float_align4() {
 entry:
- %0 = load float, float* @float_align4, align 4
+ %0 = load float, ptr @float_align4, align 4
 ret float %0
 }
 define i32 @load_i32_align8() {
 entry:
- %0 = load i32, i32* @i32_align8, align 8
+ %0 = load i32, ptr @i32_align8, align 8
 ret i32 %0
 }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
index 3b4a40a..26d3974 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_4_unaligned_r6.mir
@@ -8,19 +8,19 @@
 define float @load_float_align1() {
 entry:
- %0 = load float, float* @float_align1, align 1
+ %0 = load float, ptr @float_align1, align 1
 ret float %0
 }
 define float @load_float_align8() {
 entry:
- %0 = load float, float* @float_align8, align 8
+ %0 = load float, ptr @float_align8, align 8
 ret float %0
 }
 define i32 @load_i32_align2() {
 entry:
- %0 = load i32, i32* @i32_align2, align 2
+ %0 = load i32, ptr @i32_align2, align 2
 ret i32 %0
 }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
index 18a884b..eafe5af 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/load_store_vec.mir
@@ -2,12 +2,12 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=msa,+fp64 -mattr=nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) { entry: ret void }
- define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) { entry: ret void }
- define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) { entry: ret void }
- define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) { entry: ret void }
- define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) { entry: ret void }
- define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) { entry: ret void }
+ define void @load_store_v16i8(ptr %a, ptr %b) { entry: ret void }
+ define void @load_store_v8i16(ptr %a, ptr %b) { entry: ret void }
+ define void @load_store_v4i32(ptr %a, ptr %b) { entry: ret void }
+ define void @load_store_v2i64(ptr %a, ptr %b) { entry: ret void }
+ define void @load_store_v4f32(ptr %a, ptr %b) { entry: ret void }
+ define void @load_store_v2f64(ptr %a, ptr %b) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
index 7a6a684..6044f73 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul.mir
@@ -3,7 +3,7 @@
 --- |
 define void @mul_i32(i32 %x, i32 %y) {entry: ret void}
- define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) { ret void }
+ define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) { ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
index 14ad676..e353892 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/mul_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+ define void @mul_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @mul_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @mul_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @mul_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
index 2d5dea5..0ae5c18 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define void @ptr_arg_in_regs(i32* %p) {entry: ret void}
- define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void}
- define void @ret_ptr(i8* %p) {entry: ret void}
+ define void @ptr_arg_in_regs(ptr %p) {entry: ret void}
+ define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {entry: ret void}
+ define void @ret_ptr(ptr %p) {entry: ret void}
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
index c15fcbe..05c40e5 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/rem_and_div_vec.mir
@@ -2,25 +2,25 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
- define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
- define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
-
- define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+ define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+ define void @srem_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @srem_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @srem_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @srem_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+ define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
+
+ define void @urem_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @urem_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @urem_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @urem_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
index 075ee35c..89a0603 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store.mir
@@ -3,9 +3,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mattr=+fp64,+mips32r2 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32FP64
 --- |
- define void @store_i32(i32* %ptr) { entry: ret void }
- define void @store_float(float* %ptr) { entry: ret void }
- define void @store_double(double* %ptr) { entry: ret void }
+ define void @store_i32(ptr %ptr) { entry: ret void }
+ define void @store_float(ptr %ptr) { entry: ret void }
+ define void @store_double(ptr %ptr) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
index 0d29668..8bbfa34 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned.mir
@@ -8,19 +8,19 @@
 define void @store_float_align1(float %a) {
 entry:
- store float %a, float* @float_align1, align 1
+ store float %a, ptr @float_align1, align 1
 ret void
 }
 define void @store_float_align4(float %a) {
 entry:
- store float %a, float* @float_align4, align 4
+ store float %a, ptr @float_align4, align 4
 ret void
 }
 define void @store_i32_align8(i32 signext %a) {
 entry:
- store i32 %a, i32* @i32_align8, align 8
+ store i32 %a, ptr @i32_align8, align 8
 ret void
 }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
index c776f2a..7bcd1eb 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/store_4_unaligned_r6.mir
@@ -8,19 +8,19 @@
 define void @store_float_align1(float %a) #0 {
 entry:
- store float %a, float* @float_align1, align 1
+ store float %a, ptr @float_align1, align 1
 ret void
 }
 define void @store_float_align8(float %a) #0 {
 entry:
- store float %a, float* @float_align8, align 8
+ store float %a, ptr @float_align8, align 8
 ret void
 }
 define void @store_i32_align2(i32 signext %a) #0 {
 entry:
- store i32 %a, i32* @i32_align2, align 2
+ store i32 %a, ptr @i32_align2, align 2
 ret void
 }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
index 974e089..229b110 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/sub_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+ define void @sub_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @sub_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @sub_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @sub_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
index 655d472..eed965f 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/truncStore_and_aExtLoad.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define void @load_store_i8(i8* %px, i8* %py) {entry: ret void}
- define void @load_store_i16(i16* %px, i16* %py) {entry: ret void}
- define void @load_store_i32(i32* %px, i32* %py) {entry: ret void}
+ define void @load_store_i8(ptr %px, ptr %py) {entry: ret void}
+ define void @load_store_i16(ptr %px, ptr %py) {entry: ret void}
+ define void @load_store_i32(ptr %px, ptr %py) {entry: ret void}
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
index 25f3160..1efa32d 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/instruction-select/zextLoad_and_sextLoad.mir
@@ -2,10 +2,10 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void}
- define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void}
- define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void}
- define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void}
+ define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void}
+ define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void}
+ define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void}
+ define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void}
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
index cf6b47a..592e7a4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
@@ -11,7 +11,7 @@
 define void @add_i16_aext() {entry: ret void}
 define void @add_i64() {entry: ret void}
 define void @add_i128() {entry: ret void}
- define void @uadd_with_overflow(i32 %lhs, i32 %rhs, i32* %padd, i1* %pcarry_flag) { ret void }
+ define void @uadd_with_overflow(i32 %lhs, i32 %rhs, ptr %padd, ptr %pcarry_flag) { ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
index 3b30544..a700556 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec.mir
@@ -2,10 +2,10 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
- define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
- define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
- define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+ define void @add_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @add_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @add_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @add_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
index ad86500..f4ba16e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/add_vec_builtin.mir
@@ -3,28 +3,28 @@
 declare <16 x i8> @llvm.mips.addv.b(<16 x i8>, <16 x i8>)
- define void @add_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void }
+ define void @add_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <8 x i16> @llvm.mips.addv.h(<8 x i16>, <8 x i16>)
- define void @add_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void }
+ define void @add_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <4 x i32> @llvm.mips.addv.w(<4 x i32>, <4 x i32>)
- define void @add_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void }
+ define void @add_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <2 x i64> @llvm.mips.addv.d(<2 x i64>, <2 x i64>)
- define void @add_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void }
+ define void @add_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <16 x i8> @llvm.mips.addvi.b(<16 x i8>, i32 immarg)
- define void @add_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) { entry: ret void }
+ define void @add_v16i8_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 declare <8 x i16> @llvm.mips.addvi.h(<8 x i16>, i32 immarg)
- define void @add_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) { entry: ret void }
+ define void @add_v8i16_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 declare <4 x i32> @llvm.mips.addvi.w(<4 x i32>, i32 immarg)
- define void @add_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) { entry: ret void }
+ define void @add_v4i32_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 declare <2 x i64> @llvm.mips.addvi.d(<2 x i64>, i32 immarg)
- define void @add_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) { entry: ret void }
+ define void @add_v2i64_builtin_imm(ptr %a, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
index 705ace0..6431fb0 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/brindirect.mir
@@ -2,9 +2,9 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define i32 @indirectbr(i8* %addr) {
+ define i32 @indirectbr(ptr %addr) {
 entry:
- indirectbr i8* %addr, [label %L1, label %L2]
+ indirectbr ptr %addr, [label %L1, label %L2]
 L1:
 ret i32 0
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
index 00f630b..43b2803 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/dyn_stackalloc.mir
@@ -2,17 +2,17 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- declare i32 @puts(i8*)
- declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i1)
+ declare i32 @puts(ptr)
+ declare void @llvm.memset.p0.i32(ptr, i8, i32, i1)
 define void @Print_c_N_times(i8 %c, i32 %N) {
 entry:
 %add = add i32 %N, 1
 %vla = alloca i8, i32 %add, align 1
- call void @llvm.memset.p0i8.i32(i8* align 1 %vla, i8 %c, i32 %N, i1 false)
- %arrayidx = getelementptr inbounds i8, i8* %vla, i32 %N
- store i8 0, i8* %arrayidx, align 1
- %call = call i32 @puts(i8* %vla)
+ call void @llvm.memset.p0.i32(ptr align 1 %vla, i8 %c, i32 %N, i1 false)
+ %arrayidx = getelementptr inbounds i8, ptr %vla, i32 %N
+ store i8 0, ptr %arrayidx, align 1
+ %call = call i32 @puts(ptr %vla)
 ret void
 }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
index 1190eb0..a6a7d8e 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
- define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+ define void @fabs_v4f32(ptr %a, ptr %c) { entry: ret void }
+ define void @fabs_v2f64(ptr %a, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
index afab80f..f8cc829 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fabs_vec_builtin.mir
@@ -3,10 +3,10 @@
 declare <4 x float> @llvm.mips.fmax.a.w(<4 x float>, <4 x float>)
- define void @fabs_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
+ define void @fabs_v4f32_builtin(ptr %a, ptr %c) { entry: ret void }
 declare <2 x double> @llvm.mips.fmax.a.d(<2 x double>, <2 x double>)
- define void @fabs_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+ define void @fabs_v2f64_builtin(ptr %a, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
index 43b9bdf..d207454 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fence.mir
@@ -2,7 +2,7 @@
 # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
 --- |
- define void @atomic_load_i32(i32* %ptr) { ret void }
+ define void @atomic_load_i32(ptr %ptr) { ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
index 4060c17..ddc5f34 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations.mir
@@ -2,17 +2,17 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
- define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
- define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
- define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
- define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void }
+ define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
index b1fdeea..252584a 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/floating_point_vec_arithmetic_operations_builtin.mir
@@ -3,28 +3,28 @@
 declare <4 x float> @llvm.mips.fadd.w(<4 x float>, <4 x float>)
- define void @fadd_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+ define void @fadd_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <2 x double> @llvm.mips.fadd.d(<2 x double>, <2 x double>)
- define void @fadd_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fadd_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <4 x float> @llvm.mips.fsub.w(<4 x float>, <4 x float>)
- define void @fsub_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+ define void @fsub_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <2 x double> @llvm.mips.fsub.d(<2 x double>, <2 x double>)
- define void @fsub_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fsub_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <4 x float> @llvm.mips.fmul.w(<4 x float>, <4 x float>)
- define void @fmul_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+ define void @fmul_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <2 x double> @llvm.mips.fmul.d(<2 x double>, <2 x double>)
- define void @fmul_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fmul_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <4 x float> @llvm.mips.fdiv.w(<4 x float>, <4 x float>)
- define void @fdiv_v4f32_builtin(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void }
+ define void @fdiv_v4f32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 declare <2 x double> @llvm.mips.fdiv.d(<2 x double>, <2 x double>)
- define void @fdiv_v2f64_builtin(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void }
+ define void @fdiv_v2f64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void }
 ...
 ---
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
index 599c8c0..cae3ab4 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec.mir
@@ -2,8 +2,8 @@
 # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600
 --- |
- define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void }
- define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void }
+ define void @sqrt_v4f32(ptr %a, ptr %c) { entry: ret void }
+ define void @sqrt_v2f64(ptr %a, ptr %c) { entry: ret void }
 ...
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir index 36dfdbb..eeb1a94 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/fsqrt_vec_builtin.mir @@ -3,10 +3,10 @@ --- | declare <4 x float> @llvm.mips.fsqrt.w(<4 x float>) - define void @fsqrt_v4f32_builtin(<4 x float>* %a, <4 x float>* %c) { entry: ret void } + define void @fsqrt_v4f32_builtin(ptr %a, ptr %c) { entry: ret void } declare <2 x double> @llvm.mips.fsqrt.d(<2 x double>) - define void @fsqrt_v2f64_builtin(<2 x double>* %a, <2 x double>* %c) { entry: ret void } + define void @fsqrt_v2f64_builtin(ptr %a, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir index 2056eda..b1129f5 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/global_address.mir @@ -5,7 +5,7 @@ @.str = private unnamed_addr constant [11 x i8] c"hello %d \0A\00" define void @main() {entry: ret void} - declare i32 @printf(i8*, ...) + declare i32 @printf(ptr, ...) ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir index dd76044..8de7b2d 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/load_4_unaligned.mir @@ -14,49 +14,49 @@ define float @load_float_align1() { entry: - %0 = load float, float* @float_align1, align 1 + %0 = load float, ptr @float_align1, align 1 ret float %0 } define float @load_float_align2() { entry: - %0 = load float, float* @float_align2, align 2 + %0 = load float, ptr @float_align2, align 2 ret float %0 } define float @load_float_align4() { entry: - %0 = load float, float* @float_align4, align 4 + %0 = load float, ptr @float_align4, align 4 ret float %0 } define float @load_float_align8() { entry: - %0 = load float, float* @float_align8, align 8 + %0 = load float, ptr @float_align8, align 8 ret float %0 } define i32 @load_i32_align1() { entry: - %0 = load i32, i32* @i32_align1, align 1 + %0 = load i32, ptr @i32_align1, align 1 ret i32 %0 } define i32 @load_i32_align2() { entry: - %0 = load i32, i32* @i32_align2, align 2 + %0 = load i32, ptr @i32_align2, align 2 ret i32 %0 } define i32 @load_i32_align4() { entry: - %0 = load i32, i32* @i32_align4, align 4 + %0 = load i32, ptr @i32_align4, align 4 ret i32 %0 } define i32 @load_i32_align8() { entry: - %0 = load i32, i32* @i32_align8, align 8 + %0 = load i32, ptr @i32_align8, align 8 ret i32 %0 } diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir index cc2a43d..e96d826 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul.mir @@ -12,7 +12,7 @@ define void @mul_i64() {entry: ret void} define void @mul_i128() {entry: ret void} define void @umulh_i64() {entry: ret void} - define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) { ret void } + define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) { ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir index a0934a4..d61656b 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec.mir @@ -2,10 +2,10 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @mul_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @mul_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @mul_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @mul_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir index 30fb1cd..65d2b075 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/mul_vec_builtin.mir @@ -3,16 +3,16 @@ --- | declare <16 x i8> @llvm.mips.mulv.b(<16 x i8>, <16 x i8>) - define void @mul_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } + define void @mul_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.mulv.h(<8 x i16>, <8 x i16>) - define void @mul_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } + define void @mul_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.mulv.w(<4 x i32>, <4 x i32>) - define void @mul_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } + define void @mul_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.mulv.d(<2 x i64>, <2 x i64>) - define void @mul_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @mul_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir index e5963d6..261bcd6 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/phi.mir @@ -77,10 +77,10 @@ ret i64 %cond } - define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) { + define void @phi_ambiguous_i64_in_fpr(i1 %cnd, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) { entry: - %0 = load i64, i64* %i64_ptr_a, align 8 - %1 = load i64, i64* %i64_ptr_b, align 8 + %0 = load i64, ptr %i64_ptr_a, align 8 + %1 = load i64, ptr %i64_ptr_b, align 8 br i1 %cnd, label %cond.true, label %cond.false cond.true: ; preds = %entry @@ -91,7 +91,7 @@ cond.end: ; preds = %cond.false, %cond.true %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ] - store i64 %cond, i64* %i64_ptr_c, align 8 + store i64 %cond, ptr %i64_ptr_c, align 8 ret void } @@ -110,10 +110,10 @@ ret float %cond } - define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) { + define void @phi_ambiguous_float_in_gpr(i1 %cnd, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) { entry: - %0 = load float, float* %f32_ptr_a, align 4 - %1 = load float, float* %f32_ptr_b, align 4 + %0 = load float, ptr %f32_ptr_a, align 4 + %1 = load float, ptr %f32_ptr_b, align 4 br i1 %cnd, label %cond.true, label %cond.false cond.true: ; preds = %entry @@ -124,7 +124,7 @@ cond.end: ; preds = %cond.false, %cond.true %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ] - store float %cond, float* %f32_ptr_c, align 4 + store float %cond, ptr %f32_ptr_c, align 4 ret void } diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir index 805298e..2e455a1 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir @@ -2,9 +2,9 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @ptr_arg_in_regs(i32* %p) {entry: ret void} - define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void} - define void @ret_ptr(i8* %p) {entry: ret void} + define void @ptr_arg_in_regs(ptr %p) {entry: ret void} + define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {entry: ret void} + define void @ret_ptr(ptr %p) {entry: ret void} ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir index 06be78b..7d58e1a 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec.mir @@ -2,25 +2,25 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } - - define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } - - define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } - - define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } + + define void @srem_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @srem_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @srem_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @srem_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } + + define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void } + + define void @urem_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @urem_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @urem_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @urem_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir index cd2dfc4..7d7749b 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/rem_and_div_vec_builtin.mir @@ -3,52 +3,52 @@ --- | declare <16 x i8> @llvm.mips.div.s.b(<16 x i8>, <16 x i8>) - define void @sdiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } + define void @sdiv_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.div.s.h(<8 x i16>, <8 x i16>) - define void @sdiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } + define void @sdiv_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.div.s.w(<4 x i32>, <4 x i32>) - define void @sdiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } + define void @sdiv_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.div.s.d(<2 x i64>, <2 x i64>) - define void @sdiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @sdiv_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <16 x i8> @llvm.mips.mod.s.b(<16 x i8>, <16 x i8>) - define void @smod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } + define void @smod_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.mod.s.h(<8 x i16>, <8 x i16>) - define void @smod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } + define void @smod_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.mod.s.w(<4 x i32>, <4 x i32>) - define void @smod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } + define void @smod_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.mod.s.d(<2 x i64>, <2 x i64>) - define void @smod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @smod_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <16 x i8> @llvm.mips.div.u.b(<16 x i8>, <16 x i8>) - define void @udiv_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } + define void @udiv_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.div.u.h(<8 x i16>, <8 x i16>) - define void @udiv_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } + define void @udiv_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.div.u.w(<4 x i32>, <4 x i32>) - define void @udiv_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } + define void @udiv_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.div.u.d(<2 x i64>, <2 x i64>) - define void @udiv_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @udiv_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <16 x i8> @llvm.mips.mod.u.b(<16 x i8>, <16 x i8>) - define void @umod_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } + define void @umod_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.mod.u.h(<8 x i16>, <8 x i16>) - define void @umod_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } + define void @umod_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.mod.u.w(<4 x i32>, <4 x i32>) - define void @umod_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } + define void @umod_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.mod.u.d(<2 x i64>, <2 x i64>) - define void @umod_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @umod_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } ...
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir index 23d28a3..6989720 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/store_4_unaligned.mir @@ -14,49 +14,49 @@ define void @store_float_align1(float %a) { entry: - store float %a, float* @float_align1, align 1 + store float %a, ptr @float_align1, align 1 ret void } define void @store_float_align2(float %a) { entry: - store float %a, float* @float_align2, align 2 + store float %a, ptr @float_align2, align 2 ret void } define void @store_float_align4(float %a) { entry: - store float %a, float* @float_align4, align 4 + store float %a, ptr @float_align4, align 4 ret void } define void @store_float_align8(float %a) { entry: - store float %a, float* @float_align8, align 8 + store float %a, ptr @float_align8, align 8 ret void } define void @store_i32_align1(i32 signext %a) { entry: - store i32 %a, i32* @i32_align1, align 1 + store i32 %a, ptr @i32_align1, align 1 ret void } define void @store_i32_align2(i32 signext %a) { entry: - store i32 %a, i32* @i32_align2, align 2 + store i32 %a, ptr @i32_align2, align 2 ret void } define void @store_i32_align4(i32 signext %a) { entry: - store i32 %a, i32* @i32_align4, align 4 + store i32 %a, ptr @i32_align4, align 4 ret void } define void @store_i32_align8(i32 signext %a) { entry: - store i32 %a, i32* @i32_align8, align 8 + store i32 %a, ptr @i32_align8, align 8 ret void }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir index 9b8d977..645c63b 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec.mir @@ -2,10 +2,10 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @sub_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sub_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sub_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sub_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } ...
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir index efa9f47..fd24664 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/sub_vec_builtin.mir @@ -3,28 +3,28 @@ --- | declare <16 x i8> @llvm.mips.subv.b(<16 x i8>, <16 x i8>) - define void @sub_v16i8_builtin(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } + define void @sub_v16i8_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.subv.h(<8 x i16>, <8 x i16>) - define void @sub_v8i16_builtin(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } + define void @sub_v8i16_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.subv.w(<4 x i32>, <4 x i32>) - define void @sub_v4i32_builtin(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } + define void @sub_v4i32_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.subv.d(<2 x i64>, <2 x i64>) - define void @sub_v2i64_builtin(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @sub_v2i64_builtin(ptr %a, ptr %b, ptr %c) { entry: ret void } declare <16 x i8> @llvm.mips.subvi.b(<16 x i8>, i32 immarg) - define void @sub_v16i8_builtin_imm(<16 x i8>* %a, <16 x i8>* %c) { entry: ret void } + define void @sub_v16i8_builtin_imm(ptr %a, ptr %c) { entry: ret void } declare <8 x i16> @llvm.mips.subvi.h(<8 x i16>, i32 immarg) - define void @sub_v8i16_builtin_imm(<8 x i16>* %a, <8 x i16>* %c) { entry: ret void } + define void @sub_v8i16_builtin_imm(ptr %a, ptr %c) { entry: ret void } declare <4 x i32> @llvm.mips.subvi.w(<4 x i32>, i32 immarg) - define void @sub_v4i32_builtin_imm(<4 x i32>* %a, <4 x i32>* %c) { entry: ret void } + define void @sub_v4i32_builtin_imm(ptr %a, ptr %c) { entry: ret void } declare <2 x i64> @llvm.mips.subvi.d(<2 x i64>, i32 immarg) - define void @sub_v2i64_builtin_imm(<2 x i64>* %a, <2 x i64>* %c) { entry: ret void } + define void @sub_v2i64_builtin_imm(ptr %a, ptr %c) { entry: ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir index 817a2a0..2e1c82c 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/zextLoad_and_sextLoad.mir @@ -2,16 +2,16 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=legalizer -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void} - define void @load1_s8_to_zextLoad1_s16(i8* %px) {entry: ret void} - define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(i8* %px) {entry: ret void} - define void @load4_s32_to_zextLoad4_s64(i8* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s16(i8* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(i8* %px) {entry: ret void} - define void @load4_s32_to_sextLoad4_s64(i8* %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s16(ptr %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(ptr %px) {entry: ret void} + define void @load4_s32_to_zextLoad4_s64(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s16(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(ptr %px) {entry: ret void} + define void @load4_s32_to_sextLoad4_s64(ptr %px) {entry: ret void} ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir index 5289d88..b68179a 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/truncStore_and_aExtLoad.mir @@ -2,8 +2,8 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=mips-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @load1_s8_to_load1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_load2_s32(i16* %px) {entry: ret void} + define void @load1_s8_to_load1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_load2_s32(ptr %px) {entry: ret void} ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir index 6390ad4..cea581c 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/mips-prelegalizer-combiner/zextLoad_and_sextLoad.mir @@ -2,16 +2,16 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=mips-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void} - define void @load1_s8_to_zextLoad1_s16(i8* %px) {entry: ret void} - define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(i8* %px) {entry: ret void} - define void @load4_s32_to_zextLoad4_s64(i8* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s16(i8* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(i8* %px) {entry: ret void} - define void @load4_s32_to_sextLoad4_s64(i8* %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s16(ptr %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s16_to_zextLoad1_s32(ptr %px) {entry: ret void} + define void @load4_s32_to_zextLoad4_s64(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s16(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s16_to_sextLoad1_s32(ptr %px) {entry: ret void} + define void @load4_s32_to_sextLoad4_s64(ptr %px) {entry: ret void} ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir index fcc3460..011a05e 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/TypeInfoforMF_skipCopies.mir @@ -2,8 +2,8 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @skipCopiesOutgoing(float* %ptr_a, float* %ptr_b, float* %ptr_c) {entry: ret void} - define void @skipCopiesIncoming(float* %float_ptr) {entry: ret void} + define void @skipCopiesOutgoing(ptr %ptr_a, ptr %ptr_b, ptr %ptr_c) {entry: ret void} + define void @skipCopiesIncoming(ptr %float_ptr) {entry: ret void} ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir index 4236c15..a963d2d 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/add_vec.mir @@ -2,10 +2,10 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @add_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @add_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @add_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @add_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @add_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @add_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @add_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @add_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir index dee3e5d..918cf4e 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/brindirect.mir @@ -2,9 +2,9 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define i32 @indirectbr(i8* %addr) { + define i32 @indirectbr(ptr %addr) { entry: - indirectbr i8* %addr, [label %L1, label %L2] + indirectbr ptr %addr, [label %L1, label %L2] L1: ret i32 0 diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir index 56dc87b..2b0476f 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fabs_vec.mir @@ -2,8 +2,8 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @fabs_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void } - define void @fabs_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void } + define void @fabs_v4f32(ptr %a, ptr %c) { entry: ret void } + define void @fabs_v2f64(ptr %a, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir index e9051c3..9c7ec54 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fence.mir @@ -2,7 +2,7 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @atomic_load_i32(i32* %ptr) { ret void } + define void @atomic_load_i32(ptr %ptr) { ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir index 11acce6..b696be6 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/floating_point_vec_arithmetic_operations.mir @@ -2,17 +2,17 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @fadd_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void } - define void @fadd_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void } + define void @fadd_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @fadd_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void } - define void @fsub_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void } - define void @fsub_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void } + define void @fsub_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @fsub_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void } - define void @fmul_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void } - define void @fmul_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void } + define void @fmul_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @fmul_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void } - define void @fdiv_v4f32(<4 x float>* %a, <4 x float>* %b, <4 x float>* %c) { entry: ret void } - define void @fdiv_v2f64(<2 x double>* %a, <2 x double>* %b, <2 x double>* %c) { entry: ret void } + define void @fdiv_v4f32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @fdiv_v2f64(ptr %a, ptr %b, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir index 8b15f07..a918bae 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/fsqrt_vec.mir @@ -2,8 +2,8 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @sqrt_v4f32(<4 x float>* %a, <4 x float>* %c) { entry: ret void } - define void @sqrt_v2f64(<2 x double>* %a, <2 x double>* %c) { entry: ret void } + define void @sqrt_v4f32(ptr %a, ptr %c) { entry: ret void } + define void @sqrt_v2f64(ptr %a, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir index cf19ddc..0e20bc7 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/global_address.mir @@ -5,7 +5,7 @@ @.str = private unnamed_addr constant [11 x i8] c"hello %d \0A\00" define void @main() {entry: ret void} - declare i32 @printf(i8*, ...) + declare i32 @printf(ptr, ...) ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir index cc1f9aa..ef607c1 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir @@ -2,12 +2,12 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @load_i32(i32* %ptr) {entry: ret void} - define void @load_i64(i64* %ptr) {entry: ret void} - define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {entry: ret void} - define void @load_float(float* %ptr) {entry: ret void} - define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_b) {entry: ret void} - define void @load_double(double* %ptr) {entry: ret void} + define void @load_i32(ptr %ptr) {entry: ret void} + define void @load_i64(ptr %ptr) {entry: ret void} + define void @load_ambiguous_i64_in_fpr(ptr %i64_ptr_a, ptr %i64_ptr_b) {entry: ret void} + define void @load_float(ptr %ptr) {entry: ret void} + define void @load_ambiguous_float_in_gpr(ptr %float_ptr_a, ptr %float_ptr_b) {entry: ret void} + define void @load_double(ptr %ptr) {entry: ret void} ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir index 863c26f..142d7ea 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_4_unaligned.mir @@ -9,19 +9,19 @@ define float @load_float_align1() { entry: - %0 = load float, float* @float_align1, align 1 + %0 = load float, ptr @float_align1, align 1 ret float %0 } define float @load_float_align4() { entry: - %0 = load float, float* @float_align4, align 4 + %0 = load float, ptr @float_align4, align 4 ret float %0 } define i32 @load_i32_align8() { entry: - %0 = load i32, i32* @i32_align8, align 8 + %0 = load i32, ptr @i32_align8, align 8 ret i32 %0 } diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir index 71ac161..64117a2 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/load_store_vec.mir @@ -2,12 +2,12 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=msa,+fp64 -mattr=nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @load_store_v16i8(<16 x i8>* %a, <16 x i8>* %b) { entry: ret void } - define void @load_store_v8i16(<8 x i16>* %a, <8 x i16>* %b) { entry: ret void } - define void @load_store_v4i32(<4 x i32>* %a, <4 x i32>* %b) { entry: ret void } - define void @load_store_v2i64(<2 x i64>* %a, <2 x i64>* %b) { entry: ret void } - define void @load_store_v4f32(<4 x float>* %a, <4 x float>* %b) { entry: ret void } - define void @load_store_v2f64(<2 x double>* %a, <2 x double>* %b) { entry: ret void } + define void @load_store_v16i8(ptr %a, ptr %b) { entry: ret void } + define void @load_store_v8i16(ptr %a, ptr %b) { entry: ret void } + define void @load_store_v4i32(ptr %a, ptr %b) { entry: ret void } + define void @load_store_v2i64(ptr %a, ptr %b) { entry: ret void } + define void @load_store_v4f32(ptr %a, ptr %b) { entry: ret void } + define void @load_store_v2f64(ptr %a, ptr %b) { entry: ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir index b8e5e2d..4226f2b 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir @@ -2,7 +2,7 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) { + define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -13,15 +13,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load i64, i64* %a + %phi1.0 = load i64, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load i64, i64* %b + %phi1.1 = load i64, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load i64, i64* %c + %phi1.2 = load i64, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -29,18 +29,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store i64 %phi1, i64* %result + store i64 %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load i64, i64* %a + %phi2.0 = load i64, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load i64, i64* %b + %phi2.1 = load i64, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -48,7 +48,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store i64 %phi2, i64* %result + store i64 %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -56,12 +56,12 @@ %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4 %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3 - store i64 %sel_3_1.2, i64* %result - store i64 %phi3, i64* %result + store i64 %sel_3_1.2, ptr %result + store i64 %phi3, ptr %result ret void } - define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) { + define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -72,15 +72,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load i64, i64* %a + %phi1.0 = load i64, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load i64, i64* %b + %phi1.1 = load i64, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load i64, i64* %c + %phi1.2 = load i64, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -88,18 +88,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store i64 %phi1, i64* %result + store i64 %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load i64, i64* %a + %phi2.0 = load i64, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load i64, i64* %b + %phi2.1 = load i64, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -107,7 +107,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store i64 %phi2, i64* %result + store i64 %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -115,12 +115,12 @@ %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ 0, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4 %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3 - store i64 %sel_3_1.2, i64* %result - store i64 %phi3, i64* %result + store i64 %sel_3_1.2, ptr %result + store i64 %phi3, ptr %result ret void } - define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) { + define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -131,15 +131,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load double, double* %a + %phi1.0 = load double, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load double, double* %b + %phi1.1 = load double, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load double, double* %c + %phi1.2 = load double, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -147,18 +147,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store double %phi1, double* %result + store double %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load double, double* %a + %phi2.0 = load double, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load double, double* %b + %phi2.1 = load double, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -166,7 +166,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store double %phi2, double* %result + store double %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -174,12 +174,12 @@ %phi4 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4 %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3 - store double %sel_3_1.2, double* %result - store double %phi3, double* %result + store double %sel_3_1.2, ptr %result + store double %phi3, ptr %result ret void } - define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) { + define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -190,15 +190,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load double, double* %a + %phi1.0 = load double, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load double, double* %b + %phi1.1 = load double, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load double, double* %c + %phi1.2 = load double, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -206,18 +206,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store double %phi1, double* %result + store double %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load double, double* %a + %phi2.0 = load double, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load double, double* %b + %phi2.1 = load double, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -225,7 +225,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store double %phi2, double* %result + store double %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -233,8 +233,8 @@ %phi4 = phi double [ %phi2, %b.PHI.2 ], [ 0.000000e+00, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4 %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3 - store double %sel_3_1.2, double* %result - store double %phi3, double* %result + store double %sel_3_1.2, ptr %result + store double %phi3, ptr %result ret void }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir index b8e5e2d..4226f2b 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir @@ -2,7 +2,7 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) { + define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -13,15 +13,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load i64, i64* %a + %phi1.0 = load i64, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load i64, i64* %b + %phi1.1 = load i64, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load i64, i64* %c + %phi1.2 = load i64, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -29,18 +29,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store i64 %phi1, i64* %result + store i64 %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load i64, i64* %a + %phi2.0 = load i64, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load i64, i64* %b + %phi2.1 = load i64, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -48,7 +48,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store i64 %phi2, i64* %result + store i64 %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -56,12 +56,12 @@ %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4 %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3 - store i64 %sel_3_1.2, i64* %result - store i64 %phi3, i64* %result + store i64 %sel_3_1.2, ptr %result + store i64 %phi3, ptr %result ret void } - define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) { + define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -72,15 +72,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load i64, i64* %a + %phi1.0 = load i64, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load i64, i64* %b + %phi1.1 = load i64, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load i64, i64* %c + %phi1.2 = load i64, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -88,18 +88,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store i64 %phi1, i64* %result + store i64 %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load i64, i64* %a + %phi2.0 = load i64, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load i64, i64* %b + %phi2.1 = load i64, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -107,7 +107,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store i64 %phi2, i64* %result + store i64 %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -115,12 +115,12 @@ %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ 0, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4 %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3 - store i64 %sel_3_1.2, i64* %result - store i64 %phi3, i64* %result + store i64 %sel_3_1.2, ptr %result + store i64 %phi3, ptr %result ret void } - define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) { + define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -131,15 +131,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load double, double* %a + %phi1.0 = load double, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load double, double* %b + %phi1.1 = load double, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load double, double* %c + %phi1.2 = load double, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -147,18 +147,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store double %phi1, double* %result + store double %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load double, double* %a + %phi2.0 = load double, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load double, double* %b + %phi2.1 = load double, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -166,7 +166,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store double %phi2, double* %result + store double %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -174,12 +174,12 @@ %phi4 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4 %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3 - store double %sel_3_1.2, double* %result - store double %phi3, double* %result + store double %sel_3_1.2, ptr %result + store double %phi3, ptr %result ret void } - define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) { + define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, ptr %a, ptr %b, ptr %c, ptr %result) { entry: br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1 @@ -190,15 +190,15 @@ br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0 b.PHI.1.0: ; preds = %pre.PHI.1.0 - %phi1.0 = load double, double* %a + %phi1.0 = load double, ptr %a br label %b.PHI.1 b.PHI.1.1: ; preds = %pre.PHI.1 - %phi1.1 = load double, double* %b + %phi1.1 = load double, ptr %b br label %b.PHI.1 b.PHI.1.2: ; preds = %pre.PHI.1.0 - %phi1.2 = load double, double* %c + %phi1.2 = load double, ptr %c br label %b.PHI.1 b.PHI.1: ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0 @@ -206,18 +206,18 @@ br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3 b.PHI.1.end: ; preds = %b.PHI.1 - store double %phi1, double* %result + store double %phi1, ptr %result ret void pre.PHI.2: ; preds = %entry br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1 b.PHI.2.0: ; preds = %pre.PHI.2 - %phi2.0 = load double, double* %a + %phi2.0 = load double, ptr %a br label %b.PHI.2 b.PHI.2.1: ; preds = %pre.PHI.2 - %phi2.1 = load double, double* %b + %phi2.1 = load double, ptr %b br label %b.PHI.2 b.PHI.2: ; preds = %b.PHI.2.1, %b.PHI.2.0 @@ -225,7 +225,7 @@ br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end b.PHI.2.end: ; preds = %b.PHI.2 - store double %phi2, double* %result + store double %phi2, ptr %result ret void b.PHI.3: ; preds = %b.PHI.2, %b.PHI.1 @@ -233,8 +233,8 @@ %phi4 = phi double [ %phi2, %b.PHI.2 ], [ 0.000000e+00, %b.PHI.1 ] %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4 %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3 - store double %sel_3_1.2, double* %result - store double %phi3, double* %result + store double %sel_3_1.2, ptr %result + store double %phi3, ptr %result ret void }
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir index 056cfdc..b0fc873 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul.mir @@ -3,7 +3,7 @@ --- | define void @mul_i32(i32 %x, i32 %y) {entry: ret void} - define void @umul_with_overflow(i32 %lhs, i32 %rhs, i32* %pmul, i1* %pcarry_flag) { ret void } + define void @umul_with_overflow(i32 %lhs, i32 %rhs, ptr %pmul, ptr %pcarry_flag) { ret void } ...
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir index 874f3e5..4d6bce2 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/mul_vec.mir @@ -2,10 +2,10 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @mul_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @mul_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @mul_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @mul_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @mul_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @mul_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @mul_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @mul_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } ...
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir index 4744e36..dc75898 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir @@ -32,10 +32,10 @@ ret i64 %cond } - define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) { + define void @phi_ambiguous_i64_in_fpr(i1 %cnd, ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) { entry: - %0 = load i64, i64* %i64_ptr_a, align 8 - %1 = load i64, i64* %i64_ptr_b, align 8 + %0 = load i64, ptr %i64_ptr_a, align 8 + %1 = load i64, ptr %i64_ptr_b, align 8 br i1 %cnd, label %cond.true, label %cond.false cond.true: ; preds = %entry @@ -46,7 +46,7 @@ cond.end: ; preds = %cond.false, %cond.true %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ] - store i64 %cond, i64* %i64_ptr_c, align 8 + store i64 %cond, ptr %i64_ptr_c, align 8 ret void } @@ -65,10 +65,10 @@ ret float %cond } - define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) { + define void @phi_ambiguous_float_in_gpr(i1 %cnd, ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) { entry: - %0 = load float, float* %f32_ptr_a, align 4 - %1 = load float, float* %f32_ptr_b, align 4 + %0 = load float, ptr %f32_ptr_a, align 4 + %1 = load float, ptr %f32_ptr_b, align 4 br i1 %cnd, label %cond.true, label %cond.false cond.true: ; preds = %entry @@ -79,7 +79,7 @@ cond.end: ; preds = %cond.false, %cond.true %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ] - store float %cond, float* %f32_ptr_c, align 4 + store float %cond, ptr %f32_ptr_c, align 4 ret void } diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir index b5c6efc..044ab13 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir @@ -2,9 +2,9 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @ptr_arg_in_regs(i32* %p) {entry: ret void} - define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, i32* %p) {entry: ret void} - define void @ret_ptr(i8* %p) {entry: ret void} + define void @ptr_arg_in_regs(ptr %p) {entry: ret void} + define void @ptr_arg_on_stack(i32 %x1, i32 %x2, i32 %x3, i32 %x4, ptr %p) {entry: ret void} + define void @ret_ptr(ptr %p) {entry: ret void} ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir index 0863b09..cef6101 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/rem_and_div_vec.mir @@ -2,25 +2,25 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @sdiv_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @sdiv_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @sdiv_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @sdiv_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } - - define void @srem_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @srem_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @srem_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @srem_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } - - define void @udiv_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @udiv_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @udiv_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @udiv_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } - - define void @urem_v16u8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @urem_v8u16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @urem_v4u32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @urem_v2u64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @sdiv_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sdiv_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sdiv_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sdiv_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } + + define void @srem_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @srem_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @srem_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @srem_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } + + define void @udiv_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @udiv_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @udiv_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @udiv_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void } + + define void @urem_v16u8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @urem_v8u16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @urem_v4u32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @urem_v2u64(ptr %a, ptr %b, ptr %c) { entry: ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir index 93e6b72..20f18fe 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir @@ -5,9 +5,9 @@ define void @select_i32(i32, i32) {entry: ret void} define void @select_ptr(i32, i32) {entry: ret void} define void @select_i64() {entry: ret void} - define void @select_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {entry: ret void} + define void @select_ambiguous_i64_in_fpr(ptr %i64_ptr_a, ptr %i64_ptr_b, ptr %i64_ptr_c) {entry: ret void} define void @select_float() {entry: ret void} - define void @select_ambiguous_float_in_gpr(float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {entry: ret void} + define void @select_ambiguous_float_in_gpr(ptr %f32_ptr_a, ptr %f32_ptr_b, ptr %f32_ptr_c) {entry: ret void} define void @select_double() {entry: ret void} ... diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir index fbc32e8..80bf04a 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store.mir @@ -2,10 +2,10 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @store_i32(i32* %ptr) { entry: ret void } - define void @store_i64(i64* %ptr) { entry: ret void } - define void @store_float(float* %ptr) { entry: ret void } - define void @store_double(double* %ptr) { entry: ret void } + define void @store_i32(ptr %ptr) { entry: ret void } + define void @store_i64(ptr %ptr) { entry: ret void } + define void @store_float(ptr %ptr) { entry: ret void } + define void @store_double(ptr %ptr) { entry: ret void } ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir index b57b161..3f705eb 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/store_4_unaligned.mir @@ -9,19 +9,19 @@ define void @store_float_align1(float %a) { entry: - store float %a, float* @float_align1, align 1 + store float %a, ptr @float_align1, align 1 ret void } define void @store_float_align4(float %a) { entry: - store float %a, float* @float_align4, align 4 + store float %a, ptr @float_align4, align 4 ret void } define void @store_i32_align8(i32 signext %a) { entry: - store i32 %a, i32* @i32_align8, align 8 + store i32 %a, ptr @i32_align8, align 8 ret void } diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir index cc66a98..b2114a9 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/sub_vec.mir @@ -2,10 +2,10 @@ # RUN: llc -mtriple=mipsel-linux-gnu -mcpu=mips32r5 -mattr=+msa,+fp64,+nan2008 -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=P5600 --- | - define void @sub_v16i8(<16 x i8>* %a, <16 x i8>* %b, <16 x i8>* %c) { entry: ret void } - define void @sub_v8i16(<8 x i16>* %a, <8 x i16>* %b, <8 x i16>* %c) { entry: ret void } - define void @sub_v4i32(<4 x i32>* %a, <4 x i32>* %b, <4 x i32>* %c) { entry: ret void } - define void @sub_v2i64(<2 x i64>* %a, <2 x i64>* %b, <2 x i64>* %c) { entry: ret void } + define void @sub_v16i8(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sub_v8i16(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sub_v4i32(ptr %a, ptr %b, ptr %c) { entry: ret void } + define void @sub_v2i64(ptr %a, ptr %b, ptr %c) { entry: ret void } ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir index c56572d..0d81e012 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir @@ -2,14 +2,14 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @outgoing_gpr(i32* %i32_ptr) {entry: ret void} - define void @outgoing_fpr(float* %float_ptr) {entry: ret void} - define void @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {entry: ret void} - define void @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {entry: ret void} - define void @incoming_gpr(i32* %a) {entry: ret void} - define void @incoming_fpr(float* %a) {entry: ret void} - define void @incoming_i32_instr(i32* %i32_ptr) {entry: ret void} - define void @incoming_float_instr(float* %float_ptr) {entry: ret void} + define void @outgoing_gpr(ptr %i32_ptr) {entry: ret void} + define void @outgoing_fpr(ptr %float_ptr) {entry: ret void} + define void @outgoing_gpr_instr(ptr %i32_ptr1, ptr %i32_ptr2) {entry: ret void} + define void @outgoing_fpr_instr(ptr %float_ptr1, ptr %float_ptr2) {entry: ret void} + define void @incoming_gpr(ptr %a) {entry: ret void} + define void @incoming_fpr(ptr %a) {entry: ret void} + define void @incoming_i32_instr(ptr %i32_ptr) {entry: ret void} + define void @incoming_float_instr(ptr %float_ptr) {entry: ret void} ... 
--- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir index d7f549f..5ed6dc4f 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/truncStore_and_aExtLoad.mir @@ -2,9 +2,9 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @load_store_i8(i8* %px, i8* %py) {entry: ret void} - define void @load_store_i16(i16* %px, i16* %py) {entry: ret void} - define void @load_store_i32(i32* %px, i32* %py) {entry: ret void} + define void @load_store_i8(ptr %px, ptr %py) {entry: ret void} + define void @load_store_i16(ptr %px, ptr %py) {entry: ret void} + define void @load_store_i32(ptr %px, ptr %py) {entry: ret void} ... --- diff --git a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir index 44baa0d..1ffed37 100644 --- a/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir +++ b/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/zextLoad_and_sextLoad.mir @@ -2,12 +2,12 @@ # RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32 --- | - define void @load1_s8_to_zextLoad1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_zextLoad2_s32(i16* %px) {entry: ret void} - define void @load4_s32_to_zextLoad4_s64(i8* %px) {entry: ret void} - define void @load1_s8_to_sextLoad1_s32(i8* %px) {entry: ret void} - define void @load2_s16_to_sextLoad2_s32(i16* %px) {entry: ret void} - define void @load4_s32_to_sextLoad4_s64(i8* %px) {entry: ret void} + define void @load1_s8_to_zextLoad1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_zextLoad2_s32(ptr %px) {entry: ret void} + define void @load4_s32_to_zextLoad4_s64(ptr %px) {entry: ret void} + define void @load1_s8_to_sextLoad1_s32(ptr %px) {entry: ret void} + define void @load2_s16_to_sextLoad2_s32(ptr %px) {entry: ret void} + define void @load4_s32_to_sextLoad4_s64(ptr %px) {entry: ret void} ... 
--- diff --git a/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir b/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir index c964988..33e43ad 100644 --- a/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir +++ b/llvm/test/CodeGen/Mips/compactbranches/compact-branch-implicit-def.mir @@ -14,33 +14,33 @@ entry: %retval = alloca i32, align 4 %a.addr = alloca i32, align 4 - store i32 %a, i32* %a.addr, align 4 - %0 = load i32, i32* %a.addr, align 4 + store i32 %a, ptr %a.addr, align 4 + %0 = load i32, ptr %a.addr, align 4 %cmp = icmp sgt i32 %0, 5 br i1 %cmp, label %if.then, label %if.else if.then: ; preds = %entry - %1 = load i32, i32* %a.addr, align 4 - %2 = load i32, i32* %a.addr, align 4 + %1 = load i32, ptr %a.addr, align 4 + %2 = load i32, ptr %a.addr, align 4 %add = add nsw i32 %1, %2 - store i32 %add, i32* %retval, align 4 + store i32 %add, ptr %retval, align 4 br label %return if.else: ; preds = %entry - %3 = load i32, i32* %a.addr, align 4 + %3 = load i32, ptr %a.addr, align 4 %call = call i32 @g(i32 signext %3) - store i32 %call, i32* %retval, align 4 + store i32 %call, ptr %retval, align 4 br label %return return: ; preds = %if.else, %if.then - %4 = load i32, i32* %retval, align 4 + %4 = load i32, ptr %retval, align 4 ret i32 %4 } declare i32 @g(i32 signext) ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) + declare void @llvm.stackprotector(ptr, ptr) !llvm.ident = !{!0} diff --git a/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir b/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir index 0663305..9cde85b 100644 --- a/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir +++ b/llvm/test/CodeGen/Mips/delay-slot-filler-bundled-insts.mir @@ -16,28 +16,28 @@ --- | target datalayout = "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128" target triple = "mips64-unknown-freebsd" - declare i8* @func_a(i64 zeroext) - declare i8* @func_b(i64 zeroext) + declare ptr @func_a(i64 zeroext) + declare ptr @func_b(i64 zeroext) ; Function Attrs: nounwind - define i8* @test(i64 zeroext %nbytes) local_unnamed_addr #0 { + define ptr @test(i64 zeroext %nbytes) local_unnamed_addr #0 { entry: %cmp = icmp eq i64 %nbytes, 0 br i1 %cmp, label %if.else, label %if.then if.then: ; preds = %entry - %call = tail call i8* @func_a(i64 zeroext %nbytes) + %call = tail call ptr @func_a(i64 zeroext %nbytes) br label %return if.else: ; preds = %entry - %call1 = tail call i8* @func_b(i64 zeroext 0) + %call1 = tail call ptr @func_b(i64 zeroext 0) br label %return return: ; preds = %if.else, %if.then - %retval.0 = phi i8* [ %call, %if.then ], [ %call1, %if.else ] - ret i8* %retval.0 + %retval.0 = phi ptr [ %call, %if.then ], [ %call1, %if.else ] + ret ptr %retval.0 } ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) #0 + declare void @llvm.stackprotector(ptr, ptr) #0 attributes #0 = { nounwind } diff --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir index 02fef74..fe72a17 100644 --- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir +++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-call.mir @@ -7,7 +7,7 @@ # CHECK: Bad machine code: invalid instruction when using jump guards! 
 --- |
-  define i32 @fooTail(i32 (i32)* nocapture %f1) {
+  define i32 @fooTail(ptr nocapture %f1) {
   entry:
     %0 = tail call i32 %f1(i32 14)
     ret i32 %0
diff --git a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
index d313cad..e87af58 100644
--- a/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
+++ b/llvm/test/CodeGen/Mips/indirect-jump-hazard/guards-verify-tailcall.mir
@@ -7,7 +7,7 @@
 # CHECK: Bad machine code: invalid instruction when using jump guards!

 --- |
-  define i32 @fooTail(i32 (i32)* nocapture %f1) {
+  define i32 @fooTail(ptr nocapture %f1) {
   entry:
     %0 = tail call i32 %f1(i32 14)
     ret i32 %0
diff --git a/llvm/test/CodeGen/Mips/micromips-eva.mir b/llvm/test/CodeGen/Mips/micromips-eva.mir
index 3ab4b22..f45118d 100644
--- a/llvm/test/CodeGen/Mips/micromips-eva.mir
+++ b/llvm/test/CodeGen/Mips/micromips-eva.mir
@@ -10,40 +10,40 @@
   ; Function Attrs: noinline nounwind optnone
   define void @_Z3foov() {
   entry:
-    %0 = load i8, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5), align 1
+    %0 = load i8, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5), align 1
     %conv = sext i8 %0 to i32
     %sub = sub nsw i32 %conv, 7
     %conv1 = trunc i32 %sub to i8
-    store i8 %conv1, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3), align 1
-    %1 = load i8, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5), align 1
+    store i8 %conv1, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3), align 1
+    %1 = load i8, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5), align 1
     %conv2 = sext i8 %1 to i32
     %sub3 = sub nsw i32 %conv2, 7
     %conv4 = trunc i32 %sub3 to i8
-    store i8 %conv4, i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3), align 1
-    %2 = load i16, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5), align 2
+    store i8 %conv4, ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3), align 1
+    %2 = load i16, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5), align 2
     %conv5 = sext i16 %2 to i32
     %sub6 = sub nsw i32 %conv5, 7
     %conv7 = trunc i32 %sub6 to i16
-    store i16 %conv7, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3), align 2
-    %3 = load i16, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5), align 2
+    store i16 %conv7, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3), align 2
+    %3 = load i16, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5), align 2
     %conv8 = sext i16 %3 to i32
     %sub9 = sub nsw i32 %conv8, 7
     %conv10 = trunc i32 %sub9 to i16
-    store i16 %conv10, i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3), align 2
-    %4 = load i32, i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 5), align 4
+    store i16 %conv10, ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3), align 2
+    %4 = load i32, ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 5), align 4
     %sub11 = sub nsw i32 %4, 7
-    store i32 %sub11, i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 3), align 4
+    store i32 %sub11, ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 3), align 4
     ret void
   }

   ; Function Attrs: noinline nounwind optnone
-  define i32 @_Z3barPi(i32* %z) {
+  define i32 @_Z3barPi(ptr %z) {
   entry:
-    %z.addr = alloca i32*, align 4
-    store i32* %z, i32** %z.addr, align 4
-    %0 = load i32*, i32** %z.addr, align 4
+    %z.addr = alloca ptr, align 4
+    store ptr %z, ptr %z.addr, align 4
+    %0 = load ptr, ptr %z.addr, align 4
     fence seq_cst
-    %1 = atomicrmw add i32* %0, i32 42 monotonic
+    %1 = atomicrmw add ptr %0, i32 42 monotonic
     fence seq_cst
     %2 = add i32 %1, 42
     ret i32 %2
@@ -100,25 +100,25 @@
 body: |
   bb.0.entry:
     %0:gpr32 = LUi target-flags(mips-abs-hi) @bArray
     %1:gpr32 = ADDiu killed %0, target-flags(mips-abs-lo) @bArray
-    %2:gpr32 = LBuE %1, 5 :: (dereferenceable load (s8) from `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5)`)
+    %2:gpr32 = LBuE %1, 5 :: (dereferenceable load (s8) from `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5)`)
     %3:gpr32 = ADDiu killed %2, -7
-    SBE killed %3, %1, 3 :: (store (s8) into `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3)`)
-    %4:gpr32 = LBE %1, 5 :: (dereferenceable load (s8) from `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 5)`)
+    SBE killed %3, %1, 3 :: (store (s8) into `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3)`)
+    %4:gpr32 = LBE %1, 5 :: (dereferenceable load (s8) from `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 5)`)
     %5:gpr32 = ADDiu killed %4, -7
-    SBE killed %5, %1, 3 :: (store (s8) into `i8* getelementptr inbounds ([13 x i8], [13 x i8]* @bArray, i32 0, i32 3)`)
+    SBE killed %5, %1, 3 :: (store (s8) into `ptr getelementptr inbounds ([13 x i8], ptr @bArray, i32 0, i32 3)`)
     %6:gpr32 = LUi target-flags(mips-abs-hi) @hArray
     %7:gpr32 = ADDiu killed %6, target-flags(mips-abs-lo) @hArray
-    %8:gpr32 = LHuE %7, 10 :: (dereferenceable load (s16) from `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5)`)
+    %8:gpr32 = LHuE %7, 10 :: (dereferenceable load (s16) from `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5)`)
     %9:gpr32 = ADDiu killed %8, -7
-    SHE killed %9, %7, 6 :: (store (s16) into `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3)`)
-    %10:gpr32 = LHE %7, 10 :: (dereferenceable load (s16) from `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 5)`)
+    SHE killed %9, %7, 6 :: (store (s16) into `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3)`)
+    %10:gpr32 = LHE %7, 10 :: (dereferenceable load (s16) from `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 5)`)
     %11:gpr32 = ADDiu killed %10, -7
-    SHE killed %11, %7, 6 :: (store (s16) into `i16* getelementptr inbounds ([13 x i16], [13 x i16]* @hArray, i32 0, i32 3)`)
+    SHE killed %11, %7, 6 :: (store (s16) into `ptr getelementptr inbounds ([13 x i16], ptr @hArray, i32 0, i32 3)`)
     %12:gpr32 = LUi target-flags(mips-abs-hi) @wArray
     %13:gpr32 = ADDiu killed %12, target-flags(mips-abs-lo) @wArray
-    %14:gpr32 = LWE %13, 20 :: (dereferenceable load (s32) from `i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 5)`)
+    %14:gpr32 = LWE %13, 20 :: (dereferenceable load (s32) from `ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 5)`)
     %15:gpr32 = ADDiu killed %14, -7
-    SWE killed %15, %13, 12 :: (store (s32) into `i32* getelementptr inbounds ([13 x i32], [13 x i32]* @wArray, i32 0, i32 3)`)
+    SWE killed %15, %13, 12 :: (store (s32) into `ptr getelementptr inbounds ([13 x i32], ptr @wArray, i32 0, i32 3)`)
     RetRA

 ...
diff --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir index feab7ec..7ffdb40 100644 --- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir +++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-lwp-swp.mir @@ -3,15 +3,15 @@ # RUN: %s -o - | FileCheck %s --- | - define void @f1(i32* %adr, i32 %val) { ret void } - define void @f2(i32* %adr, i32 %val) { ret void } - define void @f3(i32* %adr, i32 %val) { ret void } - define void @f4(i32* %adr, i32 %val) { ret void } + define void @f1(ptr %adr, i32 %val) { ret void } + define void @f2(ptr %adr, i32 %val) { ret void } + define void @f3(ptr %adr, i32 %val) { ret void } + define void @f4(ptr %adr, i32 %val) { ret void } - declare i32* @f() + declare ptr @f() ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) + declare void @llvm.stackprotector(ptr, ptr) ... --- diff --git a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir index c9044f3..2b136a3 100644 --- a/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir +++ b/llvm/test/CodeGen/Mips/micromips-sizereduction/micromips-no-lwp-swp.mir @@ -3,15 +3,15 @@ # RUN: %s -o - | FileCheck %s --- | - define void @f1(i32* %adr, i32 %val) { ret void } - define void @f2(i32* %adr, i32 %val) { ret void } - define void @f3(i32* %adr, i32 %val) { ret void } - define void @f4(i32* %adr, i32 %val) { ret void } + define void @f1(ptr %adr, i32 %val) { ret void } + define void @f2(ptr %adr, i32 %val) { ret void } + define void @f3(ptr %adr, i32 %val) { ret void } + define void @f4(ptr %adr, i32 %val) { ret void } - declare i32* @f() + declare ptr @f() ; Function Attrs: nounwind - declare void @llvm.stackprotector(i8*, i8**) + declare void @llvm.stackprotector(ptr, ptr) ... --- # CHECK-LABEL: name: f1 diff --git a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir index 00b4089..4370084 100644 --- a/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir +++ b/llvm/test/CodeGen/Mips/mirparser/target-flags-pic-mxgot-tls.mir @@ -19,32 +19,32 @@ entry: %call = tail call i32 @_Z1gi(i32 signext %asd) %add = add nsw i32 %call, %asd - %0 = load i32, i32* @v, align 4 + %0 = load i32, ptr @v, align 4 %add1 = add nsw i32 %add, %0 - %.b.i.i = load i1, i1* @__tls_guard, align 1 + %.b.i.i = load i1, ptr @__tls_guard, align 1 br i1 %.b.i.i, label %entry._ZTW1k.exit_crit_edge, label %init.i.i entry._ZTW1k.exit_crit_edge: - %.pre = load i32, i32* @k, align 4 + %.pre = load i32, ptr @k, align 4 br label %_ZTW1k.exit init.i.i: - store i1 true, i1* @__tls_guard, align 1 + store i1 true, ptr @__tls_guard, align 1 %call.i.i.i = tail call i32 @_Z1gi(i32 signext 3) - store i32 %call.i.i.i, i32* @k, align 4 + store i32 %call.i.i.i, ptr @k, align 4 br label %_ZTW1k.exit _ZTW1k.exit: %1 = phi i32 [ %.pre, %entry._ZTW1k.exit_crit_edge ], [ %call.i.i.i, %init.i.i ] %add2 = add nsw i32 %add1, %1 - br i1 icmp ne (void ()* @_ZTH1j, void ()* null), label %2, label %_ZTW1j.exit + br i1 icmp ne (ptr @_ZTH1j, ptr null), label %2, label %_ZTW1j.exit ;