--- /dev/null
+# RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass=si-load-store-opt -verify-machineinstrs -o - %s | FileCheck %s
+
+# The purpose of this test is to make sure we are combining relevant memory
+# operations correctly with/without SCC bit.
+
+--- |
+ ; IR counterparts of the MIR functions below. All four functions are
+ ; intentionally identical at the IR level (two i32 stores: one to %out and
+ ; one to %out.gep.1); the four MIR bodies differ only in the cache-policy
+ ; (SCC) operand of their buffer stores, which is what the pass is tested on.
+ define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+ store i32 123, i32 addrspace(1)* %out.gep.1
+ store i32 456, i32 addrspace(1)* %out
+ ret void
+ }
+
+ define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+ store i32 123, i32 addrspace(1)* %out.gep.1
+ store i32 456, i32 addrspace(1)* %out
+ ret void
+ }
+
+ define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+ store i32 123, i32 addrspace(1)* %out.gep.1
+ store i32 456, i32 addrspace(1)* %out
+ ret void
+ }
+ define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
+ %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
+ store i32 123, i32 addrspace(1)* %out.gep.1
+ store i32 456, i32 addrspace(1)* %out
+ ret void
+ }
+...
+
+# test1: neither store carries the SCC bit (cache-policy operand is 0), so the
+# load-store optimizer is expected to merge the two adjacent dword stores into
+# a single BUFFER_STORE_DWORDX2_OFFSET.
+# Note: the S_LOAD address operand is %0 (the COPY of the kernarg pointer);
+# the REG_SEQUENCE needs explicit %subreg indices to be valid MIR.
+# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, 0, implicit $exec :: (store 8 into %ir.out.gep.1, align 4, addrspace 1)
+---
+name: test1
+liveins:
+ - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $sgpr0_sgpr1
+
+ $vgpr0 = V_MOV_B32_e32 123, implicit $exec
+ $vgpr1 = V_MOV_B32_e32 456, implicit $exec
+
+ $sgpr2 = S_MOV_B32 -1
+ $sgpr3 = S_MOV_B32 61440
+
+ %0:sgpr_64 = COPY $sgpr0_sgpr1
+ %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
+
+ %5:vgpr_32 = COPY $vgpr0
+ %6:vgpr_32 = COPY $vgpr1
+
+ BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+ BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+
+ S_ENDPGM 0
+...
+
+# test2: the first store carries the SCC bit (cache-policy operand 16) and the
+# second does not, so the stores must NOT be merged — both remain as separate
+# BUFFER_STORE_DWORD_OFFSET instructions with their cache policies preserved.
+# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+---
+name: test2
+liveins:
+ - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $sgpr0_sgpr1
+
+ $vgpr0 = V_MOV_B32_e32 123, implicit $exec
+ $vgpr1 = V_MOV_B32_e32 456, implicit $exec
+
+ $sgpr2 = S_MOV_B32 -1
+ $sgpr3 = S_MOV_B32 61440
+
+ %0:sgpr_64 = COPY $sgpr0_sgpr1
+ %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
+
+ %5:vgpr_32 = COPY $vgpr0
+ %6:vgpr_32 = COPY $vgpr1
+
+ BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+ BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+
+ S_ENDPGM 0
+...
+
+# test3: mirror of test2 — only the second store carries the SCC bit
+# (cache-policy operand 16), so the pair must again NOT be merged.
+# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+---
+name: test3
+liveins:
+ - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $sgpr0_sgpr1
+
+ $vgpr0 = V_MOV_B32_e32 123, implicit $exec
+ $vgpr1 = V_MOV_B32_e32 456, implicit $exec
+
+ $sgpr2 = S_MOV_B32 -1
+ $sgpr3 = S_MOV_B32 61440
+
+ %0:sgpr_64 = COPY $sgpr0_sgpr1
+ %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
+
+ %5:vgpr_32 = COPY $vgpr0
+ %6:vgpr_32 = COPY $vgpr1
+
+ BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+ BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+
+ S_ENDPGM 0
+...
+
+# test4: both stores carry the SCC bit (cache-policy operand 16). Matching
+# cache policies allow merging, and the SCC bit must be preserved on the
+# combined BUFFER_STORE_DWORDX2_OFFSET.
+# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, 0, implicit $exec :: (store 8 into %ir.out.gep.1, align 4, addrspace 1)
+---
+name: test4
+liveins:
+ - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+body: |
+ bb.0 (%ir-block.0):
+ liveins: $sgpr0_sgpr1
+
+ $vgpr0 = V_MOV_B32_e32 123, implicit $exec
+ $vgpr1 = V_MOV_B32_e32 456, implicit $exec
+
+ $sgpr2 = S_MOV_B32 -1
+ $sgpr3 = S_MOV_B32 61440
+
+ %0:sgpr_64 = COPY $sgpr0_sgpr1
+ %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
+ %2:sgpr_32 = COPY $sgpr2
+ %3:sgpr_32 = COPY $sgpr3
+ %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
+
+ %5:vgpr_32 = COPY $vgpr0
+ %6:vgpr_32 = COPY $vgpr1
+
+ BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+ BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
+
+ S_ENDPGM 0
+...
+++ /dev/null
-# RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass=si-load-store-opt -verify-machineinstrs -o - %s | FileCheck %s
-
-# The purpose of this test is to make sure we are combining relevant memory
-# operations correctly with/without SCC bit.
-
---- |
- define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
- %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
- store i32 123, i32 addrspace(1)* %out.gep.1
- store i32 456, i32 addrspace(1)* %out
- ret void
- }
-
- define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
- %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
- store i32 123, i32 addrspace(1)* %out.gep.1
- store i32 456, i32 addrspace(1)* %out
- ret void
- }
-
- define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
- %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
- store i32 123, i32 addrspace(1)* %out.gep.1
- store i32 456, i32 addrspace(1)* %out
- ret void
- }
- define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
- %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
- store i32 123, i32 addrspace(1)* %out.gep.1
- store i32 456, i32 addrspace(1)* %out
- ret void
- }
-...
-
-# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, 0, implicit $exec :: (store 8 into %ir.out.gep.1, align 4, addrspace 1)
----
-name: test1
-liveins:
- - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
-body: |
- bb.0 (%ir-block.0):
- liveins: $sgpr0_sgpr1
-
- $vgpr0 = V_MOV_B32_e32 123, implicit $exec
- $vgpr1 = V_MOV_B32_e32 456, implicit $exec
-
- $sgpr2 = S_MOV_B32 -1
- $sgpr3 = S_MOV_B32 61440
-
- %0:sgpr_64 = COPY $sgpr0_sgpr1
- %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
- %2:sgpr_32 = COPY $sgpr2
- %3:sgpr_32 = COPY $sgpr3
- %4:sgpr_128 = REG_SEQUENCE %1, %2, %3
-
- %5:vgpr_32 = COPY $vgpr0
- %6:vgpr_32 = COPY $vgpr1
-
- BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
- BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
-
- S_ENDPGM 0
-...
-
-# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
-# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
----
-name: test2
-liveins:
- - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
-body: |
- bb.0 (%ir-block.0):
- liveins: $sgpr0_sgpr1
-
- $vgpr0 = V_MOV_B32_e32 123, implicit $exec
- $vgpr1 = V_MOV_B32_e32 456, implicit $exec
-
- $sgpr2 = S_MOV_B32 -1
- $sgpr3 = S_MOV_B32 61440
-
- %0:sgpr_64 = COPY $sgpr0_sgpr1
- %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
- %2:sgpr_32 = COPY $sgpr2
- %3:sgpr_32 = COPY $sgpr3
- %4:sgpr_128 = REG_SEQUENCE %1, %2, %3
-
- %5:vgpr_32 = COPY $vgpr0
- %6:vgpr_32 = COPY $vgpr1
-
- BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
- BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
-
- S_ENDPGM 0
-...
-
-# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
-# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
----
-name: test3
-liveins:
- - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
-body: |
- bb.0 (%ir-block.0):
- liveins: $sgpr0_sgpr1
-
- $vgpr0 = V_MOV_B32_e32 123, implicit $exec
- $vgpr1 = V_MOV_B32_e32 456, implicit $exec
-
- $sgpr2 = S_MOV_B32 -1
- $sgpr3 = S_MOV_B32 61440
-
- %0:sgpr_64 = COPY $sgpr0_sgpr1
- %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
- %2:sgpr_32 = COPY $sgpr2
- %3:sgpr_32 = COPY $sgpr3
- %4:sgpr_128 = REG_SEQUENCE %1, %2, %3
-
- %5:vgpr_32 = COPY $vgpr0
- %6:vgpr_32 = COPY $vgpr1
-
- BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
- BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
-
- S_ENDPGM 0
-...
-
-# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, 0, implicit $exec :: (store 8 into %ir.out.gep.1, align 4, addrspace 1)
----
-name: test4
-liveins:
- - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
-body: |
- bb.0 (%ir-block.0):
- liveins: $sgpr0_sgpr1
-
- $vgpr0 = V_MOV_B32_e32 123, implicit $exec
- $vgpr1 = V_MOV_B32_e32 456, implicit $exec
-
- $sgpr2 = S_MOV_B32 -1
- $sgpr3 = S_MOV_B32 61440
-
- %0:sgpr_64 = COPY $sgpr0_sgpr1
- %1:sgpr_64 = S_LOAD_DWORDX2_IMM %1, 36, 0 :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
- %2:sgpr_32 = COPY $sgpr2
- %3:sgpr_32 = COPY $sgpr3
- %4:sgpr_128 = REG_SEQUENCE %1, %2, %3
-
- %5:vgpr_32 = COPY $vgpr0
- %6:vgpr_32 = COPY $vgpr1
-
- BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
- BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, 0, implicit $exec :: (store 4 into %ir.out.gep.1, addrspace 1)
-
- S_ENDPGM 0
-...