+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
+
; We may have subregister live ranges that are undefined on some paths. The
; verifier should not complain about this.
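+;
+; Illustrative sketch only (not part of this test's IR): merging a vector that
+; has an undef element on one path with a fully defined vector on the other
+; leaves one 32-bit subregister of the wider register tuple without a
+; definition along the undef path, e.g.:
+;
+;   %v = phi <4 x float> [ <float 0.0, float 0.0, float 0.0, float undef>, %A ],
+;                        [ zeroinitializer, %B ]
+;
+; The block names %A/%B are hypothetical; the point is that the machine
+; verifier must tolerate the resulting partially undefined live range.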
-
-; CHECK-LABEL: {{^}}func:
define amdgpu_kernel void @func() #0 {
+; CHECK-LABEL: func:
+; CHECK: ; %bb.0: ; %B0
+; CHECK-NEXT: s_mov_b32 s0, 0
+; CHECK-NEXT: s_cbranch_scc1 BB0_2
+; CHECK-NEXT: ; %bb.1: ; %B30.1
+; CHECK-NEXT: s_mov_b32 s0, 0x7fc00000
+; CHECK-NEXT: BB0_2: ; %B30.2
+; CHECK-NEXT: v_mov_b32_e32 v0, s0
+; CHECK-NEXT: s_mov_b32 m0, -1
+; CHECK-NEXT: ds_write_b32 v0, v0
+; CHECK-NEXT: s_endpgm
B0:
br i1 undef, label %B1, label %B2
; FIXME: Extra undef subregister copy should be removed before being
; overwritten with a defined copy
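+; Observation from the generated checks below: v3 is never written before the
+; buffer_store_dwordx4 of v[0:3], so the stored quad is only partially defined.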
-; CHECK-LABEL: {{^}}valley_partially_undef_copy:
define amdgpu_ps float @valley_partially_undef_copy() #0 {
+; CHECK-LABEL: valley_partially_undef_copy:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_mov_b32 s3, 0xf000
+; CHECK-NEXT: s_mov_b32 s2, -1
+; CHECK-NEXT: buffer_load_dword v1, off, s[0:3], 0
+; CHECK-NEXT: buffer_load_dword v0, off, s[0:3], 0
+; CHECK-NEXT: v_mov_b32_e32 v2, 0x7fc00000
+; CHECK-NEXT: s_waitcnt vmcnt(0)
+; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; CHECK-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; CHECK-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v1
+; CHECK-NEXT: BB1_1: ; %bb9
+; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; CHECK-NEXT: s_cbranch_vccnz BB1_1
+; CHECK-NEXT: ; %bb.2: ; %bb11
+; CHECK-NEXT: s_mov_b32 s3, 0xf000
+; CHECK-NEXT: s_mov_b32 s2, -1
+; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; CHECK-NEXT: ; return to shader part epilog
bb:
%tmp = load volatile i32, i32 addrspace(1)* undef, align 4
%tmp1 = load volatile i32, i32 addrspace(1)* undef, align 4
}
; FIXME: Should be able to remove the undef copies
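+; The earlier hand-written checks annotated "v_mov_b32_e32 v1, v6" and
+; "v_mov_b32_e32 v2, v7" as the undef copies in question; the same two copies
+; (into v1 and v2) appear in the autogenerated checks below.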
-
-; CHECK-LABEL: {{^}}partially_undef_copy:
-; CHECK: v_mov_b32_e32 v5, 5
-; CHECK-DAG: v_mov_b32_e32 v6, 6
-
-; CHECK-DAG: v_mov_b32_e32 v[[OUTPUT_LO:[0-9]+]], v5
-
-; Undef copy
-; CHECK-DAG: v_mov_b32_e32 v1, v6
-
-; undef copy
-; CHECK-DAG: v_mov_b32_e32 v2, v7
-
-; CHECK-DAG: v_mov_b32_e32 v[[OUTPUT_HI:[0-9]+]], v8
-; CHECK-DAG: v_mov_b32_e32 v[[OUTPUT_LO]], v6
-
-; CHECK: buffer_store_dwordx4 v{{\[}}[[OUTPUT_LO]]:[[OUTPUT_HI]]{{\]}}
define amdgpu_kernel void @partially_undef_copy() #0 {
+; CHECK-LABEL: partially_undef_copy:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: v_mov_b32_e32 v5, 5
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: v_mov_b32_e32 v6, 6
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: v_mov_b32_e32 v0, v5
+; CHECK-NEXT: v_mov_b32_e32 v1, v6
+; CHECK-NEXT: v_mov_b32_e32 v2, v7
+; CHECK-NEXT: v_mov_b32_e32 v3, v8
+; CHECK-NEXT: s_mov_b32 s3, 0xf000
+; CHECK-NEXT: s_mov_b32 s2, -1
+; CHECK-NEXT: v_mov_b32_e32 v0, v6
+; CHECK-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; CHECK-NEXT: ;;#ASMSTART
+; CHECK-NEXT: v_nop
+; CHECK-NEXT: ;;#ASMEND
+; CHECK-NEXT: s_endpgm
%tmp0 = call i32 asm sideeffect "v_mov_b32_e32 v5, 5", "={v5}"()
%tmp1 = call i32 asm sideeffect "v_mov_b32_e32 v6, 6", "={v6}"()